{"blob_id": "e864327a837f59188aa01a10d763c0827779d836", "bodies": ["self.supervised_input = ImageNetInput(split=supervised_split, is_training=True, batch_size=supervised_batch_size, augmentation=supervised_augmentation, **kwargs)\nself.unsupervised_input = ImageNetInput(split='train', is_training=True, batch_size=unsupervised_batch_size, augmentation=unsupervised_augmentation, **kwargs)\nself.batch_size = self.supervised_input.batch_size\nself.num_images = self.supervised_input.num_images\nself.num_classes = NUM_CLASSES", "sup_dataset = self.supervised_input.make_parsed_dataset(ctx)\nunsup_dataset = self.unsupervised_input.make_parsed_dataset(ctx)\ndataset = tf.data.Dataset.zip((sup_dataset, unsup_dataset))\ndataset = dataset.map(_combine_sup_unsup_datasets)\ndataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\noptions = tf.data.Options()\noptions.experimental_deterministic = False\ndataset = dataset.with_options(options)\nreturn dataset"], "bodies_text": "<|body_start_0|>\n self.supervised_input = ImageNetInput(split=supervised_split, is_training=True, batch_size=supervised_batch_size, augmentation=supervised_augmentation, **kwargs)\n self.unsupervised_input = ImageNetInput(split='train', is_training=True, batch_size=unsupervised_batch_size, augmentation=unsupervised_augmentation, **kwargs)\n self.batch_size = self.supervised_input.batch_size\n self.num_images = self.supervised_input.num_images\n self.num_classes = NUM_CLASSES\n<|end_body_0|>\n\n<|body_start_1|>\n sup_dataset = self.supervised_input.make_parsed_dataset(ctx)\n unsup_dataset = self.unsupervised_input.make_parsed_dataset(ctx)\n dataset = tf.data.Dataset.zip((sup_dataset, unsup_dataset))\n dataset = dataset.map(_combine_sup_unsup_datasets)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n options = tf.data.Options()\n options.experimental_deterministic = False\n dataset = dataset.with_options(options)\n return dataset\n<|end_body_1|>\n", "class_docstring": "Generates Imagenet input_fn for semi-supervised training.", "class_name": "ImageNetSslTrainInput", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-generic-cla"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ImageNetSslTrainInput:\n \"\"\"Generates Imagenet input_fn for semi-supervised training.\"\"\"\n\n def __init__(self, supervised_split, supervised_batch_size, unsupervised_batch_size, supervised_augmentation, unsupervised_augmentation, **kwargs):\n \"\"\"Initialize ImageNetSslTrainInput. Args: supervised_split: split of supervised data. supervised_batch_size: batch size for supervised data. unsupervised_batch_size: batch size for unsupervised data. supervised_augmentation: augmentation for supervised data. unsupervised_augmentation: augmentation for unsupervised data. **kwargs: other arguments which are directly passed to ImageNetInput.\"\"\"\n <|body_0|>\n\n def input_fn(self, ctx=None):\n \"\"\"Input function which provides a single batch for training. Args: ctx: Input context. 
Returns: A `tf.data.Dataset` object.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.supervised_input = ImageNetInput(split=supervised_split, is_training=True, batch_size=supervised_batch_size, augmentation=supervised_augmentation, **kwargs)\n self.unsupervised_input = ImageNetInput(split='train', is_training=True, batch_size=unsupervised_batch_size, augmentation=unsupervised_augmentation, **kwargs)\n self.batch_size = self.supervised_input.batch_size\n self.num_images = self.supervised_input.num_images\n self.num_classes = NUM_CLASSES\n<|end_body_0|>\n\n<|body_start_1|>\n sup_dataset = self.supervised_input.make_parsed_dataset(ctx)\n unsup_dataset = self.unsupervised_input.make_parsed_dataset(ctx)\n dataset = tf.data.Dataset.zip((sup_dataset, unsup_dataset))\n dataset = dataset.map(_combine_sup_unsup_datasets)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n options = tf.data.Options()\n options.experimental_deterministic = False\n dataset = dataset.with_options(options)\n return dataset\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000000", "length_bytes": 16434, "license_type": "permissive", "methods": [{"docstring": "Initialize ImageNetSslTrainInput. Args: supervised_split: split of supervised data. supervised_batch_size: batch size for supervised data. unsupervised_batch_size: batch size for unsupervised data. supervised_augmentation: augmentation for supervised data. unsupervised_augmentation: augmentation for unsupervised data. **kwargs: other arguments which are directly passed to ImageNetInput.", "name": "__init__", "signature": "def __init__(self, supervised_split, supervised_batch_size, unsupervised_batch_size, supervised_augmentation, unsupervised_augmentation, **kwargs)"}, {"docstring": "Input function which provides a single batch for training. Args: ctx: Input context. Returns: A `tf.data.Dataset` object.", "name": "input_fn", "signature": "def input_fn(self, ctx=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000363", "prompt": "Implement the Python class `ImageNetSslTrainInput` described below.\n\nClass description:\nGenerates Imagenet input_fn for semi-supervised training.\n\nMethod signatures and docstrings:\n- def __init__(self, supervised_split, supervised_batch_size, unsupervised_batch_size, supervised_augmentation, unsupervised_augmentation, **kwargs): Initialize ImageNetSslTrainInput. Args: supervised_split: split of supervised data. supervised_batch_size: batch size for supervised data. unsupervised_batch_size: batch size for unsupervised data. supervised_augmentation: augmentation for supervised data. unsupervised_augmentation: augmentation for unsupervised data. **kwargs: other arguments which are directly passed to ImageNetInput.\n- def input_fn(self, ctx=None): Input function which provides a single batch for training. Args: ctx: Input context. Returns: A `tf.data.Dataset` object.", "prompted_full_text": "Implement the Python class `ImageNetSslTrainInput` described below.\n\nClass description:\nGenerates Imagenet input_fn for semi-supervised training.\n\nMethod signatures and docstrings:\n- def __init__(self, supervised_split, supervised_batch_size, unsupervised_batch_size, supervised_augmentation, unsupervised_augmentation, **kwargs): Initialize ImageNetSslTrainInput. Args: supervised_split: split of supervised data. supervised_batch_size: batch size for supervised data. unsupervised_batch_size: batch size for unsupervised data. supervised_augmentation: augmentation for supervised data. 
unsupervised_augmentation: augmentation for unsupervised data. **kwargs: other arguments which are directly passed to ImageNetInput.\n- def input_fn(self, ctx=None): Input function which provides a single batch for training. Args: ctx: Input context. Returns: A `tf.data.Dataset` object.\n\n<|skeleton|>\nclass ImageNetSslTrainInput:\n \"\"\"Generates Imagenet input_fn for semi-supervised training.\"\"\"\n\n def __init__(self, supervised_split, supervised_batch_size, unsupervised_batch_size, supervised_augmentation, unsupervised_augmentation, **kwargs):\n \"\"\"Initialize ImageNetSslTrainInput. Args: supervised_split: split of supervised data. supervised_batch_size: batch size for supervised data. unsupervised_batch_size: batch size for unsupervised data. supervised_augmentation: augmentation for supervised data. unsupervised_augmentation: augmentation for unsupervised data. **kwargs: other arguments which are directly passed to ImageNetInput.\"\"\"\n <|body_0|>\n\n def input_fn(self, ctx=None):\n \"\"\"Input function which provides a single batch for training. Args: ctx: Input context. Returns: A `tf.data.Dataset` object.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.supervised_input = ImageNetInput(split=supervised_split, is_training=True, batch_size=supervised_batch_size, augmentation=supervised_augmentation, **kwargs)\n self.unsupervised_input = ImageNetInput(split='train', is_training=True, batch_size=unsupervised_batch_size, augmentation=unsupervised_augmentation, **kwargs)\n self.batch_size = self.supervised_input.batch_size\n self.num_images = self.supervised_input.num_images\n self.num_classes = NUM_CLASSES\n<|end_body_0|>\n\n<|body_start_1|>\n sup_dataset = self.supervised_input.make_parsed_dataset(ctx)\n unsup_dataset = self.unsupervised_input.make_parsed_dataset(ctx)\n dataset = tf.data.Dataset.zip((sup_dataset, unsup_dataset))\n dataset = dataset.map(_combine_sup_unsup_datasets)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n options = tf.data.Options()\n options.experimental_deterministic = False\n dataset = dataset.with_options(options)\n return dataset\n<|end_body_1|>\n", "revision_id": "f8b7f184b91d6144927c7c4b34f7d9c0313f8a39", "skeleton": "<|skeleton|>\nclass ImageNetSslTrainInput:\n \"\"\"Generates Imagenet input_fn for semi-supervised training.\"\"\"\n\n def __init__(self, supervised_split, supervised_batch_size, unsupervised_batch_size, supervised_augmentation, unsupervised_augmentation, **kwargs):\n \"\"\"Initialize ImageNetSslTrainInput. Args: supervised_split: split of supervised data. supervised_batch_size: batch size for supervised data. unsupervised_batch_size: batch size for unsupervised data. supervised_augmentation: augmentation for supervised data. unsupervised_augmentation: augmentation for unsupervised data. **kwargs: other arguments which are directly passed to ImageNetInput.\"\"\"\n <|body_0|>\n\n def input_fn(self, ctx=None):\n \"\"\"Input function which provides a single batch for training. Args: ctx: Input context. Returns: A `tf.data.Dataset` object.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ImageNetSslTrainInput:\n \"\"\"Generates Imagenet input_fn for semi-supervised training.\"\"\"\n\n def __init__(self, supervised_split, supervised_batch_size, unsupervised_batch_size, supervised_augmentation, unsupervised_augmentation, **kwargs):\n \"\"\"Initialize ImageNetSslTrainInput. 
Args: supervised_split: split of supervised data. supervised_batch_size: batch size for supervised data. unsupervised_batch_size: batch size for unsupervised data. supervised_augmentation: augmentation for supervised data. unsupervised_augmentation: augmentation for unsupervised data. **kwargs: other arguments which are directly passed to ImageNetInput.\"\"\"\n self.supervised_input = ImageNetInput(split=supervised_split, is_training=True, batch_size=supervised_batch_size, augmentation=supervised_augmentation, **kwargs)\n self.unsupervised_input = ImageNetInput(split='train', is_training=True, batch_size=unsupervised_batch_size, augmentation=unsupervised_augmentation, **kwargs)\n self.batch_size = self.supervised_input.batch_size\n self.num_images = self.supervised_input.num_images\n self.num_classes = NUM_CLASSES\n\n def input_fn(self, ctx=None):\n \"\"\"Input function which provides a single batch for training. Args: ctx: Input context. Returns: A `tf.data.Dataset` object.\"\"\"\n sup_dataset = self.supervised_input.make_parsed_dataset(ctx)\n unsup_dataset = self.unsupervised_input.make_parsed_dataset(ctx)\n dataset = tf.data.Dataset.zip((sup_dataset, unsup_dataset))\n dataset = dataset.map(_combine_sup_unsup_datasets)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n options = tf.data.Options()\n options.experimental_deterministic = False\n dataset = dataset.with_options(options)\n return dataset\n", "source": "the_stack_v2_python_sparse", "source_path": "imagenet/datasets/imagenet.py", "source_repo": "paulxiong/fixmatch", "split": "val", "star_events_count": 1} {"blob_id": "e8e05023d5d3a4d7d689422fe3aae4b55299e097", "bodies": ["with pytest.raises(KeyError):\n json_obj = {'not_key': ' ', 'not_val': ' '}\n parse_two_keys_dict(json_obj)", "json_obj = {'KEY': 'a key', 'VALUE': 'a value'}\nres = parse_two_keys_dict(json_obj)\nassert res['a key'] == 'a value'", "with pytest.raises(TypeError):\n json_obj = None\n parse_two_keys_dict(json_obj)"], "bodies_text": "<|body_start_0|>\n with pytest.raises(KeyError):\n json_obj = {'not_key': ' ', 'not_val': ' '}\n parse_two_keys_dict(json_obj)\n<|end_body_0|>\n\n<|body_start_1|>\n json_obj = {'KEY': 'a key', 'VALUE': 'a value'}\n res = parse_two_keys_dict(json_obj)\n assert res['a key'] == 'a value'\n<|end_body_1|>\n\n<|body_start_2|>\n with pytest.raises(TypeError):\n json_obj = None\n parse_two_keys_dict(json_obj)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "TestParseTwoKeysDict", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestParseTwoKeysDict:\n\n def test_parse_two_keys_dict_unexpected_format(self):\n \"\"\"Given - json object When - the json object has unexpected format Then - raise a KeyError Exception\"\"\"\n <|body_0|>\n\n def test_parse_two_keys_dict_expected_format(self):\n \"\"\"Given - json object When - the json object has the expected format Then - return a new dictionary with correct key and value\"\"\"\n <|body_1|>\n\n def test_parse_two_keys_dict_none_value(self):\n \"\"\"Given - json object When - the json object is None Then - raise a TypeError Exception\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with pytest.raises(KeyError):\n json_obj = {'not_key': ' ', 'not_val': ' '}\n parse_two_keys_dict(json_obj)\n<|end_body_0|>\n\n<|body_start_1|>\n json_obj = {'KEY': 'a key', 'VALUE': 'a value'}\n res = parse_two_keys_dict(json_obj)\n assert res['a key'] == 'a value'\n<|end_body_1|>\n\n<|body_start_2|>\n with 
pytest.raises(TypeError):\n json_obj = None\n parse_two_keys_dict(json_obj)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000001", "length_bytes": 44285, "license_type": "permissive", "methods": [{"docstring": "Given - json object When - the json object has unexpected format Then - raise a KeyError Exception", "name": "test_parse_two_keys_dict_unexpected_format", "signature": "def test_parse_two_keys_dict_unexpected_format(self)"}, {"docstring": "Given - json object When - the json object has the expected format Then - return a new dictionary with correct key and value", "name": "test_parse_two_keys_dict_expected_format", "signature": "def test_parse_two_keys_dict_expected_format(self)"}, {"docstring": "Given - json object When - the json object is None Then - raise a TypeError Exception", "name": "test_parse_two_keys_dict_none_value", "signature": "def test_parse_two_keys_dict_none_value(self)"}], "n_methods": 3, "prompt": "Implement the Python class `TestParseTwoKeysDict` described below.\n\nClass description:\nImplement the TestParseTwoKeysDict class.\n\nMethod signatures and docstrings:\n- def test_parse_two_keys_dict_unexpected_format(self): Given - json object When - the json object has unexpected format Then - raise a KeyError Exception\n- def test_parse_two_keys_dict_expected_format(self): Given - json object When - the json object has the expected format Then - return a new dictionary with correct key and value\n- def test_parse_two_keys_dict_none_value(self): Given - json object When - the json object is None Then - raise a TypeError Exception", "prompted_full_text": "Implement the Python class `TestParseTwoKeysDict` described below.\n\nClass description:\nImplement the TestParseTwoKeysDict class.\n\nMethod signatures and docstrings:\n- def test_parse_two_keys_dict_unexpected_format(self): Given - json object When - the json object has unexpected format Then - raise a KeyError Exception\n- def test_parse_two_keys_dict_expected_format(self): Given - json object When - the json object has the expected format Then - return a new dictionary with correct key and value\n- def test_parse_two_keys_dict_none_value(self): Given - json object When - the json object is None Then - raise a TypeError Exception\n\n<|skeleton|>\nclass TestParseTwoKeysDict:\n\n def test_parse_two_keys_dict_unexpected_format(self):\n \"\"\"Given - json object When - the json object has unexpected format Then - raise a KeyError Exception\"\"\"\n <|body_0|>\n\n def test_parse_two_keys_dict_expected_format(self):\n \"\"\"Given - json object When - the json object has the expected format Then - return a new dictionary with correct key and value\"\"\"\n <|body_1|>\n\n def test_parse_two_keys_dict_none_value(self):\n \"\"\"Given - json object When - the json object is None Then - raise a TypeError Exception\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with pytest.raises(KeyError):\n json_obj = {'not_key': ' ', 'not_val': ' '}\n parse_two_keys_dict(json_obj)\n<|end_body_0|>\n\n<|body_start_1|>\n json_obj = {'KEY': 'a key', 'VALUE': 'a value'}\n res = parse_two_keys_dict(json_obj)\n assert res['a key'] == 'a value'\n<|end_body_1|>\n\n<|body_start_2|>\n with pytest.raises(TypeError):\n json_obj = None\n parse_two_keys_dict(json_obj)\n<|end_body_2|>\n", "revision_id": "890def5a0e0ae8d6eaa538148249ddbc851dbb6b", "skeleton": "<|skeleton|>\nclass TestParseTwoKeysDict:\n\n def test_parse_two_keys_dict_unexpected_format(self):\n \"\"\"Given - json object When - the json object has unexpected format 
Then - raise a KeyError Exception\"\"\"\n <|body_0|>\n\n def test_parse_two_keys_dict_expected_format(self):\n \"\"\"Given - json object When - the json object has the expected format Then - return a new dictionary with correct key and value\"\"\"\n <|body_1|>\n\n def test_parse_two_keys_dict_none_value(self):\n \"\"\"Given - json object When - the json object is None Then - raise a TypeError Exception\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestParseTwoKeysDict:\n def test_parse_two_keys_dict_unexpected_format(self):\n \"\"\"Given - json object When - the json object has unexpected format Then - raise a KeyError Exception\"\"\"\n with pytest.raises(KeyError):\n json_obj = {'not_key': ' ', 'not_val': ' '}\n parse_two_keys_dict(json_obj)\n\n def test_parse_two_keys_dict_expected_format(self):\n \"\"\"Given - json object When - the json object has the expected format Then - return a new dictionary with correct key and value\"\"\"\n json_obj = {'KEY': 'a key', 'VALUE': 'a value'}\n res = parse_two_keys_dict(json_obj)\n assert res['a key'] == 'a value'\n\n def test_parse_two_keys_dict_none_value(self):\n \"\"\"Given - json object When - the json object is None Then - raise a TypeError Exception\"\"\"\n with pytest.raises(TypeError):\n json_obj = None\n parse_two_keys_dict(json_obj)\n", "source": "the_stack_v2_python_sparse", "source_path": "Packs/qualys/Integrations/Qualysv2/Qualysv2_test.py", "source_repo": "demisto/content", "split": "val", "star_events_count": 1023} {"blob_id": "a1d27eb64efdca8c60bfd2875c5dc565b24c1f6f", "bodies": ["if '@' in username:\n kwargs = {'email': username}\nelse:\n kwargs = {'username': username}\ntry:\n user = User.objects.get(**kwargs)\n if user.check_password(password):\n return user\nexcept User.DoesNotExist:\n return None", "try:\n return User.objects.get(pk=user_id)\nexcept User.DoesNotExist:\n return None"], "bodies_text": "<|body_start_0|>\n if '@' in username:\n kwargs = {'email': username}\n else:\n kwargs = {'username': username}\n try:\n user = User.objects.get(**kwargs)\n if user.check_password(password):\n return user\n except User.DoesNotExist:\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None\n<|end_body_1|>\n", "class_docstring": "Allows user to authenticate with username or email address.", "class_name": "EmailOrUsernameModelBackend", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EmailOrUsernameModelBackend:\n \"\"\"Allows user to authenticate with username or email address.\"\"\"\n\n def authenticate(cls, username=None, password=None):\n \"\"\"Checks email or username.\"\"\"\n <|body_0|>\n\n def get_user(cls, user_id):\n \"\"\"Get user object by id.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if '@' in username:\n kwargs = {'email': username}\n else:\n kwargs = {'username': username}\n try:\n user = User.objects.get(**kwargs)\n if user.check_password(password):\n return user\n except User.DoesNotExist:\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000002", "length_bytes": 998, "license_type": "no_license", "methods": [{"docstring": "Checks email or username.", "name": "authenticate", "signature": "def 
authenticate(cls, username=None, password=None)"}, {"docstring": "Get user object by id.", "name": "get_user", "signature": "def get_user(cls, user_id)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005892", "prompt": "Implement the Python class `EmailOrUsernameModelBackend` described below.\n\nClass description:\nAllows user to authenticate with username or email address.\n\nMethod signatures and docstrings:\n- def authenticate(cls, username=None, password=None): Checks email or username.\n- def get_user(cls, user_id): Get user object by id.", "prompted_full_text": "Implement the Python class `EmailOrUsernameModelBackend` described below.\n\nClass description:\nAllows user to authenticate with username or email address.\n\nMethod signatures and docstrings:\n- def authenticate(cls, username=None, password=None): Checks email or username.\n- def get_user(cls, user_id): Get user object by id.\n\n<|skeleton|>\nclass EmailOrUsernameModelBackend:\n \"\"\"Allows user to authenticate with username or email address.\"\"\"\n\n def authenticate(cls, username=None, password=None):\n \"\"\"Checks email or username.\"\"\"\n <|body_0|>\n\n def get_user(cls, user_id):\n \"\"\"Get user object by id.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if '@' in username:\n kwargs = {'email': username}\n else:\n kwargs = {'username': username}\n try:\n user = User.objects.get(**kwargs)\n if user.check_password(password):\n return user\n except User.DoesNotExist:\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None\n<|end_body_1|>\n", "revision_id": "a780ccdc3350d4b5c7990c65d1af8d71060c62cc", "skeleton": "<|skeleton|>\nclass EmailOrUsernameModelBackend:\n \"\"\"Allows user to authenticate with username or email address.\"\"\"\n\n def authenticate(cls, username=None, password=None):\n \"\"\"Checks email or username.\"\"\"\n <|body_0|>\n\n def get_user(cls, user_id):\n \"\"\"Get user object by id.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class EmailOrUsernameModelBackend:\n \"\"\"Allows user to authenticate with username or email address.\"\"\"\n\n def authenticate(cls, username=None, password=None):\n \"\"\"Checks email or username.\"\"\"\n if '@' in username:\n kwargs = {'email': username}\n else:\n kwargs = {'username': username}\n try:\n user = User.objects.get(**kwargs)\n if user.check_password(password):\n return user\n except User.DoesNotExist:\n return None\n\n def get_user(cls, user_id):\n \"\"\"Get user object by id.\"\"\"\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "common/backends.py", "source_repo": "wcirillo/ten", "split": "val", "star_events_count": 0} {"blob_id": "f9311aaea85b4579481d753930a3ae6c8710222d", "bodies": ["f = {}\nd = {}\n\ndef find(x):\n f.setdefault(x, x)\n d.setdefault(x, 1)\n if x != f[x]:\n t = f[x]\n f[x] = find(t)\n d[x] *= d[t]\n return f[x]\n return x\n\ndef union(A, B, value):\n a, b = (find(A), find(B))\n if a != b:\n f[a] = b\n d[a] = d[B] / d[A] * value\n\ndef check(x, y):\n if x not in f or y not in f:\n return -1.0\n a, b = (find(x), find(y))\n if a != b:\n return -1.0\n return d[x] / d[y]\nfor i, nums in enumerate(equations):\n union(nums[0], nums[1], values[i])\nres = []\nfor x, y in queries:\n res.append(check(x, y))\nreturn res", 
"from collections import defaultdict\ngraph = defaultdict(int)\nset1 = set()\nfor i in range(len(equations)):\n a, b = equations[i]\n graph[a, b] = values[i]\n graph[b, a] = 1 / values[i]\n set1.add(a)\n set1.add(b)\narr = list(set1)\nfor k in arr:\n for i in arr:\n for j in arr:\n if graph[i, k] and graph[k, j]:\n graph[i, j] = graph[i, k] * graph[k, j]\nres = []\nfor x, y in queries:\n if graph[x, y]:\n res.append(graph[x, y])\n else:\n res.append(-1)\nreturn res"], "bodies_text": "<|body_start_0|>\n f = {}\n d = {}\n\n def find(x):\n f.setdefault(x, x)\n d.setdefault(x, 1)\n if x != f[x]:\n t = f[x]\n f[x] = find(t)\n d[x] *= d[t]\n return f[x]\n return x\n\n def union(A, B, value):\n a, b = (find(A), find(B))\n if a != b:\n f[a] = b\n d[a] = d[B] / d[A] * value\n\n def check(x, y):\n if x not in f or y not in f:\n return -1.0\n a, b = (find(x), find(y))\n if a != b:\n return -1.0\n return d[x] / d[y]\n for i, nums in enumerate(equations):\n union(nums[0], nums[1], values[i])\n res = []\n for x, y in queries:\n res.append(check(x, y))\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n from collections import defaultdict\n graph = defaultdict(int)\n set1 = set()\n for i in range(len(equations)):\n a, b = equations[i]\n graph[a, b] = values[i]\n graph[b, a] = 1 / values[i]\n set1.add(a)\n set1.add(b)\n arr = list(set1)\n for k in arr:\n for i in arr:\n for j in arr:\n if graph[i, k] and graph[k, j]:\n graph[i, j] = graph[i, k] * graph[k, j]\n res = []\n for x, y in queries:\n if graph[x, y]:\n res.append(graph[x, y])\n else:\n res.append(-1)\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def calcEquation1(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n \"\"\"思路:并查集 @param equations: @param values: @param queries: @return:\"\"\"\n <|body_0|>\n\n def calcEquation2(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n \"\"\"思路:floyd @param equations: @param values: @param queries: @return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n f = {}\n d = {}\n\n def find(x):\n f.setdefault(x, x)\n d.setdefault(x, 1)\n if x != f[x]:\n t = f[x]\n f[x] = find(t)\n d[x] *= d[t]\n return f[x]\n return x\n\n def union(A, B, value):\n a, b = (find(A), find(B))\n if a != b:\n f[a] = b\n d[a] = d[B] / d[A] * value\n\n def check(x, y):\n if x not in f or y not in f:\n return -1.0\n a, b = (find(x), find(y))\n if a != b:\n return -1.0\n return d[x] / d[y]\n for i, nums in enumerate(equations):\n union(nums[0], nums[1], values[i])\n res = []\n for x, y in queries:\n res.append(check(x, y))\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n from collections import defaultdict\n graph = defaultdict(int)\n set1 = set()\n for i in range(len(equations)):\n a, b = equations[i]\n graph[a, b] = values[i]\n graph[b, a] = 1 / values[i]\n set1.add(a)\n set1.add(b)\n arr = list(set1)\n for k in arr:\n for i in arr:\n for j in arr:\n if graph[i, k] and graph[k, j]:\n graph[i, j] = graph[i, k] * graph[k, j]\n res = []\n for x, y in queries:\n if graph[x, y]:\n res.append(graph[x, y])\n else:\n res.append(-1)\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000003", "length_bytes": 4321, "license_type": "no_license", "methods": [{"docstring": "思路:并查集 @param equations: @param values: @param queries: @return:", "name": "calcEquation1", 
"signature": "def calcEquation1(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]"}, {"docstring": "思路:floyd @param equations: @param values: @param queries: @return:", "name": "calcEquation2", "signature": "def calcEquation2(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def calcEquation1(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]: 思路:并查集 @param equations: @param values: @param queries: @return:\n- def calcEquation2(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]: 思路:floyd @param equations: @param values: @param queries: @return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def calcEquation1(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]: 思路:并查集 @param equations: @param values: @param queries: @return:\n- def calcEquation2(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]: 思路:floyd @param equations: @param values: @param queries: @return:\n\n<|skeleton|>\nclass Solution:\n\n def calcEquation1(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n \"\"\"思路:并查集 @param equations: @param values: @param queries: @return:\"\"\"\n <|body_0|>\n\n def calcEquation2(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n \"\"\"思路:floyd @param equations: @param values: @param queries: @return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n f = {}\n d = {}\n\n def find(x):\n f.setdefault(x, x)\n d.setdefault(x, 1)\n if x != f[x]:\n t = f[x]\n f[x] = find(t)\n d[x] *= d[t]\n return f[x]\n return x\n\n def union(A, B, value):\n a, b = (find(A), find(B))\n if a != b:\n f[a] = b\n d[a] = d[B] / d[A] * value\n\n def check(x, y):\n if x not in f or y not in f:\n return -1.0\n a, b = (find(x), find(y))\n if a != b:\n return -1.0\n return d[x] / d[y]\n for i, nums in enumerate(equations):\n union(nums[0], nums[1], values[i])\n res = []\n for x, y in queries:\n res.append(check(x, y))\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n from collections import defaultdict\n graph = defaultdict(int)\n set1 = set()\n for i in range(len(equations)):\n a, b = equations[i]\n graph[a, b] = values[i]\n graph[b, a] = 1 / values[i]\n set1.add(a)\n set1.add(b)\n arr = list(set1)\n for k in arr:\n for i in arr:\n for j in arr:\n if graph[i, k] and graph[k, j]:\n graph[i, j] = graph[i, k] * graph[k, j]\n res = []\n for x, y in queries:\n if graph[x, y]:\n res.append(graph[x, y])\n else:\n res.append(-1)\n return res\n<|end_body_1|>\n", "revision_id": "e43ee86c5a8cdb808da09b4b6138e10275abadb5", "skeleton": "<|skeleton|>\nclass Solution:\n\n def calcEquation1(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n \"\"\"思路:并查集 @param equations: @param values: @param queries: @return:\"\"\"\n <|body_0|>\n\n def calcEquation2(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n \"\"\"思路:floyd @param equations: @param values: @param queries: 
@return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def calcEquation1(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n \"\"\"Approach: union-find @param equations: @param values: @param queries: @return:\"\"\"\n f = {}\n d = {}\n\n def find(x):\n f.setdefault(x, x)\n d.setdefault(x, 1)\n if x != f[x]:\n t = f[x]\n f[x] = find(t)\n d[x] *= d[t]\n return f[x]\n return x\n\n def union(A, B, value):\n a, b = (find(A), find(B))\n if a != b:\n f[a] = b\n d[a] = d[B] / d[A] * value\n\n def check(x, y):\n if x not in f or y not in f:\n return -1.0\n a, b = (find(x), find(y))\n if a != b:\n return -1.0\n return d[x] / d[y]\n for i, nums in enumerate(equations):\n union(nums[0], nums[1], values[i])\n res = []\n for x, y in queries:\n res.append(check(x, y))\n return res\n\n def calcEquation2(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n \"\"\"Approach: Floyd @param equations: @param values: @param queries: @return:\"\"\"\n from collections import defaultdict\n graph = defaultdict(int)\n set1 = set()\n for i in range(len(equations)):\n a, b = equations[i]\n graph[a, b] = values[i]\n graph[b, a] = 1 / values[i]\n set1.add(a)\n set1.add(b)\n arr = list(set1)\n for k in arr:\n for i in arr:\n for j in arr:\n if graph[i, k] and graph[k, j]:\n graph[i, j] = graph[i, k] * graph[k, j]\n res = []\n for x, y in queries:\n if graph[x, y]:\n res.append(graph[x, y])\n else:\n res.append(-1)\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode/并查集/399. 除法求值.py", "source_repo": "yiming1012/MyLeetCode", "split": "val", "star_events_count": 2} {"blob_id": "a8391d3f203bf47ff321870d6e6c860562ccfbf3", "bodies": ["super(ExtensionClient, self).__init__(serialize_format, deserialize_format)\nself.auth_token = auth_token\nself.default_headers['X-Auth-Token'] = auth_token\nct = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.serialize_format)\naccept = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.deserialize_format)\nself.default_headers['Content-Type'] = ct\nself.default_headers['Accept'] = accept\nself.url = url", "url = '{0}/extensions'.format(self.url)\nresponse = self.request('GET', url, response_entity_type=Extensions, requestslib_kwargs=requestslib_kwargs)\nreturn response"], "bodies_text": "<|body_start_0|>\n super(ExtensionClient, self).__init__(serialize_format, deserialize_format)\n self.auth_token = auth_token\n self.default_headers['X-Auth-Token'] = auth_token\n ct = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.serialize_format)\n accept = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.deserialize_format)\n self.default_headers['Content-Type'] = ct\n self.default_headers['Accept'] = accept\n self.url = url\n<|end_body_0|>\n\n<|body_start_1|>\n url = '{0}/extensions'.format(self.url)\n response = self.request('GET', url, response_entity_type=Extensions, requestslib_kwargs=requestslib_kwargs)\n return response\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ExtensionClient", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExtensionClient:\n\n def __init__(self, url, auth_token, serialize_format=None, 
deserialize_format=None):\n \"\"\"@param url: Base URL for the compute service @type url: String @param auth_token: Auth token to be used for all requests @type auth_token: String @param serialize_format: Format for serializing requests @type serialize_format: String @param deserialize_format: Format for de-serializing responses @type deserialize_format: String\"\"\"\n <|body_0|>\n\n def list_extensions(self, requestslib_kwargs=None):\n \"\"\"@summary: Lists all the extensions. Maps to /extensions @return: response @rtype: Response\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ExtensionClient, self).__init__(serialize_format, deserialize_format)\n self.auth_token = auth_token\n self.default_headers['X-Auth-Token'] = auth_token\n ct = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.serialize_format)\n accept = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.deserialize_format)\n self.default_headers['Content-Type'] = ct\n self.default_headers['Accept'] = accept\n self.url = url\n<|end_body_0|>\n\n<|body_start_1|>\n url = '{0}/extensions'.format(self.url)\n response = self.request('GET', url, response_entity_type=Extensions, requestslib_kwargs=requestslib_kwargs)\n return response\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000004", "length_bytes": 2383, "license_type": "permissive", "methods": [{"docstring": "@param url: Base URL for the compute service @type url: String @param auth_token: Auth token to be used for all requests @type auth_token: String @param serialize_format: Format for serializing requests @type serialize_format: String @param deserialize_format: Format for de-serializing responses @type deserialize_format: String", "name": "__init__", "signature": "def __init__(self, url, auth_token, serialize_format=None, deserialize_format=None)"}, {"docstring": "@summary: Lists all the extensions. Maps to /extensions @return: response @rtype: Response", "name": "list_extensions", "signature": "def list_extensions(self, requestslib_kwargs=None)"}], "n_methods": 2, "prompt": "Implement the Python class `ExtensionClient` described below.\n\nClass description:\nImplement the ExtensionClient class.\n\nMethod signatures and docstrings:\n- def __init__(self, url, auth_token, serialize_format=None, deserialize_format=None): @param url: Base URL for the compute service @type url: String @param auth_token: Auth token to be used for all requests @type auth_token: String @param serialize_format: Format for serializing requests @type serialize_format: String @param deserialize_format: Format for de-serializing responses @type deserialize_format: String\n- def list_extensions(self, requestslib_kwargs=None): @summary: Lists all the extensions. Maps to /extensions @return: response @rtype: Response", "prompted_full_text": "Implement the Python class `ExtensionClient` described below.\n\nClass description:\nImplement the ExtensionClient class.\n\nMethod signatures and docstrings:\n- def __init__(self, url, auth_token, serialize_format=None, deserialize_format=None): @param url: Base URL for the compute service @type url: String @param auth_token: Auth token to be used for all requests @type auth_token: String @param serialize_format: Format for serializing requests @type serialize_format: String @param deserialize_format: Format for de-serializing responses @type deserialize_format: String\n- def list_extensions(self, requestslib_kwargs=None): @summary: Lists all the extensions. 
Maps to /extensions @return: response @rtype: Response\n\n<|skeleton|>\nclass ExtensionClient:\n\n def __init__(self, url, auth_token, serialize_format=None, deserialize_format=None):\n \"\"\"@param url: Base URL for the compute service @type url: String @param auth_token: Auth token to be used for all requests @type auth_token: String @param serialize_format: Format for serializing requests @type serialize_format: String @param deserialize_format: Format for de-serializing responses @type deserialize_format: String\"\"\"\n <|body_0|>\n\n def list_extensions(self, requestslib_kwargs=None):\n \"\"\"@summary: Lists all the extensions. Maps to /extensions @return: response @rtype: Response\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ExtensionClient, self).__init__(serialize_format, deserialize_format)\n self.auth_token = auth_token\n self.default_headers['X-Auth-Token'] = auth_token\n ct = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.serialize_format)\n accept = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.deserialize_format)\n self.default_headers['Content-Type'] = ct\n self.default_headers['Accept'] = accept\n self.url = url\n<|end_body_0|>\n\n<|body_start_1|>\n url = '{0}/extensions'.format(self.url)\n response = self.request('GET', url, response_entity_type=Extensions, requestslib_kwargs=requestslib_kwargs)\n return response\n<|end_body_1|>\n", "revision_id": "7d49cf6bfd7e1a6e5b739e7de52f2e18e5ccf924", "skeleton": "<|skeleton|>\nclass ExtensionClient:\n\n def __init__(self, url, auth_token, serialize_format=None, deserialize_format=None):\n \"\"\"@param url: Base URL for the compute service @type url: String @param auth_token: Auth token to be used for all requests @type auth_token: String @param serialize_format: Format for serializing requests @type serialize_format: String @param deserialize_format: Format for de-serializing responses @type deserialize_format: String\"\"\"\n <|body_0|>\n\n def list_extensions(self, requestslib_kwargs=None):\n \"\"\"@summary: Lists all the extensions. Maps to /extensions @return: response @rtype: Response\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ExtensionClient:\n def __init__(self, url, auth_token, serialize_format=None, deserialize_format=None):\n \"\"\"@param url: Base URL for the compute service @type url: String @param auth_token: Auth token to be used for all requests @type auth_token: String @param serialize_format: Format for serializing requests @type serialize_format: String @param deserialize_format: Format for de-serializing responses @type deserialize_format: String\"\"\"\n super(ExtensionClient, self).__init__(serialize_format, deserialize_format)\n self.auth_token = auth_token\n self.default_headers['X-Auth-Token'] = auth_token\n ct = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.serialize_format)\n accept = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.deserialize_format)\n self.default_headers['Content-Type'] = ct\n self.default_headers['Accept'] = accept\n self.url = url\n\n def list_extensions(self, requestslib_kwargs=None):\n \"\"\"@summary: Lists all the extensions. 
Maps to /extensions @return: response @rtype: Response\"\"\"\n url = '{0}/extensions'.format(self.url)\n response = self.request('GET', url, response_entity_type=Extensions, requestslib_kwargs=requestslib_kwargs)\n return response\n", "source": "the_stack_v2_python_sparse", "source_path": "cloudcafe/compute/extensions/extensions_api/clients/extensions_client.py", "source_repo": "kurhula/cloudcafe", "split": "val", "star_events_count": 0} {"blob_id": "696750e4d5bfae81463edd0318b501a49948a0a2", "bodies": ["if flt.shape != msk.shape:\n raise Exception('model and data must have same shape')\nmsk[:] = flt[:]\nmsk[:, :, 0] = 0.0", "if flt.shape != msk.shape:\n raise Exception('model and data must have same shape')\nflt[:] = msk[:]\nflt[:, :, 0] = 0.0"], "bodies_text": "<|body_start_0|>\n if flt.shape != msk.shape:\n raise Exception('model and data must have same shape')\n msk[:] = flt[:]\n msk[:, :, 0] = 0.0\n<|end_body_0|>\n\n<|body_start_1|>\n if flt.shape != msk.shape:\n raise Exception('model and data must have same shape')\n flt[:] = msk[:]\n flt[:, :, 0] = 0.0\n<|end_body_1|>\n", "class_docstring": "Mask operator for not updating the zero lag coefficient", "class_name": "peflv2dmask", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass peflv2dmask:\n \"\"\"Mask operator for not updating the zero lag coefficient\"\"\"\n\n def forward(self, add, flt, msk):\n \"\"\"Applies the mask to the filter\"\"\"\n <|body_0|>\n\n def adjoint(self, add, flt, msk):\n \"\"\"Applies adjoint mask\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if flt.shape != msk.shape:\n raise Exception('model and data must have same shape')\n msk[:] = flt[:]\n msk[:, :, 0] = 0.0\n<|end_body_0|>\n\n<|body_start_1|>\n if flt.shape != msk.shape:\n raise Exception('model and data must have same shape')\n flt[:] = msk[:]\n flt[:, :, 0] = 0.0\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000005", "length_bytes": 9076, "license_type": "no_license", "methods": [{"docstring": "Applies the mask to the filter", "name": "forward", "signature": "def forward(self, add, flt, msk)"}, {"docstring": "Applies adjoint mask", "name": "adjoint", "signature": "def adjoint(self, add, flt, msk)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005886", "prompt": "Implement the Python class `peflv2dmask` described below.\n\nClass description:\nMask operator for not updating the zero lag coefficient\n\nMethod signatures and docstrings:\n- def forward(self, add, flt, msk): Applies the mask to the filter\n- def adjoint(self, add, flt, msk): Applies adjoint mask", "prompted_full_text": "Implement the Python class `peflv2dmask` described below.\n\nClass description:\nMask operator for not updating the zero lag coefficient\n\nMethod signatures and docstrings:\n- def forward(self, add, flt, msk): Applies the mask to the filter\n- def adjoint(self, add, flt, msk): Applies adjoint mask\n\n<|skeleton|>\nclass peflv2dmask:\n \"\"\"Mask operator for not updating the zero lag coefficient\"\"\"\n\n def forward(self, add, flt, msk):\n \"\"\"Applies the mask to the filter\"\"\"\n <|body_0|>\n\n def adjoint(self, add, flt, msk):\n \"\"\"Applies adjoint mask\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if flt.shape != msk.shape:\n raise Exception('model and data must have same shape')\n msk[:] = flt[:]\n msk[:, :, 0] = 0.0\n<|end_body_0|>\n\n<|body_start_1|>\n if flt.shape != msk.shape:\n raise Exception('model and data must have same shape')\n 
flt[:] = msk[:]\n flt[:, :, 0] = 0.0\n<|end_body_1|>\n", "revision_id": "db8c81f6a98cd665a493b54099eae1d28ee092e7", "skeleton": "<|skeleton|>\nclass peflv2dmask:\n \"\"\"Mask operator for not updating the zero lag coefficient\"\"\"\n\n def forward(self, add, flt, msk):\n \"\"\"Applies the mask to the filter\"\"\"\n <|body_0|>\n\n def adjoint(self, add, flt, msk):\n \"\"\"Applies adjoint mask\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class peflv2dmask:\n \"\"\"Mask operator for not updating the zero lag coefficient\"\"\"\n\n def forward(self, add, flt, msk):\n \"\"\"Applies the mask to the filter\"\"\"\n if flt.shape != msk.shape:\n raise Exception('model and data must have same shape')\n msk[:] = flt[:]\n msk[:, :, 0] = 0.0\n\n def adjoint(self, add, flt, msk):\n \"\"\"Applies adjoint mask\"\"\"\n if flt.shape != msk.shape:\n raise Exception('model and data must have same shape')\n flt[:] = msk[:]\n flt[:, :, 0] = 0.0\n", "source": "the_stack_v2_python_sparse", "source_path": "adf/adf/nstat/peflv2d.py", "source_repo": "ke0m/process_f3_data", "split": "val", "star_events_count": 1} {"blob_id": "bd0f1abfcf830758fb58ba5e12d93d44f79d7085", "bodies": ["super(FCModel, self).__init__()\nsizes.insert(0, n_features)\nlayers = [nn.Linear(size_in, size_out) for size_in, size_out in zip(sizes[:-1], sizes[1:])]\nself.input_norm = nn.LayerNorm(n_features) if input_norm else nn.Identity()\nself.activation = nn.Identity() if activation is None else instantiate_class('torch.nn.modules.activation', activation)\nself.dropout = nn.Dropout(dropout or 0.0)\nself.output_size = sizes[-1]\nself.layers = nn.ModuleList(layers)", "x = self.input_norm(x)\nfor layer in self.layers:\n x = self.dropout(self.activation(layer(x)))\nreturn x"], "bodies_text": "<|body_start_0|>\n super(FCModel, self).__init__()\n sizes.insert(0, n_features)\n layers = [nn.Linear(size_in, size_out) for size_in, size_out in zip(sizes[:-1], sizes[1:])]\n self.input_norm = nn.LayerNorm(n_features) if input_norm else nn.Identity()\n self.activation = nn.Identity() if activation is None else instantiate_class('torch.nn.modules.activation', activation)\n self.dropout = nn.Dropout(dropout or 0.0)\n self.output_size = sizes[-1]\n self.layers = nn.ModuleList(layers)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.input_norm(x)\n for layer in self.layers:\n x = self.dropout(self.activation(layer(x)))\n return x\n<|end_body_1|>\n", "class_docstring": "This class represents a fully connected neural network model with given layer sizes and activation function.", "class_name": "FCModel", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FCModel:\n \"\"\"This class represents a fully connected neural network model with given layer sizes and activation function.\"\"\"\n\n def __init__(self, sizes, input_norm, activation, dropout, n_features):\n \"\"\":param sizes: list of layer sizes (excluding the input layer size which is given by n_features parameter) :param input_norm: flag indicating whether to perform layer normalization on the input :param activation: name of the PyTorch activation function, e.g. Sigmoid or Tanh :param dropout: dropout probability :param n_features: number of input features\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forward pass through the FCModel. 
:param x: input of shape [batch_size, slate_length, self.layers[0].in_features] :return: output of shape [batch_size, slate_length, self.output_size]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(FCModel, self).__init__()\n sizes.insert(0, n_features)\n layers = [nn.Linear(size_in, size_out) for size_in, size_out in zip(sizes[:-1], sizes[1:])]\n self.input_norm = nn.LayerNorm(n_features) if input_norm else nn.Identity()\n self.activation = nn.Identity() if activation is None else instantiate_class('torch.nn.modules.activation', activation)\n self.dropout = nn.Dropout(dropout or 0.0)\n self.output_size = sizes[-1]\n self.layers = nn.ModuleList(layers)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.input_norm(x)\n for layer in self.layers:\n x = self.dropout(self.activation(layer(x)))\n return x\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000006", "length_bytes": 21238, "license_type": "no_license", "methods": [{"docstring": ":param sizes: list of layer sizes (excluding the input layer size which is given by n_features parameter) :param input_norm: flag indicating whether to perform layer normalization on the input :param activation: name of the PyTorch activation function, e.g. Sigmoid or Tanh :param dropout: dropout probability :param n_features: number of input features", "name": "__init__", "signature": "def __init__(self, sizes, input_norm, activation, dropout, n_features)"}, {"docstring": "Forward pass through the FCModel. :param x: input of shape [batch_size, slate_length, self.layers[0].in_features] :return: output of shape [batch_size, slate_length, self.output_size]", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 2, "prompt": "Implement the Python class `FCModel` described below.\n\nClass description:\nThis class represents a fully connected neural network model with given layer sizes and activation function.\n\nMethod signatures and docstrings:\n- def __init__(self, sizes, input_norm, activation, dropout, n_features): :param sizes: list of layer sizes (excluding the input layer size which is given by n_features parameter) :param input_norm: flag indicating whether to perform layer normalization on the input :param activation: name of the PyTorch activation function, e.g. Sigmoid or Tanh :param dropout: dropout probability :param n_features: number of input features\n- def forward(self, x): Forward pass through the FCModel. :param x: input of shape [batch_size, slate_length, self.layers[0].in_features] :return: output of shape [batch_size, slate_length, self.output_size]", "prompted_full_text": "Implement the Python class `FCModel` described below.\n\nClass description:\nThis class represents a fully connected neural network model with given layer sizes and activation function.\n\nMethod signatures and docstrings:\n- def __init__(self, sizes, input_norm, activation, dropout, n_features): :param sizes: list of layer sizes (excluding the input layer size which is given by n_features parameter) :param input_norm: flag indicating whether to perform layer normalization on the input :param activation: name of the PyTorch activation function, e.g. Sigmoid or Tanh :param dropout: dropout probability :param n_features: number of input features\n- def forward(self, x): Forward pass through the FCModel. 
:param x: input of shape [batch_size, slate_length, self.layers[0].in_features] :return: output of shape [batch_size, slate_length, self.output_size]\n\n<|skeleton|>\nclass FCModel:\n \"\"\"This class represents a fully connected neural network model with given layer sizes and activation function.\"\"\"\n\n def __init__(self, sizes, input_norm, activation, dropout, n_features):\n \"\"\":param sizes: list of layer sizes (excluding the input layer size which is given by n_features parameter) :param input_norm: flag indicating whether to perform layer normalization on the input :param activation: name of the PyTorch activation function, e.g. Sigmoid or Tanh :param dropout: dropout probability :param n_features: number of input features\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forward pass through the FCModel. :param x: input of shape [batch_size, slate_length, self.layers[0].in_features] :return: output of shape [batch_size, slate_length, self.output_size]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(FCModel, self).__init__()\n sizes.insert(0, n_features)\n layers = [nn.Linear(size_in, size_out) for size_in, size_out in zip(sizes[:-1], sizes[1:])]\n self.input_norm = nn.LayerNorm(n_features) if input_norm else nn.Identity()\n self.activation = nn.Identity() if activation is None else instantiate_class('torch.nn.modules.activation', activation)\n self.dropout = nn.Dropout(dropout or 0.0)\n self.output_size = sizes[-1]\n self.layers = nn.ModuleList(layers)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.input_norm(x)\n for layer in self.layers:\n x = self.dropout(self.activation(layer(x)))\n return x\n<|end_body_1|>\n", "revision_id": "7e55a422588c1d1e00f35a3d3a3ff896cce59e18", "skeleton": "<|skeleton|>\nclass FCModel:\n \"\"\"This class represents a fully connected neural network model with given layer sizes and activation function.\"\"\"\n\n def __init__(self, sizes, input_norm, activation, dropout, n_features):\n \"\"\":param sizes: list of layer sizes (excluding the input layer size which is given by n_features parameter) :param input_norm: flag indicating whether to perform layer normalization on the input :param activation: name of the PyTorch activation function, e.g. Sigmoid or Tanh :param dropout: dropout probability :param n_features: number of input features\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forward pass through the FCModel. :param x: input of shape [batch_size, slate_length, self.layers[0].in_features] :return: output of shape [batch_size, slate_length, self.output_size]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FCModel:\n \"\"\"This class represents a fully connected neural network model with given layer sizes and activation function.\"\"\"\n\n def __init__(self, sizes, input_norm, activation, dropout, n_features):\n \"\"\":param sizes: list of layer sizes (excluding the input layer size which is given by n_features parameter) :param input_norm: flag indicating whether to perform layer normalization on the input :param activation: name of the PyTorch activation function, e.g. 
Sigmoid or Tanh :param dropout: dropout probability :param n_features: number of input features\"\"\"\n super(FCModel, self).__init__()\n sizes.insert(0, n_features)\n layers = [nn.Linear(size_in, size_out) for size_in, size_out in zip(sizes[:-1], sizes[1:])]\n self.input_norm = nn.LayerNorm(n_features) if input_norm else nn.Identity()\n self.activation = nn.Identity() if activation is None else instantiate_class('torch.nn.modules.activation', activation)\n self.dropout = nn.Dropout(dropout or 0.0)\n self.output_size = sizes[-1]\n self.layers = nn.ModuleList(layers)\n\n def forward(self, x):\n \"\"\"Forward pass through the FCModel. :param x: input of shape [batch_size, slate_length, self.layers[0].in_features] :return: output of shape [batch_size, slate_length, self.output_size]\"\"\"\n x = self.input_norm(x)\n for layer in self.layers:\n x = self.dropout(self.activation(layer(x)))\n return x\n", "source": "the_stack_v2_python_sparse", "source_path": "generated/test_allegro_allRank.py", "source_repo": "jansel/pytorch-jit-paritybench", "split": "val", "star_events_count": 35} {"blob_id": "544c7f2f4c4324c4d7cd7529fa85c984d79a8087", "bodies": ["assert type_constraint in IMMUTABLE_TYPES or issubclass(type_constraint, tuple) or issubclass(type_constraint, frozenset) or issubclass(type_constraint, HotProperty)\nself.type_constraint = type_constraint\nsuper(TypedHotList, self).__init__(init_iterable, name, container)", "if not isinstance(val, self.type_constraint):\n raise TypeError('Only %s allowed here.' % self.type_constraint)\nif isinstance(val, tuple) or isinstance(val, frozenset):\n for i in val:\n self._validate_sub_value(i)\nreturn val", "if type(val) in IMMUTABLE_TYPES:\n return val\nif isinstance(val, tuple) or isinstance(val, frozenset):\n for i in val:\n self._validate_sub_value(i)\n return val\nraise TypeError('Only number/strings and tuples/frozensets allowed here.')"], "bodies_text": "<|body_start_0|>\n assert type_constraint in IMMUTABLE_TYPES or issubclass(type_constraint, tuple) or issubclass(type_constraint, frozenset) or issubclass(type_constraint, HotProperty)\n self.type_constraint = type_constraint\n super(TypedHotList, self).__init__(init_iterable, name, container)\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(val, self.type_constraint):\n raise TypeError('Only %s allowed here.' % self.type_constraint)\n if isinstance(val, tuple) or isinstance(val, frozenset):\n for i in val:\n self._validate_sub_value(i)\n return val\n<|end_body_1|>\n\n<|body_start_2|>\n if type(val) in IMMUTABLE_TYPES:\n return val\n if isinstance(val, tuple) or isinstance(val, frozenset):\n for i in val:\n self._validate_sub_value(i)\n return val\n raise TypeError('Only number/strings and tuples/frozensets allowed here.')\n<|end_body_2|>\n", "class_docstring": "TypedHotList is a HotList variant that can restrict its items to the provided type.", "class_name": "TypedHotList", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TypedHotList:\n \"\"\"TypedHotList is a HotList variant that can restrict its items to the provided type.\"\"\"\n\n def __init__(self, type_constraint, init_iterable=None, name=None, container=None):\n \"\"\"Initializes the structure, sets the type all items in the list must be.\"\"\"\n <|body_0|>\n\n def _validate_value(self, val):\n \"\"\"The members may only be self.type_constraint. 
If the type_constraint is a tuple (or set) then it is also checked that the member's members are unmutable.\"\"\"\n <|body_1|>\n\n def _validate_sub_value(self, val):\n \"\"\"Called from _validate_value, checks that the supplied value is immutable.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert type_constraint in IMMUTABLE_TYPES or issubclass(type_constraint, tuple) or issubclass(type_constraint, frozenset) or issubclass(type_constraint, HotProperty)\n self.type_constraint = type_constraint\n super(TypedHotList, self).__init__(init_iterable, name, container)\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(val, self.type_constraint):\n raise TypeError('Only %s allowed here.' % self.type_constraint)\n if isinstance(val, tuple) or isinstance(val, frozenset):\n for i in val:\n self._validate_sub_value(i)\n return val\n<|end_body_1|>\n\n<|body_start_2|>\n if type(val) in IMMUTABLE_TYPES:\n return val\n if isinstance(val, tuple) or isinstance(val, frozenset):\n for i in val:\n self._validate_sub_value(i)\n return val\n raise TypeError('Only number/strings and tuples/frozensets allowed here.')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000007", "length_bytes": 13089, "license_type": "permissive", "methods": [{"docstring": "Initializes the structure, sets the type all items in the list must be.", "name": "__init__", "signature": "def __init__(self, type_constraint, init_iterable=None, name=None, container=None)"}, {"docstring": "The members may only be self.type_constraint. If the type_constraint is a tuple (or set) then it is also checked that the member's members are unmutable.", "name": "_validate_value", "signature": "def _validate_value(self, val)"}, {"docstring": "Called from _validate_value, checks that the supplied value is immutable.", "name": "_validate_sub_value", "signature": "def _validate_sub_value(self, val)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_val_000265", "prompt": "Implement the Python class `TypedHotList` described below.\n\nClass description:\nTypedHotList is a HotList variant that can restrict it's items to the provided type.\n\nMethod signatures and docstrings:\n- def __init__(self, type_constraint, init_iterable=None, name=None, container=None): Initializes the structure, sets the type all items in the list must be.\n- def _validate_value(self, val): The members may only be self.type_constraint. If the type_constraint is a tuple (or set) then it is also checked that the member's members are unmutable.\n- def _validate_sub_value(self, val): Called from _validate_value, checks that the supplied value is immutable.", "prompted_full_text": "Implement the Python class `TypedHotList` described below.\n\nClass description:\nTypedHotList is a HotList variant that can restrict it's items to the provided type.\n\nMethod signatures and docstrings:\n- def __init__(self, type_constraint, init_iterable=None, name=None, container=None): Initializes the structure, sets the type all items in the list must be.\n- def _validate_value(self, val): The members may only be self.type_constraint. 
If the type_constraint is a tuple (or set) then it is also checked that the member's members are unmutable.\n- def _validate_sub_value(self, val): Called from _validate_value, checks that the supplied value is immutable.\n\n<|skeleton|>\nclass TypedHotList:\n \"\"\"TypedHotList is a HotList variant that can restrict it's items to the provided type.\"\"\"\n\n def __init__(self, type_constraint, init_iterable=None, name=None, container=None):\n \"\"\"Initializes the structure, sets the type all items in the list must be.\"\"\"\n <|body_0|>\n\n def _validate_value(self, val):\n \"\"\"The members may only be self.type_constraint. If the type_constraint is a tuple (or set) then it is also checked that the member's members are unmutable.\"\"\"\n <|body_1|>\n\n def _validate_sub_value(self, val):\n \"\"\"Called from _validate_value, checks that the supplied value is immutable.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert type_constraint in IMMUTABLE_TYPES or issubclass(type_constraint, tuple) or issubclass(type_constraint, frozenset) or issubclass(type_constraint, HotProperty)\n self.type_constraint = type_constraint\n super(TypedHotList, self).__init__(init_iterable, name, container)\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(val, self.type_constraint):\n raise TypeError('Only %s allowed here.' % self.type_constraint)\n if isinstance(val, tuple) or isinstance(val, frozenset):\n for i in val:\n self._validate_sub_value(i)\n return val\n<|end_body_1|>\n\n<|body_start_2|>\n if type(val) in IMMUTABLE_TYPES:\n return val\n if isinstance(val, tuple) or isinstance(val, frozenset):\n for i in val:\n self._validate_sub_value(i)\n return val\n raise TypeError('Only number/strings and tuples/frozensets allowed here.')\n<|end_body_2|>\n", "revision_id": "9ce498d7dbfe285b2da4b6a8d62582ff0fb19239", "skeleton": "<|skeleton|>\nclass TypedHotList:\n \"\"\"TypedHotList is a HotList variant that can restrict it's items to the provided type.\"\"\"\n\n def __init__(self, type_constraint, init_iterable=None, name=None, container=None):\n \"\"\"Initializes the structure, sets the type all items in the list must be.\"\"\"\n <|body_0|>\n\n def _validate_value(self, val):\n \"\"\"The members may only be self.type_constraint. If the type_constraint is a tuple (or set) then it is also checked that the member's members are unmutable.\"\"\"\n <|body_1|>\n\n def _validate_sub_value(self, val):\n \"\"\"Called from _validate_value, checks that the supplied value is immutable.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TypedHotList:\n \"\"\"TypedHotList is a HotList variant that can restrict it's items to the provided type.\"\"\"\n\n def __init__(self, type_constraint, init_iterable=None, name=None, container=None):\n \"\"\"Initializes the structure, sets the type all items in the list must be.\"\"\"\n assert type_constraint in IMMUTABLE_TYPES or issubclass(type_constraint, tuple) or issubclass(type_constraint, frozenset) or issubclass(type_constraint, HotProperty)\n self.type_constraint = type_constraint\n super(TypedHotList, self).__init__(init_iterable, name, container)\n\n def _validate_value(self, val):\n \"\"\"The members may only be self.type_constraint. If the type_constraint is a tuple (or set) then it is also checked that the member's members are unmutable.\"\"\"\n if not isinstance(val, self.type_constraint):\n raise TypeError('Only %s allowed here.' 
% self.type_constraint)\n if isinstance(val, tuple) or isinstance(val, frozenset):\n for i in val:\n self._validate_sub_value(i)\n return val\n\n def _validate_sub_value(self, val):\n \"\"\"Called from _validate_value, checks that the supplied value is immutable.\"\"\"\n if type(val) in IMMUTABLE_TYPES:\n return val\n if isinstance(val, tuple) or isinstance(val, frozenset):\n for i in val:\n self._validate_sub_value(i)\n return val\n raise TypeError('Only number/strings and tuples/frozensets allowed here.')\n", "source": "the_stack_v2_python_sparse", "source_path": "step07/hotmodel.py", "source_repo": "petrblahos/modellerkit", "split": "val", "star_events_count": 0} {"blob_id": "f81589595b5d6f558750c690776b6008ef9e4228", "bodies": ["sale_return_groups = self.env['sale.return'].sudo().read_group(domain=[('sale_order', '=', self.ids)], fields=['sale_order'], groupby=['sale_order'])\norders = self.browse()\nfor group in sale_return_groups:\n print('_compute_retuns', group)\n sale_order = self.browse(group['sale_order'][0])\n while sale_order:\n if sale_order in self:\n sale_order.return_order_count += group['sale_order_count']\n orders |= sale_order\n sale_order = False\n(self - orders).return_order_count = 0", "action = self.env['ir.actions.act_window']._for_xml_id('website_return_management.sale_return_action')\ndomain = [('sale_order', '=', self.id)]\naction['domain'] = domain\naction['context'] = {'search_default_order': 1}\nreturn action"], "bodies_text": "<|body_start_0|>\n sale_return_groups = self.env['sale.return'].sudo().read_group(domain=[('sale_order', '=', self.ids)], fields=['sale_order'], groupby=['sale_order'])\n orders = self.browse()\n for group in sale_return_groups:\n print('_compute_retuns', group)\n sale_order = self.browse(group['sale_order'][0])\n while sale_order:\n if sale_order in self:\n sale_order.return_order_count += group['sale_order_count']\n orders |= sale_order\n sale_order = False\n (self - orders).return_order_count = 0\n<|end_body_0|>\n\n<|body_start_1|>\n action = self.env['ir.actions.act_window']._for_xml_id('website_return_management.sale_return_action')\n domain = [('sale_order', '=', self.id)]\n action['domain'] = domain\n action['context'] = {'search_default_order': 1}\n return action\n<|end_body_1|>\n", "class_docstring": "", "class_name": "SaleOrder", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SaleOrder:\n\n def _compute_retuns(self):\n \"\"\"method to compute return count\"\"\"\n <|body_0|>\n\n def action_open_returns(self):\n \"\"\"This function returns an action that displays the sale return orders from sale order\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sale_return_groups = self.env['sale.return'].sudo().read_group(domain=[('sale_order', '=', self.ids)], fields=['sale_order'], groupby=['sale_order'])\n orders = self.browse()\n for group in sale_return_groups:\n print('_compute_retuns', group)\n sale_order = self.browse(group['sale_order'][0])\n while sale_order:\n if sale_order in self:\n sale_order.return_order_count += group['sale_order_count']\n orders |= sale_order\n sale_order = False\n (self - orders).return_order_count = 0\n<|end_body_0|>\n\n<|body_start_1|>\n action = self.env['ir.actions.act_window']._for_xml_id('website_return_management.sale_return_action')\n domain = [('sale_order', '=', self.id)]\n action['domain'] = domain\n action['context'] = {'search_default_order': 1}\n return action\n<|end_body_1|>\n", "id": 
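The TypedHotList record depends on three names it never defines: IMMUTABLE_TYPES, HotProperty, and the HotList base class. The recursive immutability check at its core can still be exercised standalone; the IMMUTABLE_TYPES tuple below is an assumption about its contents, not the original definition.

# Assumed contents of the scalar types the record checks with `type(val) in IMMUTABLE_TYPES`.
IMMUTABLE_TYPES = (int, float, complex, str, bytes, bool, type(None))

def validate_sub_value(val):
    # Mirrors _validate_sub_value: immutable scalars pass, tuples/frozensets
    # are validated element by element, everything else is rejected.
    if type(val) in IMMUTABLE_TYPES:
        return val
    if isinstance(val, (tuple, frozenset)):
        for item in val:
            validate_sub_value(item)
        return val
    raise TypeError('Only number/strings and tuples/frozensets allowed here.')

validate_sub_value((1, 'a', frozenset({2.0})))    # nested immutables pass
try:
    validate_sub_value((1, ['mutable']))          # a list nested in a tuple fails
except TypeError as exc:
    print(exc)

One design wrinkle worth noticing: the exact type(val) in IMMUTABLE_TYPES test rejects subclasses of the scalar types, while the isinstance checks accept tuple and frozenset subclasses, so the two branches apply different strictness.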
"stack_v2_sparse_classes_10k_val_000008", "length_bytes": 2353, "license_type": "no_license", "methods": [{"docstring": "method to compute return count", "name": "_compute_retuns", "signature": "def _compute_retuns(self)"}, {"docstring": "This function returns an action that displays the sale return orders from sale order", "name": "action_open_returns", "signature": "def action_open_returns(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006236", "prompt": "Implement the Python class `SaleOrder` described below.\n\nClass description:\nImplement the SaleOrder class.\n\nMethod signatures and docstrings:\n- def _compute_retuns(self): method to compute return count\n- def action_open_returns(self): This function returns an action that displays the sale return orders from sale order", "prompted_full_text": "Implement the Python class `SaleOrder` described below.\n\nClass description:\nImplement the SaleOrder class.\n\nMethod signatures and docstrings:\n- def _compute_retuns(self): method to compute return count\n- def action_open_returns(self): This function returns an action that displays the sale return orders from sale order\n\n<|skeleton|>\nclass SaleOrder:\n\n def _compute_retuns(self):\n \"\"\"method to compute return count\"\"\"\n <|body_0|>\n\n def action_open_returns(self):\n \"\"\"This function returns an action that displays the sale return orders from sale order\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sale_return_groups = self.env['sale.return'].sudo().read_group(domain=[('sale_order', '=', self.ids)], fields=['sale_order'], groupby=['sale_order'])\n orders = self.browse()\n for group in sale_return_groups:\n print('_compute_retuns', group)\n sale_order = self.browse(group['sale_order'][0])\n while sale_order:\n if sale_order in self:\n sale_order.return_order_count += group['sale_order_count']\n orders |= sale_order\n sale_order = False\n (self - orders).return_order_count = 0\n<|end_body_0|>\n\n<|body_start_1|>\n action = self.env['ir.actions.act_window']._for_xml_id('website_return_management.sale_return_action')\n domain = [('sale_order', '=', self.id)]\n action['domain'] = domain\n action['context'] = {'search_default_order': 1}\n return action\n<|end_body_1|>\n", "revision_id": "4b1bcb8f17aad44fe9c80a8180eb0128e6bb2c14", "skeleton": "<|skeleton|>\nclass SaleOrder:\n\n def _compute_retuns(self):\n \"\"\"method to compute return count\"\"\"\n <|body_0|>\n\n def action_open_returns(self):\n \"\"\"This function returns an action that displays the sale return orders from sale order\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SaleOrder:\n def _compute_retuns(self):\n \"\"\"method to compute return count\"\"\"\n sale_return_groups = self.env['sale.return'].sudo().read_group(domain=[('sale_order', '=', self.ids)], fields=['sale_order'], groupby=['sale_order'])\n orders = self.browse()\n for group in sale_return_groups:\n print('_compute_retuns', group)\n sale_order = self.browse(group['sale_order'][0])\n while sale_order:\n if sale_order in self:\n sale_order.return_order_count += group['sale_order_count']\n orders |= sale_order\n sale_order = False\n (self - orders).return_order_count = 0\n\n def action_open_returns(self):\n \"\"\"This function returns an action that displays the sale return orders from sale order\"\"\"\n action = 
self.env['ir.actions.act_window']._for_xml_id('website_return_management.sale_return_action')\n domain = [('sale_order', '=', self.id)]\n action['domain'] = domain\n action['context'] = {'search_default_order': 1}\n return action\n", "source": "the_stack_v2_python_sparse", "source_path": "website_return_management/models/sale_order.py", "source_repo": "CybroOdoo/CybroAddons", "split": "val", "star_events_count": 209} {"blob_id": "f3b156b14db822e938d402def0fb9196cff9cf8b", "bodies": ["result = 0\nif nums[0] == 0:\n nums[0] = -1\nfor i in range(1, len(nums)):\n if nums[i] == 0:\n nums[i] = -1\n nums[i] += nums[i - 1]\n if nums[i] == 0:\n result = max(result, i + 1)\n elif nums[i] in nums[:i]:\n b = nums[:i].index(nums[i])\n result = max(result, i - b)\nreturn result", "res = 0\nfirst_ID = dict()\nfirst_ID[0] = -1\ncnt0, cnt1 = (0, 0)\nfor i, x in enumerate(nums):\n cnt0 += x == 0\n cnt1 += x == 1\n d = cnt0 - cnt1\n if d in first_ID:\n res = max(res, i - first_ID[d])\n else:\n first_ID[d] = i\nreturn res"], "bodies_text": "<|body_start_0|>\n result = 0\n if nums[0] == 0:\n nums[0] = -1\n for i in range(1, len(nums)):\n if nums[i] == 0:\n nums[i] = -1\n nums[i] += nums[i - 1]\n if nums[i] == 0:\n result = max(result, i + 1)\n elif nums[i] in nums[:i]:\n b = nums[:i].index(nums[i])\n result = max(result, i - b)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n res = 0\n first_ID = dict()\n first_ID[0] = -1\n cnt0, cnt1 = (0, 0)\n for i, x in enumerate(nums):\n cnt0 += x == 0\n cnt1 += x == 1\n d = cnt0 - cnt1\n if d in first_ID:\n res = max(res, i - first_ID[d])\n else:\n first_ID[d] = i\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def findMaxLength(self, nums: List[int]) -> int:\n \"\"\"连续的一个list中. 0和1的个数相同. 要找最长的. 第一次0比1多k个与下一次0比1多k个的情况发生时, 中间的一段就是result,找最长的result. 把0换成-1, 一个个相加之后等于0时就能知道数量. 需要一个表记录sum. [0, 1, 0, 1, 1, 0] 0 0 -> index[0, 1]-1 -> 0 -> index[0, 5] -> 5 T(n^2) S(1) 超时 :param nums: :return:\"\"\"\n <|body_0|>\n\n def findMaxLength2(self, nums: List[int]) -> int:\n \"\"\"前缀和, 0和1的数量差 hashmap保存数量差 T(n) S(n) :param nums: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = 0\n if nums[0] == 0:\n nums[0] = -1\n for i in range(1, len(nums)):\n if nums[i] == 0:\n nums[i] = -1\n nums[i] += nums[i - 1]\n if nums[i] == 0:\n result = max(result, i + 1)\n elif nums[i] in nums[:i]:\n b = nums[:i].index(nums[i])\n result = max(result, i - b)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n res = 0\n first_ID = dict()\n first_ID[0] = -1\n cnt0, cnt1 = (0, 0)\n for i, x in enumerate(nums):\n cnt0 += x == 0\n cnt1 += x == 1\n d = cnt0 - cnt1\n if d in first_ID:\n res = max(res, i - first_ID[d])\n else:\n first_ID[d] = i\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000009", "length_bytes": 2225, "license_type": "no_license", "methods": [{"docstring": "连续的一个list中. 0和1的个数相同. 要找最长的. 第一次0比1多k个与下一次0比1多k个的情况发生时, 中间的一段就是result,找最长的result. 把0换成-1, 一个个相加之后等于0时就能知道数量. 需要一个表记录sum. 
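The SaleOrder record is Odoo ORM glue and cannot run outside an Odoo registry, but the aggregation _compute_retuns performs reduces to a counter keyed by order id. The dict rows below are hypothetical stand-ins for what read_group(groupby=['sale_order']) yields — one row per order, with the count under 'sale_order_count' — not real ORM output.

from collections import Counter

groups = [
    {'sale_order': (7, 'SO007'), 'sale_order_count': 2},
    {'sale_order': (12, 'SO012'), 'sale_order_count': 1},
]

return_order_count = Counter()
for group in groups:
    order_id = group['sale_order'][0]   # the record likewise takes element [0] of an (id, name) pair
    return_order_count[order_id] += group['sale_order_count']

print(return_order_count[7], return_order_count[99])   # 2 0 -- orders without returns stay at 0

Two things stand out in the recorded body: the while sale_order: loop sets sale_order = False on its first pass, so it behaves as a plain if; and the print('_compute_retuns', group) call reads like leftover debugging output.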
[0, 1, 0, 1, 1, 0] 0 0 -> index[0, 1]-1 -> 0 -> index[0, 5] -> 5 T(n^2) S(1) 超时 :param nums: :return:", "name": "findMaxLength", "signature": "def findMaxLength(self, nums: List[int]) -> int"}, {"docstring": "前缀和, 0和1的数量差 hashmap保存数量差 T(n) S(n) :param nums: :return:", "name": "findMaxLength2", "signature": "def findMaxLength2(self, nums: List[int]) -> int"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004637", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findMaxLength(self, nums: List[int]) -> int: 连续的一个list中. 0和1的个数相同. 要找最长的. 第一次0比1多k个与下一次0比1多k个的情况发生时, 中间的一段就是result,找最长的result. 把0换成-1, 一个个相加之后等于0时就能知道数量. 需要一个表记录sum. [0, 1, 0, 1, 1, 0] 0 0 -> index[0, 1]-1 -> 0 -> index[0, 5] -> 5 T(n^2) S(1) 超时 :param nums: :return:\n- def findMaxLength2(self, nums: List[int]) -> int: 前缀和, 0和1的数量差 hashmap保存数量差 T(n) S(n) :param nums: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findMaxLength(self, nums: List[int]) -> int: 连续的一个list中. 0和1的个数相同. 要找最长的. 第一次0比1多k个与下一次0比1多k个的情况发生时, 中间的一段就是result,找最长的result. 把0换成-1, 一个个相加之后等于0时就能知道数量. 需要一个表记录sum. [0, 1, 0, 1, 1, 0] 0 0 -> index[0, 1]-1 -> 0 -> index[0, 5] -> 5 T(n^2) S(1) 超时 :param nums: :return:\n- def findMaxLength2(self, nums: List[int]) -> int: 前缀和, 0和1的数量差 hashmap保存数量差 T(n) S(n) :param nums: :return:\n\n<|skeleton|>\nclass Solution:\n\n def findMaxLength(self, nums: List[int]) -> int:\n \"\"\"连续的一个list中. 0和1的个数相同. 要找最长的. 第一次0比1多k个与下一次0比1多k个的情况发生时, 中间的一段就是result,找最长的result. 把0换成-1, 一个个相加之后等于0时就能知道数量. 需要一个表记录sum. [0, 1, 0, 1, 1, 0] 0 0 -> index[0, 1]-1 -> 0 -> index[0, 5] -> 5 T(n^2) S(1) 超时 :param nums: :return:\"\"\"\n <|body_0|>\n\n def findMaxLength2(self, nums: List[int]) -> int:\n \"\"\"前缀和, 0和1的数量差 hashmap保存数量差 T(n) S(n) :param nums: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = 0\n if nums[0] == 0:\n nums[0] = -1\n for i in range(1, len(nums)):\n if nums[i] == 0:\n nums[i] = -1\n nums[i] += nums[i - 1]\n if nums[i] == 0:\n result = max(result, i + 1)\n elif nums[i] in nums[:i]:\n b = nums[:i].index(nums[i])\n result = max(result, i - b)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n res = 0\n first_ID = dict()\n first_ID[0] = -1\n cnt0, cnt1 = (0, 0)\n for i, x in enumerate(nums):\n cnt0 += x == 0\n cnt1 += x == 1\n d = cnt0 - cnt1\n if d in first_ID:\n res = max(res, i - first_ID[d])\n else:\n first_ID[d] = i\n return res\n<|end_body_1|>\n", "revision_id": "b1680014ce3f55ba952a1e64241c0cbb783cc436", "skeleton": "<|skeleton|>\nclass Solution:\n\n def findMaxLength(self, nums: List[int]) -> int:\n \"\"\"连续的一个list中. 0和1的个数相同. 要找最长的. 第一次0比1多k个与下一次0比1多k个的情况发生时, 中间的一段就是result,找最长的result. 把0换成-1, 一个个相加之后等于0时就能知道数量. 需要一个表记录sum. [0, 1, 0, 1, 1, 0] 0 0 -> index[0, 1]-1 -> 0 -> index[0, 5] -> 5 T(n^2) S(1) 超时 :param nums: :return:\"\"\"\n <|body_0|>\n\n def findMaxLength2(self, nums: List[int]) -> int:\n \"\"\"前缀和, 0和1的数量差 hashmap保存数量差 T(n) S(n) :param nums: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def findMaxLength(self, nums: List[int]) -> int:\n \"\"\"连续的一个list中. 0和1的个数相同. 要找最长的. 第一次0比1多k个与下一次0比1多k个的情况发生时, 中间的一段就是result,找最长的result. 把0换成-1, 一个个相加之后等于0时就能知道数量. 需要一个表记录sum. 
[0, 1, 0, 1, 1, 0] 0 0 -> index[0, 1]-1 -> 0 -> index[0, 5] -> 5 T(n^2) S(1) 超时 :param nums: :return:\"\"\"\n result = 0\n if nums[0] == 0:\n nums[0] = -1\n for i in range(1, len(nums)):\n if nums[i] == 0:\n nums[i] = -1\n nums[i] += nums[i - 1]\n if nums[i] == 0:\n result = max(result, i + 1)\n elif nums[i] in nums[:i]:\n b = nums[:i].index(nums[i])\n result = max(result, i - b)\n return result\n\n def findMaxLength2(self, nums: List[int]) -> int:\n \"\"\"前缀和, 0和1的数量差 hashmap保存数量差 T(n) S(n) :param nums: :return:\"\"\"\n res = 0\n first_ID = dict()\n first_ID[0] = -1\n cnt0, cnt1 = (0, 0)\n for i, x in enumerate(nums):\n cnt0 += x == 0\n cnt1 += x == 1\n d = cnt0 - cnt1\n if d in first_ID:\n res = max(res, i - first_ID[d])\n else:\n first_ID[d] = i\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "a_525.py", "source_repo": "sun510001/leetcode_jianzhi_offer_2", "split": "val", "star_events_count": 0} {"blob_id": "1da40abcc7caf561ac2a64892193e910f6916ceb", "bodies": ["hl = md5()\nhl.update(msg.encode('utf-8'))\nreturn hl.hexdigest()", "sh = sha1()\nsh.update(msg.encode('utf-8'))\nreturn sh.hexdigest()", "sh = SHA256.new()\nsh.update(msg.encode('utf-8'))\nreturn sh.hexdigest()", "de = DES.new(key, DES.MODE_ECB)\nmss = msg + (8 - len(msg) % 8) * '='\ntext = de.encrypt(mss.encode())\nreturn binascii.b2a_hex(text).decode()", "obj = AES.new(key, AES.MODE_CBC, vi)\ntxt = obj.encrypt(msg.encode())\nreturn binascii.b2a_hex(txt).decode()", "msg = binascii.a2b_hex(msg)\nobj = AES.new(key, AES.MODE_CBC, vi)\nreturn obj.decrypt(msg).decode()"], "bodies_text": "<|body_start_0|>\n hl = md5()\n hl.update(msg.encode('utf-8'))\n return hl.hexdigest()\n<|end_body_0|>\n\n<|body_start_1|>\n sh = sha1()\n sh.update(msg.encode('utf-8'))\n return sh.hexdigest()\n<|end_body_1|>\n\n<|body_start_2|>\n sh = SHA256.new()\n sh.update(msg.encode('utf-8'))\n return sh.hexdigest()\n<|end_body_2|>\n\n<|body_start_3|>\n de = DES.new(key, DES.MODE_ECB)\n mss = msg + (8 - len(msg) % 8) * '='\n text = de.encrypt(mss.encode())\n return binascii.b2a_hex(text).decode()\n<|end_body_3|>\n\n<|body_start_4|>\n obj = AES.new(key, AES.MODE_CBC, vi)\n txt = obj.encrypt(msg.encode())\n return binascii.b2a_hex(txt).decode()\n<|end_body_4|>\n\n<|body_start_5|>\n msg = binascii.a2b_hex(msg)\n obj = AES.new(key, AES.MODE_CBC, vi)\n return obj.decrypt(msg).decode()\n<|end_body_5|>\n", "class_docstring": "", "class_name": "MyHash", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MyHash:\n\n def my_md5(self, msg):\n \"\"\"md5 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\"\"\"\n <|body_0|>\n\n def my_sha1(self, msg):\n \"\"\"sha1 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\"\"\"\n <|body_1|>\n\n def my_sha256(self, msg):\n \"\"\"sha256 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\"\"\"\n <|body_2|>\n\n def my_des(self, msg, key):\n \"\"\"DES 算法加密 :param msg: 需加密的字符串,长度必须为8的倍数,不足添加'=' :param key: 8个字符 :return: 加密后的字符\"\"\"\n <|body_3|>\n\n def my_aes_encrypt(self, msg, key, vi):\n \"\"\"AES 算法的加密 :param msg: 需加密的字符串 :param key: 必须为16,24,32位 :param vi: 必须为16位 :return: 加密后的字符\"\"\"\n <|body_4|>\n\n def my_aes_decrypt(self, msg, key, vi):\n \"\"\"AES 算法的解密 :param msg: 需解密的字符串 :param key: 必须为16,24,32位 :param vi: 必须为16位 :return: 加密后的字符\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n hl = md5()\n hl.update(msg.encode('utf-8'))\n return hl.hexdigest()\n<|end_body_0|>\n\n<|body_start_1|>\n sh = sha1()\n sh.update(msg.encode('utf-8'))\n return 
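The O(n) variant in the Solution record (findMaxLength2) tracks the running difference between the counts of 0s and 1s and remembers the first index at which each difference value appears; whenever a difference repeats, the slice between the two positions is balanced. A self-contained check of that idea, including the docstring's own [0, 1, 0, 1, 1, 0] example:

def find_max_length(nums):
    # first_index[d]: earliest position where (#zeros - #ones) == d.
    # Seeding d=0 at virtual index -1 lets a balanced prefix count from the start.
    first_index = {0: -1}
    best = diff = 0
    for i, x in enumerate(nums):
        diff += 1 if x == 0 else -1
        if diff in first_index:
            best = max(best, i - first_index[diff])
        else:
            first_index[diff] = i
    return best

assert find_max_length([0, 1, 0, 1, 1, 0]) == 6
assert find_max_length([0, 0, 1]) == 2

The quadratic findMaxLength, by contrast, rescans nums[:i] with in and index on every step and mutates the input list in place, which is why its docstring marks it as timing out (超时, "time limit exceeded").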
sh.hexdigest()\n<|end_body_1|>\n\n<|body_start_2|>\n sh = SHA256.new()\n sh.update(msg.encode('utf-8'))\n return sh.hexdigest()\n<|end_body_2|>\n\n<|body_start_3|>\n de = DES.new(key, DES.MODE_ECB)\n mss = msg + (8 - len(msg) % 8) * '='\n text = de.encrypt(mss.encode())\n return binascii.b2a_hex(text).decode()\n<|end_body_3|>\n\n<|body_start_4|>\n obj = AES.new(key, AES.MODE_CBC, vi)\n txt = obj.encrypt(msg.encode())\n return binascii.b2a_hex(txt).decode()\n<|end_body_4|>\n\n<|body_start_5|>\n msg = binascii.a2b_hex(msg)\n obj = AES.new(key, AES.MODE_CBC, vi)\n return obj.decrypt(msg).decode()\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_val_000010", "length_bytes": 2376, "license_type": "no_license", "methods": [{"docstring": "md5 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符", "name": "my_md5", "signature": "def my_md5(self, msg)"}, {"docstring": "sha1 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符", "name": "my_sha1", "signature": "def my_sha1(self, msg)"}, {"docstring": "sha256 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符", "name": "my_sha256", "signature": "def my_sha256(self, msg)"}, {"docstring": "DES 算法加密 :param msg: 需加密的字符串,长度必须为8的倍数,不足添加'=' :param key: 8个字符 :return: 加密后的字符", "name": "my_des", "signature": "def my_des(self, msg, key)"}, {"docstring": "AES 算法的加密 :param msg: 需加密的字符串 :param key: 必须为16,24,32位 :param vi: 必须为16位 :return: 加密后的字符", "name": "my_aes_encrypt", "signature": "def my_aes_encrypt(self, msg, key, vi)"}, {"docstring": "AES 算法的解密 :param msg: 需解密的字符串 :param key: 必须为16,24,32位 :param vi: 必须为16位 :return: 加密后的字符", "name": "my_aes_decrypt", "signature": "def my_aes_decrypt(self, msg, key, vi)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_005269", "prompt": "Implement the Python class `MyHash` described below.\n\nClass description:\nImplement the MyHash class.\n\nMethod signatures and docstrings:\n- def my_md5(self, msg): md5 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\n- def my_sha1(self, msg): sha1 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\n- def my_sha256(self, msg): sha256 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\n- def my_des(self, msg, key): DES 算法加密 :param msg: 需加密的字符串,长度必须为8的倍数,不足添加'=' :param key: 8个字符 :return: 加密后的字符\n- def my_aes_encrypt(self, msg, key, vi): AES 算法的加密 :param msg: 需加密的字符串 :param key: 必须为16,24,32位 :param vi: 必须为16位 :return: 加密后的字符\n- def my_aes_decrypt(self, msg, key, vi): AES 算法的解密 :param msg: 需解密的字符串 :param key: 必须为16,24,32位 :param vi: 必须为16位 :return: 加密后的字符", "prompted_full_text": "Implement the Python class `MyHash` described below.\n\nClass description:\nImplement the MyHash class.\n\nMethod signatures and docstrings:\n- def my_md5(self, msg): md5 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\n- def my_sha1(self, msg): sha1 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\n- def my_sha256(self, msg): sha256 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\n- def my_des(self, msg, key): DES 算法加密 :param msg: 需加密的字符串,长度必须为8的倍数,不足添加'=' :param key: 8个字符 :return: 加密后的字符\n- def my_aes_encrypt(self, msg, key, vi): AES 算法的加密 :param msg: 需加密的字符串 :param key: 必须为16,24,32位 :param vi: 必须为16位 :return: 加密后的字符\n- def my_aes_decrypt(self, msg, key, vi): AES 算法的解密 :param msg: 需解密的字符串 :param key: 必须为16,24,32位 :param vi: 必须为16位 :return: 加密后的字符\n\n<|skeleton|>\nclass MyHash:\n\n def my_md5(self, msg):\n \"\"\"md5 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\"\"\"\n <|body_0|>\n\n def my_sha1(self, msg):\n \"\"\"sha1 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\"\"\"\n <|body_1|>\n\n def my_sha256(self, msg):\n \"\"\"sha256 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\"\"\"\n 
<|body_2|>\n\n def my_des(self, msg, key):\n \"\"\"DES 算法加密 :param msg: 需加密的字符串,长度必须为8的倍数,不足添加'=' :param key: 8个字符 :return: 加密后的字符\"\"\"\n <|body_3|>\n\n def my_aes_encrypt(self, msg, key, vi):\n \"\"\"AES 算法的加密 :param msg: 需加密的字符串 :param key: 必须为16,24,32位 :param vi: 必须为16位 :return: 加密后的字符\"\"\"\n <|body_4|>\n\n def my_aes_decrypt(self, msg, key, vi):\n \"\"\"AES 算法的解密 :param msg: 需解密的字符串 :param key: 必须为16,24,32位 :param vi: 必须为16位 :return: 加密后的字符\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n hl = md5()\n hl.update(msg.encode('utf-8'))\n return hl.hexdigest()\n<|end_body_0|>\n\n<|body_start_1|>\n sh = sha1()\n sh.update(msg.encode('utf-8'))\n return sh.hexdigest()\n<|end_body_1|>\n\n<|body_start_2|>\n sh = SHA256.new()\n sh.update(msg.encode('utf-8'))\n return sh.hexdigest()\n<|end_body_2|>\n\n<|body_start_3|>\n de = DES.new(key, DES.MODE_ECB)\n mss = msg + (8 - len(msg) % 8) * '='\n text = de.encrypt(mss.encode())\n return binascii.b2a_hex(text).decode()\n<|end_body_3|>\n\n<|body_start_4|>\n obj = AES.new(key, AES.MODE_CBC, vi)\n txt = obj.encrypt(msg.encode())\n return binascii.b2a_hex(txt).decode()\n<|end_body_4|>\n\n<|body_start_5|>\n msg = binascii.a2b_hex(msg)\n obj = AES.new(key, AES.MODE_CBC, vi)\n return obj.decrypt(msg).decode()\n<|end_body_5|>\n", "revision_id": "8dd873977444818d0515d51d6552db3e0c318bb2", "skeleton": "<|skeleton|>\nclass MyHash:\n\n def my_md5(self, msg):\n \"\"\"md5 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\"\"\"\n <|body_0|>\n\n def my_sha1(self, msg):\n \"\"\"sha1 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\"\"\"\n <|body_1|>\n\n def my_sha256(self, msg):\n \"\"\"sha256 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\"\"\"\n <|body_2|>\n\n def my_des(self, msg, key):\n \"\"\"DES 算法加密 :param msg: 需加密的字符串,长度必须为8的倍数,不足添加'=' :param key: 8个字符 :return: 加密后的字符\"\"\"\n <|body_3|>\n\n def my_aes_encrypt(self, msg, key, vi):\n \"\"\"AES 算法的加密 :param msg: 需加密的字符串 :param key: 必须为16,24,32位 :param vi: 必须为16位 :return: 加密后的字符\"\"\"\n <|body_4|>\n\n def my_aes_decrypt(self, msg, key, vi):\n \"\"\"AES 算法的解密 :param msg: 需解密的字符串 :param key: 必须为16,24,32位 :param vi: 必须为16位 :return: 加密后的字符\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MyHash:\n def my_md5(self, msg):\n \"\"\"md5 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\"\"\"\n hl = md5()\n hl.update(msg.encode('utf-8'))\n return hl.hexdigest()\n\n def my_sha1(self, msg):\n \"\"\"sha1 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\"\"\"\n sh = sha1()\n sh.update(msg.encode('utf-8'))\n return sh.hexdigest()\n\n def my_sha256(self, msg):\n \"\"\"sha256 算法加密 :param msg: 需加密的字符串 :return: 加密后的字符\"\"\"\n sh = SHA256.new()\n sh.update(msg.encode('utf-8'))\n return sh.hexdigest()\n\n def my_des(self, msg, key):\n \"\"\"DES 算法加密 :param msg: 需加密的字符串,长度必须为8的倍数,不足添加'=' :param key: 8个字符 :return: 加密后的字符\"\"\"\n de = DES.new(key, DES.MODE_ECB)\n mss = msg + (8 - len(msg) % 8) * '='\n text = de.encrypt(mss.encode())\n return binascii.b2a_hex(text).decode()\n\n def my_aes_encrypt(self, msg, key, vi):\n \"\"\"AES 算法的加密 :param msg: 需加密的字符串 :param key: 必须为16,24,32位 :param vi: 必须为16位 :return: 加密后的字符\"\"\"\n obj = AES.new(key, AES.MODE_CBC, vi)\n txt = obj.encrypt(msg.encode())\n return binascii.b2a_hex(txt).decode()\n\n def my_aes_decrypt(self, msg, key, vi):\n \"\"\"AES 算法的解密 :param msg: 需解密的字符串 :param key: 必须为16,24,32位 :param vi: 必须为16位 :return: 加密后的字符\"\"\"\n msg = binascii.a2b_hex(msg)\n obj = AES.new(key, AES.MODE_CBC, vi)\n return 
obj.decrypt(msg).decode()\n", "source": "the_stack_v2_python_sparse", "source_path": "Common/Hash.py", "source_repo": "chenanming/API_Auto_Test", "split": "val", "star_events_count": 0} {"blob_id": "ffe6c2fb8d0d870d0a344837f9cbf85cc64d8bfa", "bodies": ["super(Aggregator, self).__init__()\nprint('\\nLoading patch model from [%s]...' % args.patch_snapshot)\ntry:\n patch_model = torch.load(args.patch_snapshot).cpu()\n self.patch_model = strip_model(patch_model)\nexcept Exception as e:\n raise Exception(\"Couldn't load patch model at {}. Error: {}\".format(args.patch_snapshot, e))\nargs.wrap_model = False\nself.args = args\nif args.multi_image:\n img_size = (args.num_images, *args.img_size)\nargs.hidden_dim = get_output_size(self.patch_model, img_size, args.num_chan, args.cuda)\nfc1_dim = max(2056, args.hidden_dim / 8)\nfc2_dim = max(1024, args.hidden_dim / 16)\nself.fc1 = nn.Linear(args.hidden_dim, fc1_dim)\nself.fc2 = nn.Linear(fc1_dim, fc2_dim)\nself.fc_final = nn.Linear(fc2_dim, args.num_classes)", "patch_hidden = self.patch_model(x)\npatch_hidden = patch_hidden.view(patch_hidden.size()[0], -1)\npatch_hidden = F.relu(self.fc1(patch_hidden))\nhidden = F.relu(self.fc2(patch_hidden))\nlogit = self.fc_final(hidden)\nreturn logit"], "bodies_text": "<|body_start_0|>\n super(Aggregator, self).__init__()\n print('\\nLoading patch model from [%s]...' % args.patch_snapshot)\n try:\n patch_model = torch.load(args.patch_snapshot).cpu()\n self.patch_model = strip_model(patch_model)\n except Exception as e:\n raise Exception(\"Couldn't load patch model at {}. Error: {}\".format(args.patch_snapshot, e))\n args.wrap_model = False\n self.args = args\n if args.multi_image:\n img_size = (args.num_images, *args.img_size)\n args.hidden_dim = get_output_size(self.patch_model, img_size, args.num_chan, args.cuda)\n fc1_dim = max(2056, args.hidden_dim / 8)\n fc2_dim = max(1024, args.hidden_dim / 16)\n self.fc1 = nn.Linear(args.hidden_dim, fc1_dim)\n self.fc2 = nn.Linear(fc1_dim, fc2_dim)\n self.fc_final = nn.Linear(fc2_dim, args.num_classes)\n<|end_body_0|>\n\n<|body_start_1|>\n patch_hidden = self.patch_model(x)\n patch_hidden = patch_hidden.view(patch_hidden.size()[0], -1)\n patch_hidden = F.relu(self.fc1(patch_hidden))\n hidden = F.relu(self.fc2(patch_hidden))\n logit = self.fc_final(hidden)\n return logit\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Aggregator", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Aggregator:\n\n def __init__(self, args):\n \"\"\"Given some a patch model, add add some FC layers and a shortcut to make whole image prediction\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"param x: a batch of image tensors returns hidden: last hidden layer of model (as if wrapper wasn't applied)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Aggregator, self).__init__()\n print('\\nLoading patch model from [%s]...' % args.patch_snapshot)\n try:\n patch_model = torch.load(args.patch_snapshot).cpu()\n self.patch_model = strip_model(patch_model)\n except Exception as e:\n raise Exception(\"Couldn't load patch model at {}. 
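Assuming the MyHash record's AES and DES imports come from pycryptodome (from Crypto.Cipher import AES), here is a CBC round-trip matching my_aes_encrypt/my_aes_decrypt. The recorded methods do no padding, so the plaintext must already be a multiple of the 16-byte block size, and key/IV must be bytes of the lengths the docstrings state.

import binascii
from Crypto.Cipher import AES  # pycryptodome

key = b'0123456789abcdef'   # 16 bytes; 24 or 32 also valid per the docstring
iv = b'fedcba9876543210'    # must be exactly 16 bytes
msg = 'sixteen byte txt'    # exactly 16 characters: the recorded code never pads

cipher_hex = binascii.b2a_hex(AES.new(key, AES.MODE_CBC, iv).encrypt(msg.encode())).decode()
plain = AES.new(key, AES.MODE_CBC, iv).decrypt(binascii.a2b_hex(cipher_hex)).decode()
assert plain == msg

A fresh AES.new(...) is needed for each direction because CBC cipher objects are stateful. Note also that my_des pads with '=' characters (ambiguous if the plaintext itself ends in '=') and uses ECB mode — fine for exercising the API, but not a secure construction.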
Error: {}\".format(args.patch_snapshot, e))\n args.wrap_model = False\n self.args = args\n if args.multi_image:\n img_size = (args.num_images, *args.img_size)\n args.hidden_dim = get_output_size(self.patch_model, img_size, args.num_chan, args.cuda)\n fc1_dim = max(2056, args.hidden_dim / 8)\n fc2_dim = max(1024, args.hidden_dim / 16)\n self.fc1 = nn.Linear(args.hidden_dim, fc1_dim)\n self.fc2 = nn.Linear(fc1_dim, fc2_dim)\n self.fc_final = nn.Linear(fc2_dim, args.num_classes)\n<|end_body_0|>\n\n<|body_start_1|>\n patch_hidden = self.patch_model(x)\n patch_hidden = patch_hidden.view(patch_hidden.size()[0], -1)\n patch_hidden = F.relu(self.fc1(patch_hidden))\n hidden = F.relu(self.fc2(patch_hidden))\n logit = self.fc_final(hidden)\n return logit\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000011", "length_bytes": 1832, "license_type": "permissive", "methods": [{"docstring": "Given some a patch model, add add some FC layers and a shortcut to make whole image prediction", "name": "__init__", "signature": "def __init__(self, args)"}, {"docstring": "param x: a batch of image tensors returns hidden: last hidden layer of model (as if wrapper wasn't applied)", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003921", "prompt": "Implement the Python class `Aggregator` described below.\n\nClass description:\nImplement the Aggregator class.\n\nMethod signatures and docstrings:\n- def __init__(self, args): Given some a patch model, add add some FC layers and a shortcut to make whole image prediction\n- def forward(self, x): param x: a batch of image tensors returns hidden: last hidden layer of model (as if wrapper wasn't applied)", "prompted_full_text": "Implement the Python class `Aggregator` described below.\n\nClass description:\nImplement the Aggregator class.\n\nMethod signatures and docstrings:\n- def __init__(self, args): Given some a patch model, add add some FC layers and a shortcut to make whole image prediction\n- def forward(self, x): param x: a batch of image tensors returns hidden: last hidden layer of model (as if wrapper wasn't applied)\n\n<|skeleton|>\nclass Aggregator:\n\n def __init__(self, args):\n \"\"\"Given some a patch model, add add some FC layers and a shortcut to make whole image prediction\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"param x: a batch of image tensors returns hidden: last hidden layer of model (as if wrapper wasn't applied)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Aggregator, self).__init__()\n print('\\nLoading patch model from [%s]...' % args.patch_snapshot)\n try:\n patch_model = torch.load(args.patch_snapshot).cpu()\n self.patch_model = strip_model(patch_model)\n except Exception as e:\n raise Exception(\"Couldn't load patch model at {}. 
Error: {}\".format(args.patch_snapshot, e))\n args.wrap_model = False\n self.args = args\n if args.multi_image:\n img_size = (args.num_images, *args.img_size)\n args.hidden_dim = get_output_size(self.patch_model, img_size, args.num_chan, args.cuda)\n fc1_dim = max(2056, args.hidden_dim / 8)\n fc2_dim = max(1024, args.hidden_dim / 16)\n self.fc1 = nn.Linear(args.hidden_dim, fc1_dim)\n self.fc2 = nn.Linear(fc1_dim, fc2_dim)\n self.fc_final = nn.Linear(fc2_dim, args.num_classes)\n<|end_body_0|>\n\n<|body_start_1|>\n patch_hidden = self.patch_model(x)\n patch_hidden = patch_hidden.view(patch_hidden.size()[0], -1)\n patch_hidden = F.relu(self.fc1(patch_hidden))\n hidden = F.relu(self.fc2(patch_hidden))\n logit = self.fc_final(hidden)\n return logit\n<|end_body_1|>\n", "revision_id": "12bace8fd6ce9c5bb129fd0d30a46a00a2f7b054", "skeleton": "<|skeleton|>\nclass Aggregator:\n\n def __init__(self, args):\n \"\"\"Given some a patch model, add add some FC layers and a shortcut to make whole image prediction\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"param x: a batch of image tensors returns hidden: last hidden layer of model (as if wrapper wasn't applied)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Aggregator:\n def __init__(self, args):\n \"\"\"Given some a patch model, add add some FC layers and a shortcut to make whole image prediction\"\"\"\n super(Aggregator, self).__init__()\n print('\\nLoading patch model from [%s]...' % args.patch_snapshot)\n try:\n patch_model = torch.load(args.patch_snapshot).cpu()\n self.patch_model = strip_model(patch_model)\n except Exception as e:\n raise Exception(\"Couldn't load patch model at {}. 
Error: {}\".format(args.patch_snapshot, e))\n args.wrap_model = False\n self.args = args\n if args.multi_image:\n img_size = (args.num_images, *args.img_size)\n args.hidden_dim = get_output_size(self.patch_model, img_size, args.num_chan, args.cuda)\n fc1_dim = max(2056, args.hidden_dim / 8)\n fc2_dim = max(1024, args.hidden_dim / 16)\n self.fc1 = nn.Linear(args.hidden_dim, fc1_dim)\n self.fc2 = nn.Linear(fc1_dim, fc2_dim)\n self.fc_final = nn.Linear(fc2_dim, args.num_classes)\n\n def forward(self, x):\n \"\"\"param x: a batch of image tensors returns hidden: last hidden layer of model (as if wrapper wasn't applied)\"\"\"\n patch_hidden = self.patch_model(x)\n patch_hidden = patch_hidden.view(patch_hidden.size()[0], -1)\n patch_hidden = F.relu(self.fc1(patch_hidden))\n hidden = F.relu(self.fc2(patch_hidden))\n logit = self.fc_final(hidden)\n return logit\n", "source": "the_stack_v2_python_sparse", "source_path": "onconet/models/aggregator.py", "source_repo": "yala/Mirai", "split": "val", "star_events_count": 66} {"blob_id": "8922059c4e2849de9cc2493f7b1906c78bf4d942", "bodies": ["mock_system.return_value = 'Darwin'\nself.assertTrue(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\nself.assertFalse(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\nself.assertFalse(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\nself.assertTrue(AndroidDevOS.get_os_name() == 'MAC')", "mock_system.return_value = 'Linux'\nself.assertFalse(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\nself.assertTrue(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\nself.assertFalse(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\nself.assertTrue(AndroidDevOS.get_os_name() == 'LINUX')", "mock_system.return_value = 'Windows'\nself.assertFalse(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\nself.assertFalse(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\nself.assertTrue(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\nself.assertTrue(AndroidDevOS.get_os_name() == 'WINDOWS')", "mock_system.return_value = 'None'\nself.assertFalse(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\nself.assertTrue(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\nself.assertFalse(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\nself.assertTrue(AndroidDevOS.get_os_name() == 'LINUX')"], "bodies_text": "<|body_start_0|>\n mock_system.return_value = 'Darwin'\n self.assertTrue(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.get_os_name() == 'MAC')\n<|end_body_0|>\n\n<|body_start_1|>\n mock_system.return_value = 'Linux'\n self.assertFalse(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.get_os_name() == 'LINUX')\n<|end_body_1|>\n\n<|body_start_2|>\n mock_system.return_value = 'Windows'\n self.assertFalse(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.get_os_name() == 'WINDOWS')\n<|end_body_2|>\n\n<|body_start_3|>\n mock_system.return_value = 'None'\n self.assertFalse(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.WINDOWS == 
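Neither strip_model nor get_output_size is defined in the Aggregator record; judging by how the result sizes fc1, get_output_size must report the flattened feature count the patch backbone emits for a given input shape. One common way to obtain that — an assumption, not necessarily this codebase's implementation — is a dry run with a dummy batch:

import torch
from torch import nn

def get_output_size(model, img_size, num_chan):
    # Probe the backbone with a single dummy image and flatten its output.
    with torch.no_grad():
        dummy = torch.zeros(1, num_chan, *img_size)
        return int(model(dummy).reshape(1, -1).size(1))

backbone = nn.Sequential(nn.Conv2d(3, 8, 3), nn.AdaptiveAvgPool2d(4))
print(get_output_size(backbone, img_size=(64, 64), num_chan=3))   # 8 * 4 * 4 = 128

Separately, fc1_dim = max(2056, args.hidden_dim / 8) produces a float under Python 3, and nn.Linear requires integer sizes, so the recorded __init__ would need // or int() to run on current PyTorch.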
AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.get_os_name() == 'LINUX')\n<|end_body_3|>\n", "class_docstring": "Unit tests for android_dev_os.", "class_name": "IdeUtilUnittests", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IdeUtilUnittests:\n \"\"\"Unit tests for android_dev_os.\"\"\"\n\n def test_get_os_info_mac(self, mock_system):\n \"\"\"Test with running in Mac\"\"\"\n <|body_0|>\n\n def test_get_os_info_linux(self, mock_system):\n \"\"\"Test with running in Linux\"\"\"\n <|body_1|>\n\n def test_get_os_info_windows(self, mock_system):\n \"\"\"Test with running in Windows\"\"\"\n <|body_2|>\n\n def test_get_os_info_default(self, mock_system):\n \"\"\"Test with running in unknown\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mock_system.return_value = 'Darwin'\n self.assertTrue(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.get_os_name() == 'MAC')\n<|end_body_0|>\n\n<|body_start_1|>\n mock_system.return_value = 'Linux'\n self.assertFalse(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.get_os_name() == 'LINUX')\n<|end_body_1|>\n\n<|body_start_2|>\n mock_system.return_value = 'Windows'\n self.assertFalse(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.get_os_name() == 'WINDOWS')\n<|end_body_2|>\n\n<|body_start_3|>\n mock_system.return_value = 'None'\n self.assertFalse(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.get_os_name() == 'LINUX')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000012", "length_bytes": 2818, "license_type": "no_license", "methods": [{"docstring": "Test with running in Mac", "name": "test_get_os_info_mac", "signature": "def test_get_os_info_mac(self, mock_system)"}, {"docstring": "Test with running in Linux", "name": "test_get_os_info_linux", "signature": "def test_get_os_info_linux(self, mock_system)"}, {"docstring": "Test with running in Windows", "name": "test_get_os_info_windows", "signature": "def test_get_os_info_windows(self, mock_system)"}, {"docstring": "Test with running in unknown", "name": "test_get_os_info_default", "signature": "def test_get_os_info_default(self, mock_system)"}], "n_methods": 4, "prompt": "Implement the Python class `IdeUtilUnittests` described below.\n\nClass description:\nUnit tests for android_dev_os.\n\nMethod signatures and docstrings:\n- def test_get_os_info_mac(self, mock_system): Test with running in Mac\n- def test_get_os_info_linux(self, mock_system): Test with running in Linux\n- def test_get_os_info_windows(self, mock_system): Test with running in Windows\n- def test_get_os_info_default(self, mock_system): Test with running in unknown", "prompted_full_text": "Implement the Python class `IdeUtilUnittests` described below.\n\nClass description:\nUnit tests for android_dev_os.\n\nMethod signatures and docstrings:\n- def test_get_os_info_mac(self, 
mock_system): Test with running in Mac\n- def test_get_os_info_linux(self, mock_system): Test with running in Linux\n- def test_get_os_info_windows(self, mock_system): Test with running in Windows\n- def test_get_os_info_default(self, mock_system): Test with running in unknown\n\n<|skeleton|>\nclass IdeUtilUnittests:\n \"\"\"Unit tests for android_dev_os.\"\"\"\n\n def test_get_os_info_mac(self, mock_system):\n \"\"\"Test with running in Mac\"\"\"\n <|body_0|>\n\n def test_get_os_info_linux(self, mock_system):\n \"\"\"Test with running in Linux\"\"\"\n <|body_1|>\n\n def test_get_os_info_windows(self, mock_system):\n \"\"\"Test with running in Windows\"\"\"\n <|body_2|>\n\n def test_get_os_info_default(self, mock_system):\n \"\"\"Test with running in unknown\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mock_system.return_value = 'Darwin'\n self.assertTrue(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.get_os_name() == 'MAC')\n<|end_body_0|>\n\n<|body_start_1|>\n mock_system.return_value = 'Linux'\n self.assertFalse(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.get_os_name() == 'LINUX')\n<|end_body_1|>\n\n<|body_start_2|>\n mock_system.return_value = 'Windows'\n self.assertFalse(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.get_os_name() == 'WINDOWS')\n<|end_body_2|>\n\n<|body_start_3|>\n mock_system.return_value = 'None'\n self.assertFalse(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.get_os_name() == 'LINUX')\n<|end_body_3|>\n", "revision_id": "78a61ca023cbf1a0cecfef8b97df2b274ac3a988", "skeleton": "<|skeleton|>\nclass IdeUtilUnittests:\n \"\"\"Unit tests for android_dev_os.\"\"\"\n\n def test_get_os_info_mac(self, mock_system):\n \"\"\"Test with running in Mac\"\"\"\n <|body_0|>\n\n def test_get_os_info_linux(self, mock_system):\n \"\"\"Test with running in Linux\"\"\"\n <|body_1|>\n\n def test_get_os_info_windows(self, mock_system):\n \"\"\"Test with running in Windows\"\"\"\n <|body_2|>\n\n def test_get_os_info_default(self, mock_system):\n \"\"\"Test with running in unknown\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class IdeUtilUnittests:\n \"\"\"Unit tests for android_dev_os.\"\"\"\n\n def test_get_os_info_mac(self, mock_system):\n \"\"\"Test with running in Mac\"\"\"\n mock_system.return_value = 'Darwin'\n self.assertTrue(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.get_os_name() == 'MAC')\n\n def test_get_os_info_linux(self, mock_system):\n \"\"\"Test with running in Linux\"\"\"\n mock_system.return_value = 'Linux'\n self.assertFalse(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\n 
self.assertTrue(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.get_os_name() == 'LINUX')\n\n def test_get_os_info_windows(self, mock_system):\n \"\"\"Test with running in Windows\"\"\"\n mock_system.return_value = 'Windows'\n self.assertFalse(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.get_os_name() == 'WINDOWS')\n\n def test_get_os_info_default(self, mock_system):\n \"\"\"Test with running in unknown\"\"\"\n mock_system.return_value = 'None'\n self.assertFalse(AndroidDevOS.MAC == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.LINUX == AndroidDevOS.get_os_type())\n self.assertFalse(AndroidDevOS.WINDOWS == AndroidDevOS.get_os_type())\n self.assertTrue(AndroidDevOS.get_os_name() == 'LINUX')\n", "source": "the_stack_v2_python_sparse", "source_path": "tools/asuite/aidegen/lib/android_dev_os_unittest.py", "source_repo": "ZYHGOD-1/Aosp11", "split": "val", "star_events_count": 0} {"blob_id": "67e5670b3e365348f8797cfe2df8113397c3eee7", "bodies": ["expected = '{SHA}X+lk6KR7JuJEH43YnmettCwICdU='\nresult = user.encodePassword('MoinMoin')\nself.assertEqual(result, expected, 'Expected \"%(expected)s\" but got \"%(result)s\"' % locals())\nresult = user.encodePassword(u'MoinMoin')\nself.assertEqual(result, expected, 'Expected \"%(expected)s\" but got \"%(result)s\"' % locals())", "result = user.encodePassword(u'סיסמה סודית בהחלט')\nexpected = '{SHA}GvvkgYzv5MoF9Ljivv2oc81FmkE='\nself.assertEqual(result, expected, 'Expected \"%(expected)s\" but got \"%(result)s\"' % locals())"], "bodies_text": "<|body_start_0|>\n expected = '{SHA}X+lk6KR7JuJEH43YnmettCwICdU='\n result = user.encodePassword('MoinMoin')\n self.assertEqual(result, expected, 'Expected \"%(expected)s\" but got \"%(result)s\"' % locals())\n result = user.encodePassword(u'MoinMoin')\n self.assertEqual(result, expected, 'Expected \"%(expected)s\" but got \"%(result)s\"' % locals())\n<|end_body_0|>\n\n<|body_start_1|>\n result = user.encodePassword(u'סיסמה סודית בהחלט')\n expected = '{SHA}GvvkgYzv5MoF9Ljivv2oc81FmkE='\n self.assertEqual(result, expected, 'Expected \"%(expected)s\" but got \"%(result)s\"' % locals())\n<|end_body_1|>\n", "class_docstring": "user: encode passwords tests", "class_name": "EncodePasswordTestCase", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EncodePasswordTestCase:\n \"\"\"user: encode passwords tests\"\"\"\n\n def testAscii(self):\n \"\"\"user: encode ascii password\"\"\"\n <|body_0|>\n\n def testUnicode(self):\n \"\"\"user: encode unicode password\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n expected = '{SHA}X+lk6KR7JuJEH43YnmettCwICdU='\n result = user.encodePassword('MoinMoin')\n self.assertEqual(result, expected, 'Expected \"%(expected)s\" but got \"%(result)s\"' % locals())\n result = user.encodePassword(u'MoinMoin')\n self.assertEqual(result, expected, 'Expected \"%(expected)s\" but got \"%(result)s\"' % locals())\n<|end_body_0|>\n\n<|body_start_1|>\n result = user.encodePassword(u'סיסמה סודית בהחלט')\n expected = '{SHA}GvvkgYzv5MoF9Ljivv2oc81FmkE='\n self.assertEqual(result, expected, 'Expected \"%(expected)s\" but got \"%(result)s\"' % locals())\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000013", "length_bytes": 8790, 
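The AndroidDevOS class under test is not included in the IdeUtilUnittests record, but the four cases pin down its contract: platform.system() values 'Darwin', 'Linux', and 'Windows' map to MAC, LINUX, and WINDOWS, and anything else falls back to LINUX (the mock_system argument suggests the tests patch platform.system via mock.patch). A minimal enum satisfying those assertions, offered as an illustration rather than the AOSP source:

import platform
from enum import Enum

class AndroidDevOS(Enum):
    MAC = 'Darwin'
    LINUX = 'Linux'
    WINDOWS = 'Windows'

    @classmethod
    def get_os_type(cls):
        try:
            return cls(platform.system())
        except ValueError:
            return cls.LINUX   # the fallback the 'unknown' test expects

    @classmethod
    def get_os_name(cls):
        return cls.get_os_type().name

print(AndroidDevOS.get_os_name())   # e.g. 'LINUX' on a Linux host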
"license_type": "no_license", "methods": [{"docstring": "user: encode ascii password", "name": "testAscii", "signature": "def testAscii(self)"}, {"docstring": "user: encode unicode password", "name": "testUnicode", "signature": "def testUnicode(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003350", "prompt": "Implement the Python class `EncodePasswordTestCase` described below.\n\nClass description:\nuser: encode passwords tests\n\nMethod signatures and docstrings:\n- def testAscii(self): user: encode ascii password\n- def testUnicode(self): user: encode unicode password", "prompted_full_text": "Implement the Python class `EncodePasswordTestCase` described below.\n\nClass description:\nuser: encode passwords tests\n\nMethod signatures and docstrings:\n- def testAscii(self): user: encode ascii password\n- def testUnicode(self): user: encode unicode password\n\n<|skeleton|>\nclass EncodePasswordTestCase:\n \"\"\"user: encode passwords tests\"\"\"\n\n def testAscii(self):\n \"\"\"user: encode ascii password\"\"\"\n <|body_0|>\n\n def testUnicode(self):\n \"\"\"user: encode unicode password\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n expected = '{SHA}X+lk6KR7JuJEH43YnmettCwICdU='\n result = user.encodePassword('MoinMoin')\n self.assertEqual(result, expected, 'Expected \"%(expected)s\" but got \"%(result)s\"' % locals())\n result = user.encodePassword(u'MoinMoin')\n self.assertEqual(result, expected, 'Expected \"%(expected)s\" but got \"%(result)s\"' % locals())\n<|end_body_0|>\n\n<|body_start_1|>\n result = user.encodePassword(u'סיסמה סודית בהחלט')\n expected = '{SHA}GvvkgYzv5MoF9Ljivv2oc81FmkE='\n self.assertEqual(result, expected, 'Expected \"%(expected)s\" but got \"%(result)s\"' % locals())\n<|end_body_1|>\n", "revision_id": "a2c30c3b742c65fb2c5bfbab1267d643823882a5", "skeleton": "<|skeleton|>\nclass EncodePasswordTestCase:\n \"\"\"user: encode passwords tests\"\"\"\n\n def testAscii(self):\n \"\"\"user: encode ascii password\"\"\"\n <|body_0|>\n\n def testUnicode(self):\n \"\"\"user: encode unicode password\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class EncodePasswordTestCase:\n \"\"\"user: encode passwords tests\"\"\"\n\n def testAscii(self):\n \"\"\"user: encode ascii password\"\"\"\n expected = '{SHA}X+lk6KR7JuJEH43YnmettCwICdU='\n result = user.encodePassword('MoinMoin')\n self.assertEqual(result, expected, 'Expected \"%(expected)s\" but got \"%(result)s\"' % locals())\n result = user.encodePassword(u'MoinMoin')\n self.assertEqual(result, expected, 'Expected \"%(expected)s\" but got \"%(result)s\"' % locals())\n\n def testUnicode(self):\n \"\"\"user: encode unicode password\"\"\"\n result = user.encodePassword(u'סיסמה סודית בהחלט')\n expected = '{SHA}GvvkgYzv5MoF9Ljivv2oc81FmkE='\n self.assertEqual(result, expected, 'Expected \"%(expected)s\" but got \"%(result)s\"' % locals())\n", "source": "the_stack_v2_python_sparse", "source_path": "mysocietyorg/moin/lib/python2.4/site-packages/MoinMoin/_tests/test_user.py", "source_repo": "MyfanwyNixon/orgsites", "split": "val", "star_events_count": 0} {"blob_id": "9620a20580fdfac9dfa7b9b7b4ac3ddd8662711b", "bodies": ["notification = ContainerChange(obj=self, name='measurements')\nif index is None:\n index = len(self.measurements)\n self.measurements.append(measurement)\nelse:\n self.measurements.insert(index, measurement)\nnotification.add_operation('added', (index, 
measurement))\nself.changed(notification)", "if not isinstance(measurements, Iterable):\n measurements = [measurements]\nnotification = ContainerChange(obj=self, name='measurements')\nfor measurement in measurements:\n old = self.measurements.index(measurement)\n del self.measurements[old]\n notification.add_operation('removed', (old, measurement))\nself.changed(notification)", "measurement = self.measurements[old]\ndel self.measurements[old]\nself.measurements.insert(new, measurement)\nnotification = ContainerChange(obj=self, name='measurements')\nnotification.add_operation('moved', (old, new, measurement))\nself.changed(notification)"], "bodies_text": "<|body_start_0|>\n notification = ContainerChange(obj=self, name='measurements')\n if index is None:\n index = len(self.measurements)\n self.measurements.append(measurement)\n else:\n self.measurements.insert(index, measurement)\n notification.add_operation('added', (index, measurement))\n self.changed(notification)\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(measurements, Iterable):\n measurements = [measurements]\n notification = ContainerChange(obj=self, name='measurements')\n for measurement in measurements:\n old = self.measurements.index(measurement)\n del self.measurements[old]\n notification.add_operation('removed', (old, measurement))\n self.changed(notification)\n<|end_body_1|>\n\n<|body_start_2|>\n measurement = self.measurements[old]\n del self.measurements[old]\n self.measurements.insert(new, measurement)\n notification = ContainerChange(obj=self, name='measurements')\n notification.add_operation('moved', (old, new, measurement))\n self.changed(notification)\n<|end_body_2|>\n", "class_docstring": "Generic container for measurements.", "class_name": "MeasurementContainer", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MeasurementContainer:\n \"\"\"Generic container for measurements.\"\"\"\n\n def add(self, measurement, index=None):\n \"\"\"Add a measurement to the stored ones. Parameters ---------- measurement : Measurement Measurement to add. index : int | None Index at which to insert the measurement. If None the measurement is appended.\"\"\"\n <|body_0|>\n\n def remove(self, measurements):\n \"\"\"Remove a measurement or a list of measurement. Parameters ---------- measurements : Measurement|list[Measurement] Measurement(s) to remove.\"\"\"\n <|body_1|>\n\n def move(self, old, new):\n \"\"\"Move a measurement. Parameters ---------- old : int Index at which the measurement to move currently is. 
new_position : int Index at which to insert the measurement.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n notification = ContainerChange(obj=self, name='measurements')\n if index is None:\n index = len(self.measurements)\n self.measurements.append(measurement)\n else:\n self.measurements.insert(index, measurement)\n notification.add_operation('added', (index, measurement))\n self.changed(notification)\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(measurements, Iterable):\n measurements = [measurements]\n notification = ContainerChange(obj=self, name='measurements')\n for measurement in measurements:\n old = self.measurements.index(measurement)\n del self.measurements[old]\n notification.add_operation('removed', (old, measurement))\n self.changed(notification)\n<|end_body_1|>\n\n<|body_start_2|>\n measurement = self.measurements[old]\n del self.measurements[old]\n self.measurements.insert(new, measurement)\n notification = ContainerChange(obj=self, name='measurements')\n notification.add_operation('moved', (old, new, measurement))\n self.changed(notification)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000014", "length_bytes": 2809, "license_type": "permissive", "methods": [{"docstring": "Add a measurement to the stored ones. Parameters ---------- measurement : Measurement Measurement to add. index : int | None Index at which to insert the measurement. If None the measurement is appended.", "name": "add", "signature": "def add(self, measurement, index=None)"}, {"docstring": "Remove a measurement or a list of measurement. Parameters ---------- measurements : Measurement|list[Measurement] Measurement(s) to remove.", "name": "remove", "signature": "def remove(self, measurements)"}, {"docstring": "Move a measurement. Parameters ---------- old : int Index at which the measurement to move currently is. new_position : int Index at which to insert the measurement.", "name": "move", "signature": "def move(self, old, new)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003974", "prompt": "Implement the Python class `MeasurementContainer` described below.\n\nClass description:\nGeneric container for measurements.\n\nMethod signatures and docstrings:\n- def add(self, measurement, index=None): Add a measurement to the stored ones. Parameters ---------- measurement : Measurement Measurement to add. index : int | None Index at which to insert the measurement. If None the measurement is appended.\n- def remove(self, measurements): Remove a measurement or a list of measurement. Parameters ---------- measurements : Measurement|list[Measurement] Measurement(s) to remove.\n- def move(self, old, new): Move a measurement. Parameters ---------- old : int Index at which the measurement to move currently is. new_position : int Index at which to insert the measurement.", "prompted_full_text": "Implement the Python class `MeasurementContainer` described below.\n\nClass description:\nGeneric container for measurements.\n\nMethod signatures and docstrings:\n- def add(self, measurement, index=None): Add a measurement to the stored ones. Parameters ---------- measurement : Measurement Measurement to add. index : int | None Index at which to insert the measurement. If None the measurement is appended.\n- def remove(self, measurements): Remove a measurement or a list of measurement. Parameters ---------- measurements : Measurement|list[Measurement] Measurement(s) to remove.\n- def move(self, old, new): Move a measurement. 
Parameters ---------- old : int Index at which the measurement to move currently is. new_position : int Index at which to insert the measurement.\n\n<|skeleton|>\nclass MeasurementContainer:\n \"\"\"Generic container for measurements.\"\"\"\n\n def add(self, measurement, index=None):\n \"\"\"Add a measurement to the stored ones. Parameters ---------- measurement : Measurement Measurement to add. index : int | None Index at which to insert the measurement. If None the measurement is appended.\"\"\"\n <|body_0|>\n\n def remove(self, measurements):\n \"\"\"Remove a measurement or a list of measurement. Parameters ---------- measurements : Measurement|list[Measurement] Measurement(s) to remove.\"\"\"\n <|body_1|>\n\n def move(self, old, new):\n \"\"\"Move a measurement. Parameters ---------- old : int Index at which the measurement to move currently is. new_position : int Index at which to insert the measurement.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n notification = ContainerChange(obj=self, name='measurements')\n if index is None:\n index = len(self.measurements)\n self.measurements.append(measurement)\n else:\n self.measurements.insert(index, measurement)\n notification.add_operation('added', (index, measurement))\n self.changed(notification)\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(measurements, Iterable):\n measurements = [measurements]\n notification = ContainerChange(obj=self, name='measurements')\n for measurement in measurements:\n old = self.measurements.index(measurement)\n del self.measurements[old]\n notification.add_operation('removed', (old, measurement))\n self.changed(notification)\n<|end_body_1|>\n\n<|body_start_2|>\n measurement = self.measurements[old]\n del self.measurements[old]\n self.measurements.insert(new, measurement)\n notification = ContainerChange(obj=self, name='measurements')\n notification.add_operation('moved', (old, new, measurement))\n self.changed(notification)\n<|end_body_2|>\n", "revision_id": "bb003a0ec74b622e1fb0e1dbfdd052f43531bfbd", "skeleton": "<|skeleton|>\nclass MeasurementContainer:\n \"\"\"Generic container for measurements.\"\"\"\n\n def add(self, measurement, index=None):\n \"\"\"Add a measurement to the stored ones. Parameters ---------- measurement : Measurement Measurement to add. index : int | None Index at which to insert the measurement. If None the measurement is appended.\"\"\"\n <|body_0|>\n\n def remove(self, measurements):\n \"\"\"Remove a measurement or a list of measurement. Parameters ---------- measurements : Measurement|list[Measurement] Measurement(s) to remove.\"\"\"\n <|body_1|>\n\n def move(self, old, new):\n \"\"\"Move a measurement. Parameters ---------- old : int Index at which the measurement to move currently is. new_position : int Index at which to insert the measurement.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MeasurementContainer:\n \"\"\"Generic container for measurements.\"\"\"\n\n def add(self, measurement, index=None):\n \"\"\"Add a measurement to the stored ones. Parameters ---------- measurement : Measurement Measurement to add. index : int | None Index at which to insert the measurement. 
If None the measurement is appended.\"\"\"\n notification = ContainerChange(obj=self, name='measurements')\n if index is None:\n index = len(self.measurements)\n self.measurements.append(measurement)\n else:\n self.measurements.insert(index, measurement)\n notification.add_operation('added', (index, measurement))\n self.changed(notification)\n\n def remove(self, measurements):\n \"\"\"Remove a measurement or a list of measurement. Parameters ---------- measurements : Measurement|list[Measurement] Measurement(s) to remove.\"\"\"\n if not isinstance(measurements, Iterable):\n measurements = [measurements]\n notification = ContainerChange(obj=self, name='measurements')\n for measurement in measurements:\n old = self.measurements.index(measurement)\n del self.measurements[old]\n notification.add_operation('removed', (old, measurement))\n self.changed(notification)\n\n def move(self, old, new):\n \"\"\"Move a measurement. Parameters ---------- old : int Index at which the measurement to move currently is. new_position : int Index at which to insert the measurement.\"\"\"\n measurement = self.measurements[old]\n del self.measurements[old]\n self.measurements.insert(new, measurement)\n notification = ContainerChange(obj=self, name='measurements')\n notification.add_operation('moved', (old, new, measurement))\n self.changed(notification)\n", "source": "the_stack_v2_python_sparse", "source_path": "exopy/measurement/container.py", "source_repo": "Exopy/exopy", "split": "val", "star_events_count": 17} {"blob_id": "136179789a29ac14403dc31ad56dfe9f1db9e318", "bodies": ["try:\n from avx_commons import get_db_credentials\n self.hostname = socket.gethostbyname(socket.gethostname())\n self.db_username, self.db_password, self.db_name = get_db_credentials()\n self.db_ip = db_ip\n self.db_port = db_port\n self.path = path\nexcept Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)", "try:\n cmd_load_balancer = current_file_path + '/../../Python/bin/python ' + current_file_path + '/load_balancer.py'\n run_local_cmd(cmd_load_balancer)\nexcept KeyboardInterrupt:\n print('Keyboard Interrupt')\n sys.exit(1)\nexcept Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)", "try:\n check = os.path.isfile(current_file_path + 'gateway_loadbalancer.js')\n if check == 'False':\n print('no load_balancer.js file found')\n lggr.error('gateway_loadbalancer.js not found!')\n print('exiting!')\n sys.exit(1)\n mongo = self.path + '/db/mongodb/bin/mongo'\n lb_cmd = mongo + ' ' + self.db_ip + ':' + self.db_port + '/gateway -u ' + self.db_username + ' -p ' + self.db_password + ' --authenticationDatabase ' + self.db_name + ' --quiet ' + self.path + '/scripts/Commons/gateway_loalbalancer.js'\n dbrm_cmd = mongo + ' ' + self.db_ip + ':' + self.db_port + '/gateway -u ' + self.db_username + ' -p ' + self.db_password + ' --authenticationDatabase ' + self.db_name + ' --quiet ' + self.path + '/scripts/Commons/dbremove.js'\n system_component_cmd = mongo + ' ' + self.db_ip + ':' + self.db_port + '/appviewx -u ' + self.db_username + ' -p ' + self.db_password + ' --authenticationDatabase ' + self.db_name + ' --quiet ' + self.path + '/scripts/Commons/system_components.js'\n run_local_cmd(dbrm_cmd)\n run_local_cmd(lb_cmd)\n print(lb_cmd)\n run_local_cmd(system_component_cmd)\nexcept KeyboardInterrupt:\n print('Keyboard Interrupt')\n sys.exit(1)\nexcept Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)", "try:\n self.generate_load_balancer()\n self.push_load_balancer()\nexcept 
KeyboardInterrupt:\n print('Keyboard Interrupt')\n sys.exit(1)\nexcept Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)"], "bodies_text": "<|body_start_0|>\n try:\n from avx_commons import get_db_credentials\n self.hostname = socket.gethostbyname(socket.gethostname())\n self.db_username, self.db_password, self.db_name = get_db_credentials()\n self.db_ip = db_ip\n self.db_port = db_port\n self.path = path\n except Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n cmd_load_balancer = current_file_path + '/../../Python/bin/python ' + current_file_path + '/load_balancer.py'\n run_local_cmd(cmd_load_balancer)\n except KeyboardInterrupt:\n print('Keyboard Interrupt')\n sys.exit(1)\n except Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n check = os.path.isfile(current_file_path + 'gateway_loadbalancer.js')\n if check == 'False':\n print('no load_balancer.js file found')\n lggr.error('gateway_loadbalancer.js not found!')\n print('exiting!')\n sys.exit(1)\n mongo = self.path + '/db/mongodb/bin/mongo'\n lb_cmd = mongo + ' ' + self.db_ip + ':' + self.db_port + '/gateway -u ' + self.db_username + ' -p ' + self.db_password + ' --authenticationDatabase ' + self.db_name + ' --quiet ' + self.path + '/scripts/Commons/gateway_loalbalancer.js'\n dbrm_cmd = mongo + ' ' + self.db_ip + ':' + self.db_port + '/gateway -u ' + self.db_username + ' -p ' + self.db_password + ' --authenticationDatabase ' + self.db_name + ' --quiet ' + self.path + '/scripts/Commons/dbremove.js'\n system_component_cmd = mongo + ' ' + self.db_ip + ':' + self.db_port + '/appviewx -u ' + self.db_username + ' -p ' + self.db_password + ' --authenticationDatabase ' + self.db_name + ' --quiet ' + self.path + '/scripts/Commons/system_components.js'\n run_local_cmd(dbrm_cmd)\n run_local_cmd(lb_cmd)\n print(lb_cmd)\n run_local_cmd(system_component_cmd)\n except KeyboardInterrupt:\n print('Keyboard Interrupt')\n sys.exit(1)\n except Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n self.generate_load_balancer()\n self.push_load_balancer()\n except KeyboardInterrupt:\n print('Keyboard Interrupt')\n sys.exit(1)\n except Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)\n<|end_body_3|>\n", "class_docstring": ".", "class_name": "PushLB", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PushLB:\n \"\"\".\"\"\"\n\n def __init__(self, path, db_ip, db_port):\n \"\"\".\"\"\"\n <|body_0|>\n\n def generate_load_balancer():\n \"\"\".\"\"\"\n <|body_1|>\n\n def push_load_balancer(self):\n \"\"\".\"\"\"\n <|body_2|>\n\n def initialize(self):\n \"\"\".\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n from avx_commons import get_db_credentials\n self.hostname = socket.gethostbyname(socket.gethostname())\n self.db_username, self.db_password, self.db_name = get_db_credentials()\n self.db_ip = db_ip\n self.db_port = db_port\n self.path = path\n except Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n cmd_load_balancer = current_file_path + '/../../Python/bin/python ' + current_file_path + '/load_balancer.py'\n run_local_cmd(cmd_load_balancer)\n except KeyboardInterrupt:\n print('Keyboard Interrupt')\n sys.exit(1)\n except Exception as e:\n print(colored(e, 'red'))\n 
lggr.error(e)\n sys.exit(1)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n check = os.path.isfile(current_file_path + 'gateway_loadbalancer.js')\n if check == 'False':\n print('no load_balancer.js file found')\n lggr.error('gateway_loadbalancer.js not found!')\n print('exiting!')\n sys.exit(1)\n mongo = self.path + '/db/mongodb/bin/mongo'\n lb_cmd = mongo + ' ' + self.db_ip + ':' + self.db_port + '/gateway -u ' + self.db_username + ' -p ' + self.db_password + ' --authenticationDatabase ' + self.db_name + ' --quiet ' + self.path + '/scripts/Commons/gateway_loalbalancer.js'\n dbrm_cmd = mongo + ' ' + self.db_ip + ':' + self.db_port + '/gateway -u ' + self.db_username + ' -p ' + self.db_password + ' --authenticationDatabase ' + self.db_name + ' --quiet ' + self.path + '/scripts/Commons/dbremove.js'\n system_component_cmd = mongo + ' ' + self.db_ip + ':' + self.db_port + '/appviewx -u ' + self.db_username + ' -p ' + self.db_password + ' --authenticationDatabase ' + self.db_name + ' --quiet ' + self.path + '/scripts/Commons/system_components.js'\n run_local_cmd(dbrm_cmd)\n run_local_cmd(lb_cmd)\n print(lb_cmd)\n run_local_cmd(system_component_cmd)\n except KeyboardInterrupt:\n print('Keyboard Interrupt')\n sys.exit(1)\n except Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n self.generate_load_balancer()\n self.push_load_balancer()\n except KeyboardInterrupt:\n print('Keyboard Interrupt')\n sys.exit(1)\n except Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000015", "length_bytes": 4027, "license_type": "no_license", "methods": [{"docstring": ".", "name": "__init__", "signature": "def __init__(self, path, db_ip, db_port)"}, {"docstring": ".", "name": "generate_load_balancer", "signature": "def generate_load_balancer()"}, {"docstring": ".", "name": "push_load_balancer", "signature": "def push_load_balancer(self)"}, {"docstring": ".", "name": "initialize", "signature": "def initialize(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_test_000062", "prompt": "Implement the Python class `PushLB` described below.\n\nClass description:\n.\n\nMethod signatures and docstrings:\n- def __init__(self, path, db_ip, db_port): .\n- def generate_load_balancer(): .\n- def push_load_balancer(self): .\n- def initialize(self): .", "prompted_full_text": "Implement the Python class `PushLB` described below.\n\nClass description:\n.\n\nMethod signatures and docstrings:\n- def __init__(self, path, db_ip, db_port): .\n- def generate_load_balancer(): .\n- def push_load_balancer(self): .\n- def initialize(self): .\n\n<|skeleton|>\nclass PushLB:\n \"\"\".\"\"\"\n\n def __init__(self, path, db_ip, db_port):\n \"\"\".\"\"\"\n <|body_0|>\n\n def generate_load_balancer():\n \"\"\".\"\"\"\n <|body_1|>\n\n def push_load_balancer(self):\n \"\"\".\"\"\"\n <|body_2|>\n\n def initialize(self):\n \"\"\".\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n from avx_commons import get_db_credentials\n self.hostname = socket.gethostbyname(socket.gethostname())\n self.db_username, self.db_password, self.db_name = get_db_credentials()\n self.db_ip = db_ip\n self.db_port = db_port\n self.path = path\n except Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n cmd_load_balancer = current_file_path + '/../../Python/bin/python ' + current_file_path + '/load_balancer.py'\n 
run_local_cmd(cmd_load_balancer)\n except KeyboardInterrupt:\n print('Keyboard Interrupt')\n sys.exit(1)\n except Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n check = os.path.isfile(current_file_path + 'gateway_loadbalancer.js')\n if check == 'False':\n print('no load_balancer.js file found')\n lggr.error('gateway_loadbalancer.js not found!')\n print('exiting!')\n sys.exit(1)\n mongo = self.path + '/db/mongodb/bin/mongo'\n lb_cmd = mongo + ' ' + self.db_ip + ':' + self.db_port + '/gateway -u ' + self.db_username + ' -p ' + self.db_password + ' --authenticationDatabase ' + self.db_name + ' --quiet ' + self.path + '/scripts/Commons/gateway_loalbalancer.js'\n dbrm_cmd = mongo + ' ' + self.db_ip + ':' + self.db_port + '/gateway -u ' + self.db_username + ' -p ' + self.db_password + ' --authenticationDatabase ' + self.db_name + ' --quiet ' + self.path + '/scripts/Commons/dbremove.js'\n system_component_cmd = mongo + ' ' + self.db_ip + ':' + self.db_port + '/appviewx -u ' + self.db_username + ' -p ' + self.db_password + ' --authenticationDatabase ' + self.db_name + ' --quiet ' + self.path + '/scripts/Commons/system_components.js'\n run_local_cmd(dbrm_cmd)\n run_local_cmd(lb_cmd)\n print(lb_cmd)\n run_local_cmd(system_component_cmd)\n except KeyboardInterrupt:\n print('Keyboard Interrupt')\n sys.exit(1)\n except Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n self.generate_load_balancer()\n self.push_load_balancer()\n except KeyboardInterrupt:\n print('Keyboard Interrupt')\n sys.exit(1)\n except Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)\n<|end_body_3|>\n", "revision_id": "e513224364dce05ea4d17ac25ecfa981238b1311", "skeleton": "<|skeleton|>\nclass PushLB:\n \"\"\".\"\"\"\n\n def __init__(self, path, db_ip, db_port):\n \"\"\".\"\"\"\n <|body_0|>\n\n def generate_load_balancer():\n \"\"\".\"\"\"\n <|body_1|>\n\n def push_load_balancer(self):\n \"\"\".\"\"\"\n <|body_2|>\n\n def initialize(self):\n \"\"\".\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PushLB:\n \"\"\".\"\"\"\n\n def __init__(self, path, db_ip, db_port):\n \"\"\".\"\"\"\n try:\n from avx_commons import get_db_credentials\n self.hostname = socket.gethostbyname(socket.gethostname())\n self.db_username, self.db_password, self.db_name = get_db_credentials()\n self.db_ip = db_ip\n self.db_port = db_port\n self.path = path\n except Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)\n\n def generate_load_balancer():\n \"\"\".\"\"\"\n try:\n cmd_load_balancer = current_file_path + '/../../Python/bin/python ' + current_file_path + '/load_balancer.py'\n run_local_cmd(cmd_load_balancer)\n except KeyboardInterrupt:\n print('Keyboard Interrupt')\n sys.exit(1)\n except Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)\n\n def push_load_balancer(self):\n \"\"\".\"\"\"\n try:\n check = os.path.isfile(current_file_path + 'gateway_loadbalancer.js')\n if check == 'False':\n print('no load_balancer.js file found')\n lggr.error('gateway_loadbalancer.js not found!')\n print('exiting!')\n sys.exit(1)\n mongo = self.path + '/db/mongodb/bin/mongo'\n lb_cmd = mongo + ' ' + self.db_ip + ':' + self.db_port + '/gateway -u ' + self.db_username + ' -p ' + self.db_password + ' --authenticationDatabase ' + self.db_name + ' --quiet ' + 
self.path + '/scripts/Commons/gateway_loalbalancer.js'\n dbrm_cmd = mongo + ' ' + self.db_ip + ':' + self.db_port + '/gateway -u ' + self.db_username + ' -p ' + self.db_password + ' --authenticationDatabase ' + self.db_name + ' --quiet ' + self.path + '/scripts/Commons/dbremove.js'\n system_component_cmd = mongo + ' ' + self.db_ip + ':' + self.db_port + '/appviewx -u ' + self.db_username + ' -p ' + self.db_password + ' --authenticationDatabase ' + self.db_name + ' --quiet ' + self.path + '/scripts/Commons/system_components.js'\n run_local_cmd(dbrm_cmd)\n run_local_cmd(lb_cmd)\n print(lb_cmd)\n run_local_cmd(system_component_cmd)\n except KeyboardInterrupt:\n print('Keyboard Interrupt')\n sys.exit(1)\n except Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)\n\n def initialize(self):\n \"\"\".\"\"\"\n try:\n self.generate_load_balancer()\n self.push_load_balancer()\n except KeyboardInterrupt:\n print('Keyboard Interrupt')\n sys.exit(1)\n except Exception as e:\n print(colored(e, 'red'))\n lggr.error(e)\n sys.exit(1)\n", "source": "the_stack_v2_python_sparse", "source_path": "scripts_avx/scripts/scripts/Commons/push_lb.py", "source_repo": "Poonammahunta/Integration", "split": "val", "star_events_count": 0} {"blob_id": "55a8d31018ec74d8722fc0afe894a7a192e2d665", "bodies": ["audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=False, withScans=False)\nif audit['approved'] == True:\n abort(400, 'Already approved')\nschema = AuditUpdateSchema(only=['approved', 'submitted'])\nparams, _errors = schema.load({'approved': True, 'submitted': True})\nwith db.database.atomic():\n AuditTable.update(params).where(AuditTable.id == audit['id']).execute()\nreturn AuditResource.get_by_id(audit_uuid=audit['uuid'], withContacts=True, withScans=True)", "audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=False, withScans=False)\nif audit['approved'] == False:\n abort(400, 'Not approved yet')\nschema = AuditUpdateSchema(only=['approved'])\nparams, _errors = schema.load({'approved': False})\nwith db.database.atomic():\n AuditTable.update(params).where(AuditTable.id == audit['id']).execute()\nreturn AuditResource.get_by_id(audit_uuid=audit['uuid'], withContacts=True, withScans=True)"], "bodies_text": "<|body_start_0|>\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=False, withScans=False)\n if audit['approved'] == True:\n abort(400, 'Already approved')\n schema = AuditUpdateSchema(only=['approved', 'submitted'])\n params, _errors = schema.load({'approved': True, 'submitted': True})\n with db.database.atomic():\n AuditTable.update(params).where(AuditTable.id == audit['id']).execute()\n return AuditResource.get_by_id(audit_uuid=audit['uuid'], withContacts=True, withScans=True)\n<|end_body_0|>\n\n<|body_start_1|>\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=False, withScans=False)\n if audit['approved'] == False:\n abort(400, 'Not approved yet')\n schema = AuditUpdateSchema(only=['approved'])\n params, _errors = schema.load({'approved': False})\n with db.database.atomic():\n AuditTable.update(params).where(AuditTable.id == audit['id']).execute()\n return AuditResource.get_by_id(audit_uuid=audit['uuid'], withContacts=True, withScans=True)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "AuditApproval", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AuditApproval:\n\n def post(self, audit_uuid):\n \"\"\"Approve the specified audit 
submission\"\"\"\n <|body_0|>\n\n def delete(self, audit_uuid):\n \"\"\"Withdraw the approval of the specified audit submission\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=False, withScans=False)\n if audit['approved'] == True:\n abort(400, 'Already approved')\n schema = AuditUpdateSchema(only=['approved', 'submitted'])\n params, _errors = schema.load({'approved': True, 'submitted': True})\n with db.database.atomic():\n AuditTable.update(params).where(AuditTable.id == audit['id']).execute()\n return AuditResource.get_by_id(audit_uuid=audit['uuid'], withContacts=True, withScans=True)\n<|end_body_0|>\n\n<|body_start_1|>\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=False, withScans=False)\n if audit['approved'] == False:\n abort(400, 'Not approved yet')\n schema = AuditUpdateSchema(only=['approved'])\n params, _errors = schema.load({'approved': False})\n with db.database.atomic():\n AuditTable.update(params).where(AuditTable.id == audit['id']).execute()\n return AuditResource.get_by_id(audit_uuid=audit['uuid'], withContacts=True, withScans=True)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000016", "length_bytes": 18857, "license_type": "no_license", "methods": [{"docstring": "Approve the specified audit submission", "name": "post", "signature": "def post(self, audit_uuid)"}, {"docstring": "Withdraw the approval of the specified audit submission", "name": "delete", "signature": "def delete(self, audit_uuid)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001922", "prompt": "Implement the Python class `AuditApproval` described below.\n\nClass description:\nImplement the AuditApproval class.\n\nMethod signatures and docstrings:\n- def post(self, audit_uuid): Approve the specified audit submission\n- def delete(self, audit_uuid): Withdraw the approval of the specified audit submission", "prompted_full_text": "Implement the Python class `AuditApproval` described below.\n\nClass description:\nImplement the AuditApproval class.\n\nMethod signatures and docstrings:\n- def post(self, audit_uuid): Approve the specified audit submission\n- def delete(self, audit_uuid): Withdraw the approval of the specified audit submission\n\n<|skeleton|>\nclass AuditApproval:\n\n def post(self, audit_uuid):\n \"\"\"Approve the specified audit submission\"\"\"\n <|body_0|>\n\n def delete(self, audit_uuid):\n \"\"\"Withdraw the approval of the specified audit submission\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=False, withScans=False)\n if audit['approved'] == True:\n abort(400, 'Already approved')\n schema = AuditUpdateSchema(only=['approved', 'submitted'])\n params, _errors = schema.load({'approved': True, 'submitted': True})\n with db.database.atomic():\n AuditTable.update(params).where(AuditTable.id == audit['id']).execute()\n return AuditResource.get_by_id(audit_uuid=audit['uuid'], withContacts=True, withScans=True)\n<|end_body_0|>\n\n<|body_start_1|>\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=False, withScans=False)\n if audit['approved'] == False:\n abort(400, 'Not approved yet')\n schema = AuditUpdateSchema(only=['approved'])\n params, _errors = schema.load({'approved': False})\n with db.database.atomic():\n AuditTable.update(params).where(AuditTable.id == audit['id']).execute()\n return AuditResource.get_by_id(audit_uuid=audit['uuid'], withContacts=True, 
withScans=True)\n<|end_body_1|>\n", "revision_id": "7b67aa682d73c8a8d7f0f19b2a90e69c40761c58", "skeleton": "<|skeleton|>\nclass AuditApproval:\n\n def post(self, audit_uuid):\n \"\"\"Approve the specified audit submission\"\"\"\n <|body_0|>\n\n def delete(self, audit_uuid):\n \"\"\"Withdraw the approval of the specified audit submission\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AuditApproval:\n def post(self, audit_uuid):\n \"\"\"Approve the specified audit submission\"\"\"\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=False, withScans=False)\n if audit['approved'] == True:\n abort(400, 'Already approved')\n schema = AuditUpdateSchema(only=['approved', 'submitted'])\n params, _errors = schema.load({'approved': True, 'submitted': True})\n with db.database.atomic():\n AuditTable.update(params).where(AuditTable.id == audit['id']).execute()\n return AuditResource.get_by_id(audit_uuid=audit['uuid'], withContacts=True, withScans=True)\n\n def delete(self, audit_uuid):\n \"\"\"Withdraw the approval of the specified audit submission\"\"\"\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=False, withScans=False)\n if audit['approved'] == False:\n abort(400, 'Not approved yet')\n schema = AuditUpdateSchema(only=['approved'])\n params, _errors = schema.load({'approved': False})\n with db.database.atomic():\n AuditTable.update(params).where(AuditTable.id == audit['id']).execute()\n return AuditResource.get_by_id(audit_uuid=audit['uuid'], withContacts=True, withScans=True)\n", "source": "the_stack_v2_python_sparse", "source_path": "rem/apis/audit.py", "source_repo": "recruit-tech/casval", "split": "val", "star_events_count": 6} {"blob_id": "16cc7584e376ca79edc9f9970a09cfb46c4fe1ec", "bodies": ["request = context['request']\nfrom reviewboard.urls import diffviewer_url_names\nmatch = request.resolver_match\nif match.url_name in diffviewer_url_names:\n return 'raw/'\nreturn local_site_reverse('raw-diff', request, kwargs={'review_request_id': context['review_request'].display_id})", "from reviewboard.urls import diffviewer_url_names\nmatch = context['request'].resolver_match\nif match.url_name in diffviewer_url_names:\n return match.url_name != 'view-interdiff'\nreturn super().get_visible(context=context)", "from reviewboard.urls import diffviewer_url_names\nmatch = context['request'].resolver_match\nif match.url_name in diffviewer_url_names:\n return True\nreview_request = context.get('review_request')\nreturn super().should_render(context=context) and review_request is not None and review_request.has_diffsets"], "bodies_text": "<|body_start_0|>\n request = context['request']\n from reviewboard.urls import diffviewer_url_names\n match = request.resolver_match\n if match.url_name in diffviewer_url_names:\n return 'raw/'\n return local_site_reverse('raw-diff', request, kwargs={'review_request_id': context['review_request'].display_id})\n<|end_body_0|>\n\n<|body_start_1|>\n from reviewboard.urls import diffviewer_url_names\n match = context['request'].resolver_match\n if match.url_name in diffviewer_url_names:\n return match.url_name != 'view-interdiff'\n return super().get_visible(context=context)\n<|end_body_1|>\n\n<|body_start_2|>\n from reviewboard.urls import diffviewer_url_names\n match = context['request'].resolver_match\n if match.url_name in diffviewer_url_names:\n return True\n review_request = context.get('review_request')\n return 
super().should_render(context=context) and review_request is not None and review_request.has_diffsets\n<|end_body_2|>\n", "class_docstring": "The action to download a diff. Version Added: 6.0", "class_name": "DownloadDiffAction", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DownloadDiffAction:\n \"\"\"The action to download a diff. Version Added: 6.0\"\"\"\n\n def get_url(self, *, context: Context) -> str:\n \"\"\"Return this action's URL. Args: context (django.template.Context): The collection of key-value pairs from the template. Returns: str: The URL to invoke if this action is clicked.\"\"\"\n <|body_0|>\n\n def get_visible(self, *, context: Context) -> bool:\n \"\"\"Return whether the action should start visible or not. Args: context (django.template.Context): The current rendering context. Returns: bool: ``True`` if the action should start visible. ``False``, otherwise.\"\"\"\n <|body_1|>\n\n def should_render(self, *, context: Context) -> bool:\n \"\"\"Return whether this action should render. This differs from :py:attr:`hidden` in that hidden actions still render but are hidden by CSS, whereas if this returns ``False`` the action will not be included in the DOM at all. Args: context (django.template.Context): The current rendering context. Returns: bool: ``True`` if the action should render.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n request = context['request']\n from reviewboard.urls import diffviewer_url_names\n match = request.resolver_match\n if match.url_name in diffviewer_url_names:\n return 'raw/'\n return local_site_reverse('raw-diff', request, kwargs={'review_request_id': context['review_request'].display_id})\n<|end_body_0|>\n\n<|body_start_1|>\n from reviewboard.urls import diffviewer_url_names\n match = context['request'].resolver_match\n if match.url_name in diffviewer_url_names:\n return match.url_name != 'view-interdiff'\n return super().get_visible(context=context)\n<|end_body_1|>\n\n<|body_start_2|>\n from reviewboard.urls import diffviewer_url_names\n match = context['request'].resolver_match\n if match.url_name in diffviewer_url_names:\n return True\n review_request = context.get('review_request')\n return super().should_render(context=context) and review_request is not None and review_request.has_diffsets\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000017", "length_bytes": 36416, "license_type": "permissive", "methods": [{"docstring": "Return this action's URL. Args: context (django.template.Context): The collection of key-value pairs from the template. Returns: str: The URL to invoke if this action is clicked.", "name": "get_url", "signature": "def get_url(self, *, context: Context) -> str"}, {"docstring": "Return whether the action should start visible or not. Args: context (django.template.Context): The current rendering context. Returns: bool: ``True`` if the action should start visible. ``False``, otherwise.", "name": "get_visible", "signature": "def get_visible(self, *, context: Context) -> bool"}, {"docstring": "Return whether this action should render. This differs from :py:attr:`hidden` in that hidden actions still render but are hidden by CSS, whereas if this returns ``False`` the action will not be included in the DOM at all. Args: context (django.template.Context): The current rendering context. 
Returns: bool: ``True`` if the action should render.", "name": "should_render", "signature": "def should_render(self, *, context: Context) -> bool"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003113", "prompt": "Implement the Python class `DownloadDiffAction` described below.\n\nClass description:\nThe action to download a diff. Version Added: 6.0\n\nMethod signatures and docstrings:\n- def get_url(self, *, context: Context) -> str: Return this action's URL. Args: context (django.template.Context): The collection of key-value pairs from the template. Returns: str: The URL to invoke if this action is clicked.\n- def get_visible(self, *, context: Context) -> bool: Return whether the action should start visible or not. Args: context (django.template.Context): The current rendering context. Returns: bool: ``True`` if the action should start visible. ``False``, otherwise.\n- def should_render(self, *, context: Context) -> bool: Return whether this action should render. This differs from :py:attr:`hidden` in that hidden actions still render but are hidden by CSS, whereas if this returns ``False`` the action will not be included in the DOM at all. Args: context (django.template.Context): The current rendering context. Returns: bool: ``True`` if the action should render.", "prompted_full_text": "Implement the Python class `DownloadDiffAction` described below.\n\nClass description:\nThe action to download a diff. Version Added: 6.0\n\nMethod signatures and docstrings:\n- def get_url(self, *, context: Context) -> str: Return this action's URL. Args: context (django.template.Context): The collection of key-value pairs from the template. Returns: str: The URL to invoke if this action is clicked.\n- def get_visible(self, *, context: Context) -> bool: Return whether the action should start visible or not. Args: context (django.template.Context): The current rendering context. Returns: bool: ``True`` if the action should start visible. ``False``, otherwise.\n- def should_render(self, *, context: Context) -> bool: Return whether this action should render. This differs from :py:attr:`hidden` in that hidden actions still render but are hidden by CSS, whereas if this returns ``False`` the action will not be included in the DOM at all. Args: context (django.template.Context): The current rendering context. Returns: bool: ``True`` if the action should render.\n\n<|skeleton|>\nclass DownloadDiffAction:\n \"\"\"The action to download a diff. Version Added: 6.0\"\"\"\n\n def get_url(self, *, context: Context) -> str:\n \"\"\"Return this action's URL. Args: context (django.template.Context): The collection of key-value pairs from the template. Returns: str: The URL to invoke if this action is clicked.\"\"\"\n <|body_0|>\n\n def get_visible(self, *, context: Context) -> bool:\n \"\"\"Return whether the action should start visible or not. Args: context (django.template.Context): The current rendering context. Returns: bool: ``True`` if the action should start visible. ``False``, otherwise.\"\"\"\n <|body_1|>\n\n def should_render(self, *, context: Context) -> bool:\n \"\"\"Return whether this action should render. This differs from :py:attr:`hidden` in that hidden actions still render but are hidden by CSS, whereas if this returns ``False`` the action will not be included in the DOM at all. Args: context (django.template.Context): The current rendering context. 
Returns: bool: ``True`` if the action should render.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n request = context['request']\n from reviewboard.urls import diffviewer_url_names\n match = request.resolver_match\n if match.url_name in diffviewer_url_names:\n return 'raw/'\n return local_site_reverse('raw-diff', request, kwargs={'review_request_id': context['review_request'].display_id})\n<|end_body_0|>\n\n<|body_start_1|>\n from reviewboard.urls import diffviewer_url_names\n match = context['request'].resolver_match\n if match.url_name in diffviewer_url_names:\n return match.url_name != 'view-interdiff'\n return super().get_visible(context=context)\n<|end_body_1|>\n\n<|body_start_2|>\n from reviewboard.urls import diffviewer_url_names\n match = context['request'].resolver_match\n if match.url_name in diffviewer_url_names:\n return True\n review_request = context.get('review_request')\n return super().should_render(context=context) and review_request is not None and review_request.has_diffsets\n<|end_body_2|>\n", "revision_id": "c3a991f1e9d7682239a1ab0e8661cee6da01d537", "skeleton": "<|skeleton|>\nclass DownloadDiffAction:\n \"\"\"The action to download a diff. Version Added: 6.0\"\"\"\n\n def get_url(self, *, context: Context) -> str:\n \"\"\"Return this action's URL. Args: context (django.template.Context): The collection of key-value pairs from the template. Returns: str: The URL to invoke if this action is clicked.\"\"\"\n <|body_0|>\n\n def get_visible(self, *, context: Context) -> bool:\n \"\"\"Return whether the action should start visible or not. Args: context (django.template.Context): The current rendering context. Returns: bool: ``True`` if the action should start visible. ``False``, otherwise.\"\"\"\n <|body_1|>\n\n def should_render(self, *, context: Context) -> bool:\n \"\"\"Return whether this action should render. This differs from :py:attr:`hidden` in that hidden actions still render but are hidden by CSS, whereas if this returns ``False`` the action will not be included in the DOM at all. Args: context (django.template.Context): The current rendering context. Returns: bool: ``True`` if the action should render.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DownloadDiffAction:\n \"\"\"The action to download a diff. Version Added: 6.0\"\"\"\n\n def get_url(self, *, context: Context) -> str:\n \"\"\"Return this action's URL. Args: context (django.template.Context): The collection of key-value pairs from the template. Returns: str: The URL to invoke if this action is clicked.\"\"\"\n request = context['request']\n from reviewboard.urls import diffviewer_url_names\n match = request.resolver_match\n if match.url_name in diffviewer_url_names:\n return 'raw/'\n return local_site_reverse('raw-diff', request, kwargs={'review_request_id': context['review_request'].display_id})\n\n def get_visible(self, *, context: Context) -> bool:\n \"\"\"Return whether the action should start visible or not. Args: context (django.template.Context): The current rendering context. Returns: bool: ``True`` if the action should start visible. 
``False``, otherwise.\"\"\"\n from reviewboard.urls import diffviewer_url_names\n match = context['request'].resolver_match\n if match.url_name in diffviewer_url_names:\n return match.url_name != 'view-interdiff'\n return super().get_visible(context=context)\n\n def should_render(self, *, context: Context) -> bool:\n \"\"\"Return whether this action should render. This differs from :py:attr:`hidden` in that hidden actions still render but are hidden by CSS, whereas if this returns ``False`` the action will not be included in the DOM at all. Args: context (django.template.Context): The current rendering context. Returns: bool: ``True`` if the action should render.\"\"\"\n from reviewboard.urls import diffviewer_url_names\n match = context['request'].resolver_match\n if match.url_name in diffviewer_url_names:\n return True\n review_request = context.get('review_request')\n return super().should_render(context=context) and review_request is not None and review_request.has_diffsets\n", "source": "the_stack_v2_python_sparse", "source_path": "reviewboard/reviews/actions.py", "source_repo": "reviewboard/reviewboard", "split": "val", "star_events_count": 1141} {"blob_id": "59b06542a499b4a746f18f68ab1bb2b3ba8165d2", "bodies": ["if model._meta.app_label in LABELS:\n return model._meta.app_label\nreturn None", "if model._meta.app_label in LABELS:\n return model._meta.app_label\nreturn None", "db_label1 = obj1._meta.app_label\ndb_label2 = obj2._meta.app_label\nif db_label1 and db_label2:\n if db_label1 == db_label2:\n return True\n else:\n return False\nreturn None", "if db in LABELS:\n return model._meta.app_label == db\nelif model._meta.app_label in LABELS:\n return False\nreturn None", "if db in LABELS:\n return app_label == db\nelif app_label in LABELS:\n return False\nreturn None"], "bodies_text": "<|body_start_0|>\n if model._meta.app_label in LABELS:\n return model._meta.app_label\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if model._meta.app_label in LABELS:\n return model._meta.app_label\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n db_label1 = obj1._meta.app_label\n db_label2 = obj2._meta.app_label\n if db_label1 and db_label2:\n if db_label1 == db_label2:\n return True\n else:\n return False\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n if db in LABELS:\n return model._meta.app_label == db\n elif model._meta.app_label in LABELS:\n return False\n return None\n<|end_body_3|>\n\n<|body_start_4|>\n if db in LABELS:\n return app_label == db\n elif app_label in LABELS:\n return False\n return None\n<|end_body_4|>\n", "class_docstring": "A router to control all database operations on models for different databases.", "class_name": "DatabaseAppsRouter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DatabaseAppsRouter:\n \"\"\"A router to control all database operations on models for different databases.\"\"\"\n\n def db_for_read(self, model, **hints):\n \"\"\"\"Point all read operations to the specific database.\"\"\"\n <|body_0|>\n\n def db_for_write(self, model, **hints):\n \"\"\"Point all write operations to the specific database.\"\"\"\n <|body_1|>\n\n def allow_relation(self, obj1, obj2, **hints):\n \"\"\"Allow any relation between apps that use the same database.\"\"\"\n <|body_2|>\n\n def allow_syncdb(self, db, model):\n \"\"\"Make sure that apps only appear in the related database.\"\"\"\n <|body_3|>\n\n def allow_migrate(self, db, app_label, model=None, **hints):\n \"\"\"Make sure the auth app only 
appears in the 'auth_db' database.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if model._meta.app_label in LABELS:\n return model._meta.app_label\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if model._meta.app_label in LABELS:\n return model._meta.app_label\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n db_label1 = obj1._meta.app_label\n db_label2 = obj2._meta.app_label\n if db_label1 and db_label2:\n if db_label1 == db_label2:\n return True\n else:\n return False\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n if db in LABELS:\n return model._meta.app_label == db\n elif model._meta.app_label in LABELS:\n return False\n return None\n<|end_body_3|>\n\n<|body_start_4|>\n if db in LABELS:\n return app_label == db\n elif app_label in LABELS:\n return False\n return None\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000018", "length_bytes": 1746, "license_type": "no_license", "methods": [{"docstring": "\"Point all read operations to the specific database.", "name": "db_for_read", "signature": "def db_for_read(self, model, **hints)"}, {"docstring": "Point all write operations to the specific database.", "name": "db_for_write", "signature": "def db_for_write(self, model, **hints)"}, {"docstring": "Allow any relation between apps that use the same database.", "name": "allow_relation", "signature": "def allow_relation(self, obj1, obj2, **hints)"}, {"docstring": "Make sure that apps only appear in the related database.", "name": "allow_syncdb", "signature": "def allow_syncdb(self, db, model)"}, {"docstring": "Make sure the auth app only appears in the 'auth_db' database.", "name": "allow_migrate", "signature": "def allow_migrate(self, db, app_label, model=None, **hints)"}], "n_methods": 5, "prompt": "Implement the Python class `DatabaseAppsRouter` described below.\n\nClass description:\nA router to control all database operations on models for different databases.\n\nMethod signatures and docstrings:\n- def db_for_read(self, model, **hints): \"Point all read operations to the specific database.\n- def db_for_write(self, model, **hints): Point all write operations to the specific database.\n- def allow_relation(self, obj1, obj2, **hints): Allow any relation between apps that use the same database.\n- def allow_syncdb(self, db, model): Make sure that apps only appear in the related database.\n- def allow_migrate(self, db, app_label, model=None, **hints): Make sure the auth app only appears in the 'auth_db' database.", "prompted_full_text": "Implement the Python class `DatabaseAppsRouter` described below.\n\nClass description:\nA router to control all database operations on models for different databases.\n\nMethod signatures and docstrings:\n- def db_for_read(self, model, **hints): \"Point all read operations to the specific database.\n- def db_for_write(self, model, **hints): Point all write operations to the specific database.\n- def allow_relation(self, obj1, obj2, **hints): Allow any relation between apps that use the same database.\n- def allow_syncdb(self, db, model): Make sure that apps only appear in the related database.\n- def allow_migrate(self, db, app_label, model=None, **hints): Make sure the auth app only appears in the 'auth_db' database.\n\n<|skeleton|>\nclass DatabaseAppsRouter:\n \"\"\"A router to control all database operations on models for different databases.\"\"\"\n\n def db_for_read(self, model, **hints):\n \"\"\"\"Point all read operations to the specific database.\"\"\"\n <|body_0|>\n\n def db_for_write(self, model, **hints):\n 
\"\"\"Point all write operations to the specific database.\"\"\"\n <|body_1|>\n\n def allow_relation(self, obj1, obj2, **hints):\n \"\"\"Allow any relation between apps that use the same database.\"\"\"\n <|body_2|>\n\n def allow_syncdb(self, db, model):\n \"\"\"Make sure that apps only appear in the related database.\"\"\"\n <|body_3|>\n\n def allow_migrate(self, db, app_label, model=None, **hints):\n \"\"\"Make sure the auth app only appears in the 'auth_db' database.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if model._meta.app_label in LABELS:\n return model._meta.app_label\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if model._meta.app_label in LABELS:\n return model._meta.app_label\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n db_label1 = obj1._meta.app_label\n db_label2 = obj2._meta.app_label\n if db_label1 and db_label2:\n if db_label1 == db_label2:\n return True\n else:\n return False\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n if db in LABELS:\n return model._meta.app_label == db\n elif model._meta.app_label in LABELS:\n return False\n return None\n<|end_body_3|>\n\n<|body_start_4|>\n if db in LABELS:\n return app_label == db\n elif app_label in LABELS:\n return False\n return None\n<|end_body_4|>\n", "revision_id": "1b0e863ff3977471f5a94ef7d990796a9e9669c4", "skeleton": "<|skeleton|>\nclass DatabaseAppsRouter:\n \"\"\"A router to control all database operations on models for different databases.\"\"\"\n\n def db_for_read(self, model, **hints):\n \"\"\"\"Point all read operations to the specific database.\"\"\"\n <|body_0|>\n\n def db_for_write(self, model, **hints):\n \"\"\"Point all write operations to the specific database.\"\"\"\n <|body_1|>\n\n def allow_relation(self, obj1, obj2, **hints):\n \"\"\"Allow any relation between apps that use the same database.\"\"\"\n <|body_2|>\n\n def allow_syncdb(self, db, model):\n \"\"\"Make sure that apps only appear in the related database.\"\"\"\n <|body_3|>\n\n def allow_migrate(self, db, app_label, model=None, **hints):\n \"\"\"Make sure the auth app only appears in the 'auth_db' database.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DatabaseAppsRouter:\n \"\"\"A router to control all database operations on models for different databases.\"\"\"\n\n def db_for_read(self, model, **hints):\n \"\"\"\"Point all read operations to the specific database.\"\"\"\n if model._meta.app_label in LABELS:\n return model._meta.app_label\n return None\n\n def db_for_write(self, model, **hints):\n \"\"\"Point all write operations to the specific database.\"\"\"\n if model._meta.app_label in LABELS:\n return model._meta.app_label\n return None\n\n def allow_relation(self, obj1, obj2, **hints):\n \"\"\"Allow any relation between apps that use the same database.\"\"\"\n db_label1 = obj1._meta.app_label\n db_label2 = obj2._meta.app_label\n if db_label1 and db_label2:\n if db_label1 == db_label2:\n return True\n else:\n return False\n return None\n\n def allow_syncdb(self, db, model):\n \"\"\"Make sure that apps only appear in the related database.\"\"\"\n if db in LABELS:\n return model._meta.app_label == db\n elif model._meta.app_label in LABELS:\n return False\n return None\n\n def allow_migrate(self, db, app_label, model=None, **hints):\n \"\"\"Make sure the auth app only appears in the 'auth_db' database.\"\"\"\n if db in LABELS:\n return app_label == db\n elif app_label in LABELS:\n return 
False\n        return None\n", "source": "the_stack_v2_python_sparse", "source_path": "project/logchart/logchart/databaseRouter.py", "source_repo": "P79N6A/project_code", "split": "val", "star_events_count": 0}
{"blob_id": "a559d65be327d7bc908edd544ad184f2bf88ebe2", "bodies": ["result = []\ndistance = [[0] * len(points) for _ in range(len(points))]\nfor i in range(len(points)):\n    for j in range(i):\n        distance[i][j] = (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2\n        distance[j][i] = distance[i][j]\n        for m in range(i):\n            if distance[i][j] == distance[i - 1 - m][j]:\n                result.append([points[i], points[j], points[i - 1 - m]])\n                result.append([points[i - 1 - m], points[j], points[i]])\n        for m in range(j):\n            if distance[i][j] == distance[i][j - 1 - m]:\n                result.append([points[j], points[i], points[j - 1 - m]])\n                result.append([points[j - 1 - m], points[i], points[j]])\nreturn len(result)", "count = 0\ndata = {}\nfor i in range(len(points)):\n    for j in range(i):\n        distance = (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2\n        exts = data.get(distance)\n        if not exts:\n            data[distance] = [[i, j]]\n        else:\n            for ext in exts:\n                if ext[0] == i or ext[0] == j or ext[1] == i or (ext[1] == j):\n                    count += 2\n            data[distance].append([i, j])\nreturn count"], "bodies_text": "<|body_start_0|>\n    result = []\n    distance = [[0] * len(points) for _ in range(len(points))]\n    for i in range(len(points)):\n        for j in range(i):\n            distance[i][j] = (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2\n            distance[j][i] = distance[i][j]\n            for m in range(i):\n                if distance[i][j] == distance[i - 1 - m][j]:\n                    result.append([points[i], points[j], points[i - 1 - m]])\n                    result.append([points[i - 1 - m], points[j], points[i]])\n            for m in range(j):\n                if distance[i][j] == distance[i][j - 1 - m]:\n                    result.append([points[j], points[i], points[j - 1 - m]])\n                    result.append([points[j - 1 - m], points[i], points[j]])\n    return len(result)\n<|end_body_0|>\n\n<|body_start_1|>\n    count = 0\n    data = {}\n    for i in range(len(points)):\n        for j in range(i):\n            distance = (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2\n            exts = data.get(distance)\n            if not exts:\n                data[distance] = [[i, j]]\n            else:\n                for ext in exts:\n                    if ext[0] == i or ext[0] == j or ext[1] == i or (ext[1] == j):\n                        count += 2\n                data[distance].append([i, j])\n    return count\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n    def _numberOfBoomerangs(self, points):\n        \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n        <|body_0|>\n\n    def numberOfBoomerangs(self, points):\n        \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    result = []\n    distance = [[0] * len(points) for _ in range(len(points))]\n    for i in range(len(points)):\n        for j in range(i):\n            distance[i][j] = (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2\n            distance[j][i] = distance[i][j]\n            for m in range(i):\n                if distance[i][j] == distance[i - 1 - m][j]:\n                    result.append([points[i], points[j], points[i - 1 - m]])\n                    result.append([points[i - 1 - m], points[j], points[i]])\n            for m in range(j):\n                if distance[i][j] == distance[i][j - 1 - m]:\n                    result.append([points[j], points[i], points[j - 1 - m]])\n                    result.append([points[j - 1 - m], points[i], points[j]])\n    return len(result)\n<|end_body_0|>\n\n<|body_start_1|>\n    count = 0\n    data = {}\n    for i in range(len(points)):\n        for j in range(i):\n            distance = (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2\n            exts = data.get(distance)\n            if not exts:\n                data[distance] = [[i, j]]\n            else:\n                for ext in exts:\n                    if ext[0] == i or ext[0] == j or ext[1] == i or (ext[1] == j):\n                        count += 2\n                data[distance].append([i, j])\n    return count\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000019", "length_bytes": 8991, "license_type": "permissive", "methods": [{"docstring": ":type points: List[List[int]] :rtype: int", "name": "_numberOfBoomerangs", "signature": "def _numberOfBoomerangs(self, points)"}, {"docstring": ":type points: List[List[int]] :rtype: int", "name": "numberOfBoomerangs", "signature": "def numberOfBoomerangs(self, points)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def _numberOfBoomerangs(self, points): :type points: List[List[int]] :rtype: int\n- def numberOfBoomerangs(self, points): :type points: List[List[int]] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def _numberOfBoomerangs(self, points): :type points: List[List[int]] :rtype: int\n- def numberOfBoomerangs(self, points): :type points: List[List[int]] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n    def _numberOfBoomerangs(self, points):\n        \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n        <|body_0|>\n\n    def numberOfBoomerangs(self, points):\n        \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    result = []\n    distance = [[0] * len(points) for _ in range(len(points))]\n    for i in range(len(points)):\n        for j in range(i):\n            distance[i][j] = (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2\n            distance[j][i] = distance[i][j]\n            for m in range(i):\n                if distance[i][j] == distance[i - 1 - m][j]:\n                    result.append([points[i], points[j], points[i - 1 - m]])\n                    result.append([points[i - 1 - m], points[j], points[i]])\n            for m in range(j):\n                if distance[i][j] == distance[i][j - 1 - m]:\n                    result.append([points[j], points[i], points[j - 1 - m]])\n                    result.append([points[j - 1 - m], points[i], points[j]])\n    return len(result)\n<|end_body_0|>\n\n<|body_start_1|>\n    count = 0\n    data = {}\n    for i in range(len(points)):\n        for j in range(i):\n            distance = (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2\n            exts = data.get(distance)\n            if not exts:\n                data[distance] = [[i, j]]\n            else:\n                for ext in exts:\n                    if ext[0] == i or ext[0] == j or ext[1] == i or (ext[1] == j):\n                        count += 2\n                data[distance].append([i, j])\n    return count\n<|end_body_1|>\n", "revision_id": "0dd67edca4e0b0323cb5a7239f02ea46383cd15a", "skeleton": "<|skeleton|>\nclass Solution:\n\n    def _numberOfBoomerangs(self, points):\n        \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n        <|body_0|>\n\n    def numberOfBoomerangs(self, points):\n        \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n    def _numberOfBoomerangs(self, points):\n        \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n        result = []\n        distance = [[0] * len(points) for _ in range(len(points))]\n        for i in range(len(points)):\n            for j in range(i):\n                distance[i][j] = (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2\n                distance[j][i] = distance[i][j]\n                for m in range(i):\n                    if distance[i][j] == distance[i - 1 - m][j]:\n                        result.append([points[i], points[j], points[i - 1 - m]])\n                        result.append([points[i - 1 - m], points[j], points[i]])\n                for m in range(j):\n                    if distance[i][j] == distance[i][j - 1 - m]:\n                        result.append([points[j], points[i], points[j - 1 - m]])\n                        result.append([points[j - 1 - m], points[i], points[j]])\n        return len(result)\n\n    def numberOfBoomerangs(self, points):\n        \"\"\":type points: List[List[int]] :rtype: int\"\"\"\n        count = 0\n        data = {}\n        for i in range(len(points)):\n            for j in range(i):\n                distance = (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2\n                exts = data.get(distance)\n                if not exts:\n                    data[distance] = [[i, j]]\n                else:\n                    for ext in exts:\n                        if ext[0] == i or ext[0] == j or ext[1] == i or (ext[1] == j):\n                            count += 2\n                    data[distance].append([i, j])\n        return count\n", "source": "the_stack_v2_python_sparse", "source_path": "447.number-of-boomerangs.py", "source_repo": "windard/leeeeee", "split": "val", "star_events_count": 0}
{"blob_id": "5cf338b07054a033e3510c6cc4504c378a844078", "bodies": ["self.continuous_schedule = continuous_schedule\nself.daily_schedule = daily_schedule\nself.monthly_schedule = monthly_schedule\nself.periodicity = periodicity\nself.rpo_schedule = rpo_schedule", "if dictionary is None:\n    return None\ncontinuous_schedule = cohesity_management_sdk.models.continuous_schedule.ContinuousSchedule.from_dictionary(dictionary.get('continuousSchedule')) if dictionary.get('continuousSchedule') else None\ndaily_schedule = cohesity_management_sdk.models.daily_schedule.DailySchedule.from_dictionary(dictionary.get('dailySchedule')) if dictionary.get('dailySchedule') else None\nmonthly_schedule = cohesity_management_sdk.models.monthly_schedule.MonthlySchedule.from_dictionary(dictionary.get('monthlySchedule')) if dictionary.get('monthlySchedule') else None\nperiodicity = dictionary.get('periodicity')\nrpo_schedule = cohesity_management_sdk.models.rpo_schedule.RpoSchedule.from_dictionary(dictionary.get('rpoSchedule')) if dictionary.get('rpoSchedule') else None\nreturn cls(continuous_schedule, daily_schedule, monthly_schedule, periodicity, rpo_schedule)"], "bodies_text": "<|body_start_0|>\n    self.continuous_schedule = continuous_schedule\n    self.daily_schedule = daily_schedule\n    self.monthly_schedule = monthly_schedule\n    self.periodicity = periodicity\n    self.rpo_schedule = rpo_schedule\n<|end_body_0|>\n\n<|body_start_1|>\n    if dictionary is None:\n        return None\n    continuous_schedule = cohesity_management_sdk.models.continuous_schedule.ContinuousSchedule.from_dictionary(dictionary.get('continuousSchedule')) if dictionary.get('continuousSchedule') else None\n    daily_schedule = cohesity_management_sdk.models.daily_schedule.DailySchedule.from_dictionary(dictionary.get('dailySchedule')) if dictionary.get('dailySchedule') else None\n    monthly_schedule = cohesity_management_sdk.models.monthly_schedule.MonthlySchedule.from_dictionary(dictionary.get('monthlySchedule')) if dictionary.get('monthlySchedule') else None\n    periodicity = dictionary.get('periodicity')\n    rpo_schedule = cohesity_management_sdk.models.rpo_schedule.RpoSchedule.from_dictionary(dictionary.get('rpoSchedule')) if dictionary.get('rpoSchedule') else None\n    return cls(continuous_schedule, daily_schedule, monthly_schedule, periodicity, rpo_schedule)\n<|end_body_1|>\n", "class_docstring": "Implementation 
of the 'SchedulingPolicy' model. Specifies settings that define a backup schedule for a Protection Job. Attributes: continuous_schedule (ContinuousSchedule): Specifies the time interval between two Job Runs of a continuous backup schedule and any QuietTime periods when new Job Runs should NOT be started. Set if periodicity is kContinuous. daily_schedule (DailySchedule): Specifies a daily or weekly backup schedule. Set if periodicity is kDaily. monthly_schedule (MonthlySchedule): Specifies a monthly backup schedule. Set if periodicity is kMonthly. periodicity (PeriodicityEnum): Specifies how often to start new Job Runs of a Protection Job. 'kDaily' means new Job Runs start dail", "class_name": "SchedulingPolicy", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SchedulingPolicy:\n \"\"\"Implementation of the 'SchedulingPolicy' model. Specifies settings that define a backup schedule for a Protection Job. Attributes: continuous_schedule (ContinuousSchedule): Specifies the time interval between two Job Runs of a continuous backup schedule and any QuietTime periods when new Job Runs should NOT be started. Set if periodicity is kContinuous. daily_schedule (DailySchedule): Specifies a daily or weekly backup schedule. Set if periodicity is kDaily. monthly_schedule (MonthlySchedule): Specifies a monthly backup schedule. Set if periodicity is kMonthly. periodicity (PeriodicityEnum): Specifies how often to start new Job Runs of a Protection Job. 'kDaily' means new Job Runs start dail\"\"\"\n\n def __init__(self, continuous_schedule=None, daily_schedule=None, monthly_schedule=None, periodicity=None, rpo_schedule=None):\n \"\"\"Constructor for the SchedulingPolicy class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.continuous_schedule = continuous_schedule\n self.daily_schedule = daily_schedule\n self.monthly_schedule = monthly_schedule\n self.periodicity = periodicity\n self.rpo_schedule = rpo_schedule\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n continuous_schedule = cohesity_management_sdk.models.continuous_schedule.ContinuousSchedule.from_dictionary(dictionary.get('continuousSchedule')) if dictionary.get('continuousSchedule') else None\n daily_schedule = cohesity_management_sdk.models.daily_schedule.DailySchedule.from_dictionary(dictionary.get('dailySchedule')) if dictionary.get('dailySchedule') else None\n monthly_schedule = cohesity_management_sdk.models.monthly_schedule.MonthlySchedule.from_dictionary(dictionary.get('monthlySchedule')) if dictionary.get('monthlySchedule') else None\n periodicity = dictionary.get('periodicity')\n rpo_schedule = cohesity_management_sdk.models.rpo_schedule.RpoSchedule.from_dictionary(dictionary.get('rpoSchedule')) if dictionary.get('rpoSchedule') else None\n return cls(continuous_schedule, daily_schedule, monthly_schedule, periodicity, rpo_schedule)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000020", "length_bytes": 4041, "license_type": "permissive", "methods": [{"docstring": "Constructor for the SchedulingPolicy class", "name": "__init__", "signature": "def __init__(self, continuous_schedule=None, daily_schedule=None, monthly_schedule=None, periodicity=None, rpo_schedule=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004967", "prompt": "Implement the Python class `SchedulingPolicy` described below.\n\nClass description:\nImplementation of the 'SchedulingPolicy' model. Specifies settings that define a backup schedule for a Protection Job. Attributes: continuous_schedule (ContinuousSchedule): Specifies the time interval between two Job Runs of a continuous backup schedule and any QuietTime periods when new Job Runs should NOT be started. Set if periodicity is kContinuous. daily_schedule (DailySchedule): Specifies a daily or weekly backup schedule. Set if periodicity is kDaily. monthly_schedule (MonthlySchedule): Specifies a monthly backup schedule. Set if periodicity is kMonthly. periodicity (PeriodicityEnum): Specifies how often to start new Job Runs of a Protection Job. 'kDaily' means new Job Runs start dail\n\nMethod signatures and docstrings:\n- def __init__(self, continuous_schedule=None, daily_schedule=None, monthly_schedule=None, periodicity=None, rpo_schedule=None): Constructor for the SchedulingPolicy class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `SchedulingPolicy` described below.\n\nClass description:\nImplementation of the 'SchedulingPolicy' model. Specifies settings that define a backup schedule for a Protection Job. Attributes: continuous_schedule (ContinuousSchedule): Specifies the time interval between two Job Runs of a continuous backup schedule and any QuietTime periods when new Job Runs should NOT be started. Set if periodicity is kContinuous. daily_schedule (DailySchedule): Specifies a daily or weekly backup schedule. Set if periodicity is kDaily. monthly_schedule (MonthlySchedule): Specifies a monthly backup schedule. Set if periodicity is kMonthly. periodicity (PeriodicityEnum): Specifies how often to start new Job Runs of a Protection Job. 'kDaily' means new Job Runs start dail\n\nMethod signatures and docstrings:\n- def __init__(self, continuous_schedule=None, daily_schedule=None, monthly_schedule=None, periodicity=None, rpo_schedule=None): Constructor for the SchedulingPolicy class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass SchedulingPolicy:\n \"\"\"Implementation of the 'SchedulingPolicy' model. Specifies settings that define a backup schedule for a Protection Job. Attributes: continuous_schedule (ContinuousSchedule): Specifies the time interval between two Job Runs of a continuous backup schedule and any QuietTime periods when new Job Runs should NOT be started. Set if periodicity is kContinuous. daily_schedule (DailySchedule): Specifies a daily or weekly backup schedule. Set if periodicity is kDaily. monthly_schedule (MonthlySchedule): Specifies a monthly backup schedule. Set if periodicity is kMonthly. periodicity (PeriodicityEnum): Specifies how often to start new Job Runs of a Protection Job. 'kDaily' means new Job Runs start dail\"\"\"\n\n def __init__(self, continuous_schedule=None, daily_schedule=None, monthly_schedule=None, periodicity=None, rpo_schedule=None):\n \"\"\"Constructor for the SchedulingPolicy class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.continuous_schedule = continuous_schedule\n self.daily_schedule = daily_schedule\n self.monthly_schedule = monthly_schedule\n self.periodicity = periodicity\n self.rpo_schedule = rpo_schedule\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n continuous_schedule = cohesity_management_sdk.models.continuous_schedule.ContinuousSchedule.from_dictionary(dictionary.get('continuousSchedule')) if dictionary.get('continuousSchedule') else None\n daily_schedule = cohesity_management_sdk.models.daily_schedule.DailySchedule.from_dictionary(dictionary.get('dailySchedule')) if dictionary.get('dailySchedule') else None\n monthly_schedule = cohesity_management_sdk.models.monthly_schedule.MonthlySchedule.from_dictionary(dictionary.get('monthlySchedule')) if dictionary.get('monthlySchedule') else None\n periodicity = dictionary.get('periodicity')\n rpo_schedule = cohesity_management_sdk.models.rpo_schedule.RpoSchedule.from_dictionary(dictionary.get('rpoSchedule')) if dictionary.get('rpoSchedule') else None\n return cls(continuous_schedule, daily_schedule, monthly_schedule, periodicity, rpo_schedule)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass SchedulingPolicy:\n \"\"\"Implementation of the 'SchedulingPolicy' model. Specifies settings that define a backup schedule for a Protection Job. Attributes: continuous_schedule (ContinuousSchedule): Specifies the time interval between two Job Runs of a continuous backup schedule and any QuietTime periods when new Job Runs should NOT be started. Set if periodicity is kContinuous. daily_schedule (DailySchedule): Specifies a daily or weekly backup schedule. Set if periodicity is kDaily. monthly_schedule (MonthlySchedule): Specifies a monthly backup schedule. Set if periodicity is kMonthly. periodicity (PeriodicityEnum): Specifies how often to start new Job Runs of a Protection Job. 'kDaily' means new Job Runs start dail\"\"\"\n\n def __init__(self, continuous_schedule=None, daily_schedule=None, monthly_schedule=None, periodicity=None, rpo_schedule=None):\n \"\"\"Constructor for the SchedulingPolicy class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SchedulingPolicy:\n \"\"\"Implementation of the 'SchedulingPolicy' model. Specifies settings that define a backup schedule for a Protection Job. Attributes: continuous_schedule (ContinuousSchedule): Specifies the time interval between two Job Runs of a continuous backup schedule and any QuietTime periods when new Job Runs should NOT be started. Set if periodicity is kContinuous. daily_schedule (DailySchedule): Specifies a daily or weekly backup schedule. Set if periodicity is kDaily. monthly_schedule (MonthlySchedule): Specifies a monthly backup schedule. Set if periodicity is kMonthly. periodicity (PeriodicityEnum): Specifies how often to start new Job Runs of a Protection Job. 
'kDaily' means new Job Runs start dail\"\"\"\n\n def __init__(self, continuous_schedule=None, daily_schedule=None, monthly_schedule=None, periodicity=None, rpo_schedule=None):\n \"\"\"Constructor for the SchedulingPolicy class\"\"\"\n self.continuous_schedule = continuous_schedule\n self.daily_schedule = daily_schedule\n self.monthly_schedule = monthly_schedule\n self.periodicity = periodicity\n self.rpo_schedule = rpo_schedule\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n continuous_schedule = cohesity_management_sdk.models.continuous_schedule.ContinuousSchedule.from_dictionary(dictionary.get('continuousSchedule')) if dictionary.get('continuousSchedule') else None\n daily_schedule = cohesity_management_sdk.models.daily_schedule.DailySchedule.from_dictionary(dictionary.get('dailySchedule')) if dictionary.get('dailySchedule') else None\n monthly_schedule = cohesity_management_sdk.models.monthly_schedule.MonthlySchedule.from_dictionary(dictionary.get('monthlySchedule')) if dictionary.get('monthlySchedule') else None\n periodicity = dictionary.get('periodicity')\n rpo_schedule = cohesity_management_sdk.models.rpo_schedule.RpoSchedule.from_dictionary(dictionary.get('rpoSchedule')) if dictionary.get('rpoSchedule') else None\n return cls(continuous_schedule, daily_schedule, monthly_schedule, periodicity, rpo_schedule)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/scheduling_policy.py", "source_repo": "cohesity/management-sdk-python", "split": "val", "star_events_count": 24} {"blob_id": "3a2ab2a624617dc54a5a954bf54162fb7ab6897d", "bodies": ["super().__init__(header, raw_data)\nself.status, self.length, *self.values = unpack_from(f'<{self.header.params_count}I', raw_data)\nself.data = raw_data[8:8 + self.length] if self.length > 0 else b''", "tag = ResponseTag.name(self.header.tag)\nstatus = StatusCode.get(self.status, f'Unknown[0x{self.status:08X}]')\nreturn f'Tag={tag}, Status={status}, Length={self.length}'"], "bodies_text": "<|body_start_0|>\n super().__init__(header, raw_data)\n self.status, self.length, *self.values = unpack_from(f'<{self.header.params_count}I', raw_data)\n self.data = raw_data[8:8 + self.length] if self.length > 0 else b''\n<|end_body_0|>\n\n<|body_start_1|>\n tag = ResponseTag.name(self.header.tag)\n status = StatusCode.get(self.status, f'Unknown[0x{self.status:08X}]')\n return f'Tag={tag}, Status={status}, Length={self.length}'\n<|end_body_1|>\n", "class_docstring": "McuBoot flash read once response format class.", "class_name": "FlashReadOnceResponse", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause", "LicenseRef-scancode-warranty-disclaimer"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FlashReadOnceResponse:\n \"\"\"McuBoot flash read once response format class.\"\"\"\n\n def __init__(self, header: CmdHeader, raw_data: bytes) -> None:\n \"\"\"Initialize the Flash-Read-Once response object. 
:param header: Header for the response :param raw_data: Response data\"\"\"\n <|body_0|>\n\n def info(self) -> str:\n \"\"\"Get object info.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(header, raw_data)\n self.status, self.length, *self.values = unpack_from(f'<{self.header.params_count}I', raw_data)\n self.data = raw_data[8:8 + self.length] if self.length > 0 else b''\n<|end_body_0|>\n\n<|body_start_1|>\n tag = ResponseTag.name(self.header.tag)\n status = StatusCode.get(self.status, f'Unknown[0x{self.status:08X}]')\n return f'Tag={tag}, Status={status}, Length={self.length}'\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000021", "length_bytes": 14747, "license_type": "permissive", "methods": [{"docstring": "Initialize the Flash-Read-Once response object. :param header: Header for the response :param raw_data: Response data", "name": "__init__", "signature": "def __init__(self, header: CmdHeader, raw_data: bytes) -> None"}, {"docstring": "Get object info.", "name": "info", "signature": "def info(self) -> str"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005455", "prompt": "Implement the Python class `FlashReadOnceResponse` described below.\n\nClass description:\nMcuBoot flash read once response format class.\n\nMethod signatures and docstrings:\n- def __init__(self, header: CmdHeader, raw_data: bytes) -> None: Initialize the Flash-Read-Once response object. :param header: Header for the response :param raw_data: Response data\n- def info(self) -> str: Get object info.", "prompted_full_text": "Implement the Python class `FlashReadOnceResponse` described below.\n\nClass description:\nMcuBoot flash read once response format class.\n\nMethod signatures and docstrings:\n- def __init__(self, header: CmdHeader, raw_data: bytes) -> None: Initialize the Flash-Read-Once response object. :param header: Header for the response :param raw_data: Response data\n- def info(self) -> str: Get object info.\n\n<|skeleton|>\nclass FlashReadOnceResponse:\n \"\"\"McuBoot flash read once response format class.\"\"\"\n\n def __init__(self, header: CmdHeader, raw_data: bytes) -> None:\n \"\"\"Initialize the Flash-Read-Once response object. :param header: Header for the response :param raw_data: Response data\"\"\"\n <|body_0|>\n\n def info(self) -> str:\n \"\"\"Get object info.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(header, raw_data)\n self.status, self.length, *self.values = unpack_from(f'<{self.header.params_count}I', raw_data)\n self.data = raw_data[8:8 + self.length] if self.length > 0 else b''\n<|end_body_0|>\n\n<|body_start_1|>\n tag = ResponseTag.name(self.header.tag)\n status = StatusCode.get(self.status, f'Unknown[0x{self.status:08X}]')\n return f'Tag={tag}, Status={status}, Length={self.length}'\n<|end_body_1|>\n", "revision_id": "4a31fb091f95fb035bc66241ee4e02dabb580072", "skeleton": "<|skeleton|>\nclass FlashReadOnceResponse:\n \"\"\"McuBoot flash read once response format class.\"\"\"\n\n def __init__(self, header: CmdHeader, raw_data: bytes) -> None:\n \"\"\"Initialize the Flash-Read-Once response object. 
:param header: Header for the response :param raw_data: Response data\"\"\"\n <|body_0|>\n\n def info(self) -> str:\n \"\"\"Get object info.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FlashReadOnceResponse:\n \"\"\"McuBoot flash read once response format class.\"\"\"\n\n def __init__(self, header: CmdHeader, raw_data: bytes) -> None:\n \"\"\"Initialize the Flash-Read-Once response object. :param header: Header for the response :param raw_data: Response data\"\"\"\n super().__init__(header, raw_data)\n self.status, self.length, *self.values = unpack_from(f'<{self.header.params_count}I', raw_data)\n self.data = raw_data[8:8 + self.length] if self.length > 0 else b''\n\n def info(self) -> str:\n \"\"\"Get object info.\"\"\"\n tag = ResponseTag.name(self.header.tag)\n status = StatusCode.get(self.status, f'Unknown[0x{self.status:08X}]')\n return f'Tag={tag}, Status={status}, Length={self.length}'\n", "source": "the_stack_v2_python_sparse", "source_path": "spsdk/mboot/commands.py", "source_repo": "AdrianCano-01/spsdk", "split": "val", "star_events_count": 0} {"blob_id": "dc0fdee6d06d0083a83c86805c1cb2ab61666b71", "bodies": ["int_ = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-']\nres = ''\ni = 0\nlength = len(s)\nif s[0] in int_:\n res += s[0]\n while i < length - 1:\n i += 1\n if s[i] in int_:\n res += s[i]\n else:\n break\n res = int(res)\n if res > 2 ** 31 - 1:\n res = 2 ** 31 - 1\n elif res < -2 ** 31:\n res = -2 ** 31\n else:\n res = res\n print(res)\n return res\nelse:\n print('0')\n return 0", "res = re.findall('^[\\\\+\\\\-]?\\\\d+', s.strip())\nif res != []:\n if int(res[0]) > 2 ** 31 - 1:\n s_new = 2 ** 31 - 1\n elif int(res[0]) < -2 ** 31:\n s_new = -2 ** 31\n else:\n s_new = int(res[0])\nelse:\n s_new = 0\nprint(s_new)\nreturn s_new", "s_new = ''\ns = s.lstrip()\nif s.startswith('+') or s.startswith('-'):\n s_new += s[0]\n s = s[1:]\nfor ch in s:\n if ch.isdigit():\n s_new += ch\n else:\n break\nif s_new.lstrip('+-').isdigit():\n s_new = int(s_new)\nelse:\n return 0\nprint(min(2 ** 31 - 1, max(-2 ** 31, s_new)))\nreturn min(2 ** 31 - 1, max(-2 ** 31, s_new))"], "bodies_text": "<|body_start_0|>\n int_ = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-']\n res = ''\n i = 0\n length = len(s)\n if s[0] in int_:\n res += s[0]\n while i < length - 1:\n i += 1\n if s[i] in int_:\n res += s[i]\n else:\n break\n res = int(res)\n if res > 2 ** 31 - 1:\n res = 2 ** 31 - 1\n elif res < -2 ** 31:\n res = -2 ** 31\n else:\n res = res\n print(res)\n return res\n else:\n print('0')\n return 0\n<|end_body_0|>\n\n<|body_start_1|>\n res = re.findall('^[\\\\+\\\\-]?\\\\d+', s.strip())\n if res != []:\n if int(res[0]) > 2 ** 31 - 1:\n s_new = 2 ** 31 - 1\n elif int(res[0]) < -2 ** 31:\n s_new = -2 ** 31\n else:\n s_new = int(res[0])\n else:\n s_new = 0\n print(s_new)\n return s_new\n<|end_body_1|>\n\n<|body_start_2|>\n s_new = ''\n s = s.lstrip()\n if s.startswith('+') or s.startswith('-'):\n s_new += s[0]\n s = s[1:]\n for ch in s:\n if ch.isdigit():\n s_new += ch\n else:\n break\n if s_new.lstrip('+-').isdigit():\n s_new = int(s_new)\n else:\n return 0\n print(min(2 ** 31 - 1, max(-2 ** 31, s_new)))\n return min(2 ** 31 - 1, max(-2 ** 31, s_new))\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def mystoi(self, s):\n 
\"\"\":type s:str :rtype:int\"\"\"\n <|body_0|>\n\n def mystoi_1(self, s):\n \"\"\":type s:str :rtype:int\"\"\"\n <|body_1|>\n\n def mystoi_2(self, s):\n \"\"\":type s:str :rtype:int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n int_ = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-']\n res = ''\n i = 0\n length = len(s)\n if s[0] in int_:\n res += s[0]\n while i < length - 1:\n i += 1\n if s[i] in int_:\n res += s[i]\n else:\n break\n res = int(res)\n if res > 2 ** 31 - 1:\n res = 2 ** 31 - 1\n elif res < -2 ** 31:\n res = -2 ** 31\n else:\n res = res\n print(res)\n return res\n else:\n print('0')\n return 0\n<|end_body_0|>\n\n<|body_start_1|>\n res = re.findall('^[\\\\+\\\\-]?\\\\d+', s.strip())\n if res != []:\n if int(res[0]) > 2 ** 31 - 1:\n s_new = 2 ** 31 - 1\n elif int(res[0]) < -2 ** 31:\n s_new = -2 ** 31\n else:\n s_new = int(res[0])\n else:\n s_new = 0\n print(s_new)\n return s_new\n<|end_body_1|>\n\n<|body_start_2|>\n s_new = ''\n s = s.lstrip()\n if s.startswith('+') or s.startswith('-'):\n s_new += s[0]\n s = s[1:]\n for ch in s:\n if ch.isdigit():\n s_new += ch\n else:\n break\n if s_new.lstrip('+-').isdigit():\n s_new = int(s_new)\n else:\n return 0\n print(min(2 ** 31 - 1, max(-2 ** 31, s_new)))\n return min(2 ** 31 - 1, max(-2 ** 31, s_new))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000022", "length_bytes": 3713, "license_type": "no_license", "methods": [{"docstring": ":type s:str :rtype:int", "name": "mystoi", "signature": "def mystoi(self, s)"}, {"docstring": ":type s:str :rtype:int", "name": "mystoi_1", "signature": "def mystoi_1(self, s)"}, {"docstring": ":type s:str :rtype:int", "name": "mystoi_2", "signature": "def mystoi_2(self, s)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003422", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def mystoi(self, s): :type s:str :rtype:int\n- def mystoi_1(self, s): :type s:str :rtype:int\n- def mystoi_2(self, s): :type s:str :rtype:int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def mystoi(self, s): :type s:str :rtype:int\n- def mystoi_1(self, s): :type s:str :rtype:int\n- def mystoi_2(self, s): :type s:str :rtype:int\n\n<|skeleton|>\nclass Solution:\n\n def mystoi(self, s):\n \"\"\":type s:str :rtype:int\"\"\"\n <|body_0|>\n\n def mystoi_1(self, s):\n \"\"\":type s:str :rtype:int\"\"\"\n <|body_1|>\n\n def mystoi_2(self, s):\n \"\"\":type s:str :rtype:int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n int_ = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-']\n res = ''\n i = 0\n length = len(s)\n if s[0] in int_:\n res += s[0]\n while i < length - 1:\n i += 1\n if s[i] in int_:\n res += s[i]\n else:\n break\n res = int(res)\n if res > 2 ** 31 - 1:\n res = 2 ** 31 - 1\n elif res < -2 ** 31:\n res = -2 ** 31\n else:\n res = res\n print(res)\n return res\n else:\n print('0')\n return 0\n<|end_body_0|>\n\n<|body_start_1|>\n res = re.findall('^[\\\\+\\\\-]?\\\\d+', s.strip())\n if res != []:\n if int(res[0]) > 2 ** 31 - 1:\n s_new = 2 ** 31 - 1\n elif int(res[0]) < -2 ** 31:\n s_new = -2 ** 31\n else:\n s_new = int(res[0])\n else:\n s_new = 0\n print(s_new)\n return s_new\n<|end_body_1|>\n\n<|body_start_2|>\n s_new = ''\n s = s.lstrip()\n if s.startswith('+') or s.startswith('-'):\n s_new += s[0]\n s = s[1:]\n for ch 
in s:\n if ch.isdigit():\n s_new += ch\n else:\n break\n if s_new.lstrip('+-').isdigit():\n s_new = int(s_new)\n else:\n return 0\n print(min(2 ** 31 - 1, max(-2 ** 31, s_new)))\n return min(2 ** 31 - 1, max(-2 ** 31, s_new))\n<|end_body_2|>\n", "revision_id": "4f2802d4773eddd2a2e06e61c51463056886b730", "skeleton": "<|skeleton|>\nclass Solution:\n\n def mystoi(self, s):\n \"\"\":type s:str :rtype:int\"\"\"\n <|body_0|>\n\n def mystoi_1(self, s):\n \"\"\":type s:str :rtype:int\"\"\"\n <|body_1|>\n\n def mystoi_2(self, s):\n \"\"\":type s:str :rtype:int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def mystoi(self, s):\n \"\"\":type s:str :rtype:int\"\"\"\n int_ = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-']\n res = ''\n i = 0\n length = len(s)\n if s[0] in int_:\n res += s[0]\n while i < length - 1:\n i += 1\n if s[i] in int_:\n res += s[i]\n else:\n break\n res = int(res)\n if res > 2 ** 31 - 1:\n res = 2 ** 31 - 1\n elif res < -2 ** 31:\n res = -2 ** 31\n else:\n res = res\n print(res)\n return res\n else:\n print('0')\n return 0\n\n def mystoi_1(self, s):\n \"\"\":type s:str :rtype:int\"\"\"\n res = re.findall('^[\\\\+\\\\-]?\\\\d+', s.strip())\n if res != []:\n if int(res[0]) > 2 ** 31 - 1:\n s_new = 2 ** 31 - 1\n elif int(res[0]) < -2 ** 31:\n s_new = -2 ** 31\n else:\n s_new = int(res[0])\n else:\n s_new = 0\n print(s_new)\n return s_new\n\n def mystoi_2(self, s):\n \"\"\":type s:str :rtype:int\"\"\"\n s_new = ''\n s = s.lstrip()\n if s.startswith('+') or s.startswith('-'):\n s_new += s[0]\n s = s[1:]\n for ch in s:\n if ch.isdigit():\n s_new += ch\n else:\n break\n if s_new.lstrip('+-').isdigit():\n s_new = int(s_new)\n else:\n return 0\n print(min(2 ** 31 - 1, max(-2 ** 31, s_new)))\n return min(2 ** 31 - 1, max(-2 ** 31, s_new))\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/17_myatoi.py", "source_repo": "Yara7L/python_algorithm", "split": "val", "star_events_count": 0} {"blob_id": "1d0a52c75ac5ba7fd7f96acff93807ca8778aec5", "bodies": ["self.x = constant([[0.7, 0.9]])\nself.w1 = Variable(random_normal([2, 3], stddev=1, seed=1))\nself.w2 = Variable(random_normal([3, 1], stddev=1, seed=1))", "a = matmul(self.x, self.w1)\ny = matmul(a, self.w2)\nsess = Session()\nsess.run(self.w1.initializer)\nsess.run(self.w2.initializer)\nprint('第一种:', sess.run(y))\nsess.close()", "self.w1 = Variable(random_normal([2, 3], stddev=1, seed=1))\nself.w2 = Variable(random_normal([3, 1], stddev=1, seed=1))\nself.x = placeholder(float32, shape=(3, 2), name='input')", "a = matmul(self.x, self.w1)\ny = matmul(a, self.w2)\nsess = Session()\ninitOp = global_variables_initializer()\nsess.run(initOp)\nprint('第二种:', sess.run(y, feed_dict={self.x: [[0.7, 0.9], [0.1, 0.4], [0.5, 0.8]]}))\nsess.close()", "\"\"\"\n crossEntry = - reduce_mean(y * log(clip_by_value(y, 1e-10 1.0)))\n # 定义学习率\n learningRate = 0.001\n # 定义反向传播算法来优化神经网络中的参数\n trainStep = train.AdamOptimizer(learningRate).minimize(crossEntry)\n \"\"\"\npass"], "bodies_text": "<|body_start_0|>\n self.x = constant([[0.7, 0.9]])\n self.w1 = Variable(random_normal([2, 3], stddev=1, seed=1))\n self.w2 = Variable(random_normal([3, 1], stddev=1, seed=1))\n<|end_body_0|>\n\n<|body_start_1|>\n a = matmul(self.x, self.w1)\n y = matmul(a, self.w2)\n sess = Session()\n sess.run(self.w1.initializer)\n sess.run(self.w2.initializer)\n print('第一种:', sess.run(y))\n 
sess.close()\n<|end_body_1|>\n\n<|body_start_2|>\n self.w1 = Variable(random_normal([2, 3], stddev=1, seed=1))\n self.w2 = Variable(random_normal([3, 1], stddev=1, seed=1))\n self.x = placeholder(float32, shape=(3, 2), name='input')\n<|end_body_2|>\n\n<|body_start_3|>\n a = matmul(self.x, self.w1)\n y = matmul(a, self.w2)\n sess = Session()\n initOp = global_variables_initializer()\n sess.run(initOp)\n print('第二种:', sess.run(y, feed_dict={self.x: [[0.7, 0.9], [0.1, 0.4], [0.5, 0.8]]}))\n sess.close()\n<|end_body_3|>\n\n<|body_start_4|>\n \"\"\"\n crossEntry = - reduce_mean(y * log(clip_by_value(y, 1e-10 1.0)))\n # 定义学习率\n learningRate = 0.001\n # 定义反向传播算法来优化神经网络中的参数\n trainStep = train.AdamOptimizer(learningRate).minimize(crossEntry)\n \"\"\"\n pass\n<|end_body_4|>\n", "class_docstring": "神经网络", "class_name": "NeuralNetworks", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NeuralNetworks:\n \"\"\"神经网络\"\"\"\n\n def example_load_data(self):\n \"\"\"加载数据\"\"\"\n <|body_0|>\n\n def example_compute(self):\n \"\"\"简单的神经网络实现前向传播的算法 这里为什么能用矩阵乘法来计算呢?因为,通过求加权和发现 正好是矩阵的乘法运算结果。所以,这里充分的体现了数学的美\"\"\"\n <|body_1|>\n\n def example_2_load_data(self):\n \"\"\"加载数据\"\"\"\n <|body_2|>\n\n def example_2_compute(self):\n \"\"\"实现前向传播算法,减少计算图中节点的个数\"\"\"\n <|body_3|>\n\n def evalute(self):\n \"\"\"定义损失函数来刻画预测值与真实值的差距\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.x = constant([[0.7, 0.9]])\n self.w1 = Variable(random_normal([2, 3], stddev=1, seed=1))\n self.w2 = Variable(random_normal([3, 1], stddev=1, seed=1))\n<|end_body_0|>\n\n<|body_start_1|>\n a = matmul(self.x, self.w1)\n y = matmul(a, self.w2)\n sess = Session()\n sess.run(self.w1.initializer)\n sess.run(self.w2.initializer)\n print('第一种:', sess.run(y))\n sess.close()\n<|end_body_1|>\n\n<|body_start_2|>\n self.w1 = Variable(random_normal([2, 3], stddev=1, seed=1))\n self.w2 = Variable(random_normal([3, 1], stddev=1, seed=1))\n self.x = placeholder(float32, shape=(3, 2), name='input')\n<|end_body_2|>\n\n<|body_start_3|>\n a = matmul(self.x, self.w1)\n y = matmul(a, self.w2)\n sess = Session()\n initOp = global_variables_initializer()\n sess.run(initOp)\n print('第二种:', sess.run(y, feed_dict={self.x: [[0.7, 0.9], [0.1, 0.4], [0.5, 0.8]]}))\n sess.close()\n<|end_body_3|>\n\n<|body_start_4|>\n \"\"\"\n crossEntry = - reduce_mean(y * log(clip_by_value(y, 1e-10 1.0)))\n # 定义学习率\n learningRate = 0.001\n # 定义反向传播算法来优化神经网络中的参数\n trainStep = train.AdamOptimizer(learningRate).minimize(crossEntry)\n \"\"\"\n pass\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000023", "length_bytes": 6839, "license_type": "permissive", "methods": [{"docstring": "加载数据", "name": "example_load_data", "signature": "def example_load_data(self)"}, {"docstring": "简单的神经网络实现前向传播的算法 这里为什么能用矩阵乘法来计算呢?因为,通过求加权和发现 正好是矩阵的乘法运算结果。所以,这里充分的体现了数学的美", "name": "example_compute", "signature": "def example_compute(self)"}, {"docstring": "加载数据", "name": "example_2_load_data", "signature": "def example_2_load_data(self)"}, {"docstring": "实现前向传播算法,减少计算图中节点的个数", "name": "example_2_compute", "signature": "def example_2_compute(self)"}, {"docstring": "定义损失函数来刻画预测值与真实值的差距", "name": "evalute", "signature": "def evalute(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_test_000070", "prompt": "Implement the Python class `NeuralNetworks` described below.\n\nClass description:\n神经网络\n\nMethod signatures and docstrings:\n- def example_load_data(self): 加载数据\n- def example_compute(self): 
简单的神经网络实现前向传播的算法 这里为什么能用矩阵乘法来计算呢?因为,通过求加权和发现 正好是矩阵的乘法运算结果。所以,这里充分的体现了数学的美\n- def example_2_load_data(self): 加载数据\n- def example_2_compute(self): 实现前向传播算法,减少计算图中节点的个数\n- def evalute(self): 定义损失函数来刻画预测值与真实值的差距", "prompted_full_text": "Implement the Python class `NeuralNetworks` described below.\n\nClass description:\n神经网络\n\nMethod signatures and docstrings:\n- def example_load_data(self): 加载数据\n- def example_compute(self): 简单的神经网络实现前向传播的算法 这里为什么能用矩阵乘法来计算呢?因为,通过求加权和发现 正好是矩阵的乘法运算结果。所以,这里充分的体现了数学的美\n- def example_2_load_data(self): 加载数据\n- def example_2_compute(self): 实现前向传播算法,减少计算图中节点的个数\n- def evalute(self): 定义损失函数来刻画预测值与真实值的差距\n\n<|skeleton|>\nclass NeuralNetworks:\n \"\"\"神经网络\"\"\"\n\n def example_load_data(self):\n \"\"\"加载数据\"\"\"\n <|body_0|>\n\n def example_compute(self):\n \"\"\"简单的神经网络实现前向传播的算法 这里为什么能用矩阵乘法来计算呢?因为,通过求加权和发现 正好是矩阵的乘法运算结果。所以,这里充分的体现了数学的美\"\"\"\n <|body_1|>\n\n def example_2_load_data(self):\n \"\"\"加载数据\"\"\"\n <|body_2|>\n\n def example_2_compute(self):\n \"\"\"实现前向传播算法,减少计算图中节点的个数\"\"\"\n <|body_3|>\n\n def evalute(self):\n \"\"\"定义损失函数来刻画预测值与真实值的差距\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.x = constant([[0.7, 0.9]])\n self.w1 = Variable(random_normal([2, 3], stddev=1, seed=1))\n self.w2 = Variable(random_normal([3, 1], stddev=1, seed=1))\n<|end_body_0|>\n\n<|body_start_1|>\n a = matmul(self.x, self.w1)\n y = matmul(a, self.w2)\n sess = Session()\n sess.run(self.w1.initializer)\n sess.run(self.w2.initializer)\n print('第一种:', sess.run(y))\n sess.close()\n<|end_body_1|>\n\n<|body_start_2|>\n self.w1 = Variable(random_normal([2, 3], stddev=1, seed=1))\n self.w2 = Variable(random_normal([3, 1], stddev=1, seed=1))\n self.x = placeholder(float32, shape=(3, 2), name='input')\n<|end_body_2|>\n\n<|body_start_3|>\n a = matmul(self.x, self.w1)\n y = matmul(a, self.w2)\n sess = Session()\n initOp = global_variables_initializer()\n sess.run(initOp)\n print('第二种:', sess.run(y, feed_dict={self.x: [[0.7, 0.9], [0.1, 0.4], [0.5, 0.8]]}))\n sess.close()\n<|end_body_3|>\n\n<|body_start_4|>\n \"\"\"\n crossEntry = - reduce_mean(y * log(clip_by_value(y, 1e-10 1.0)))\n # 定义学习率\n learningRate = 0.001\n # 定义反向传播算法来优化神经网络中的参数\n trainStep = train.AdamOptimizer(learningRate).minimize(crossEntry)\n \"\"\"\n pass\n<|end_body_4|>\n", "revision_id": "c8da7128ea18ecaa5849f2066d321e70d6f97f70", "skeleton": "<|skeleton|>\nclass NeuralNetworks:\n \"\"\"神经网络\"\"\"\n\n def example_load_data(self):\n \"\"\"加载数据\"\"\"\n <|body_0|>\n\n def example_compute(self):\n \"\"\"简单的神经网络实现前向传播的算法 这里为什么能用矩阵乘法来计算呢?因为,通过求加权和发现 正好是矩阵的乘法运算结果。所以,这里充分的体现了数学的美\"\"\"\n <|body_1|>\n\n def example_2_load_data(self):\n \"\"\"加载数据\"\"\"\n <|body_2|>\n\n def example_2_compute(self):\n \"\"\"实现前向传播算法,减少计算图中节点的个数\"\"\"\n <|body_3|>\n\n def evalute(self):\n \"\"\"定义损失函数来刻画预测值与真实值的差距\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NeuralNetworks:\n \"\"\"神经网络\"\"\"\n\n def example_load_data(self):\n \"\"\"加载数据\"\"\"\n self.x = constant([[0.7, 0.9]])\n self.w1 = Variable(random_normal([2, 3], stddev=1, seed=1))\n self.w2 = Variable(random_normal([3, 1], stddev=1, seed=1))\n\n def example_compute(self):\n \"\"\"简单的神经网络实现前向传播的算法 这里为什么能用矩阵乘法来计算呢?因为,通过求加权和发现 正好是矩阵的乘法运算结果。所以,这里充分的体现了数学的美\"\"\"\n a = matmul(self.x, self.w1)\n y = matmul(a, self.w2)\n sess = Session()\n sess.run(self.w1.initializer)\n sess.run(self.w2.initializer)\n print('第一种:', sess.run(y))\n sess.close()\n\n def 
example_2_load_data(self):\n \"\"\"加载数据\"\"\"\n self.w1 = Variable(random_normal([2, 3], stddev=1, seed=1))\n self.w2 = Variable(random_normal([3, 1], stddev=1, seed=1))\n self.x = placeholder(float32, shape=(3, 2), name='input')\n\n def example_2_compute(self):\n \"\"\"实现前向传播算法,减少计算图中节点的个数\"\"\"\n a = matmul(self.x, self.w1)\n y = matmul(a, self.w2)\n sess = Session()\n initOp = global_variables_initializer()\n sess.run(initOp)\n print('第二种:', sess.run(y, feed_dict={self.x: [[0.7, 0.9], [0.1, 0.4], [0.5, 0.8]]}))\n sess.close()\n\n def evalute(self):\n \"\"\"定义损失函数来刻画预测值与真实值的差距\"\"\"\n \"\"\"\n crossEntry = - reduce_mean(y * log(clip_by_value(y, 1e-10 1.0)))\n # 定义学习率\n learningRate = 0.001\n # 定义反向传播算法来优化神经网络中的参数\n trainStep = train.AdamOptimizer(learningRate).minimize(crossEntry)\n \"\"\"\n pass\n", "source": "the_stack_v2_python_sparse", "source_path": "PYSTUDY/ml/tensorflowlib.py", "source_repo": "shi-cong/PYSTUDY", "split": "val", "star_events_count": 8} {"blob_id": "749b586b896d2236c4ef6c81750399b137c37347", "bodies": ["super(RBPDecisionMaker, self).__init__(search_context, logger)\nself.__patience = patience\nself.__random = Random()\nself.__random.seed(base_seed + 1024)", "rank = self._search_context.get_current_serp_position()\nrbp_score = self.__patience ** (rank - 1)\ndp = self.__random.random()\nif dp > rbp_score:\n return Actions.QUERY\nreturn Actions.SNIPPET"], "bodies_text": "<|body_start_0|>\n super(RBPDecisionMaker, self).__init__(search_context, logger)\n self.__patience = patience\n self.__random = Random()\n self.__random.seed(base_seed + 1024)\n<|end_body_0|>\n\n<|body_start_1|>\n rank = self._search_context.get_current_serp_position()\n rbp_score = self.__patience ** (rank - 1)\n dp = self.__random.random()\n if dp > rbp_score:\n return Actions.QUERY\n return Actions.SNIPPET\n<|end_body_1|>\n", "class_docstring": "An implementation of Rank-Biased Precision, operationalised as a stopping strategy. Uses a stochastic roll of the dice to determine if a searcher continues or not. Implemented as per Moffat and Zobel (2008).", "class_name": "RBPDecisionMaker", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RBPDecisionMaker:\n \"\"\"An implementation of Rank-Biased Precision, operationalised as a stopping strategy. Uses a stochastic roll of the dice to determine if a searcher continues or not. Implemented as per Moffat and Zobel (2008).\"\"\"\n\n def __init__(self, search_context, logger, patience=0.5, base_seed=0):\n \"\"\"Instantiates the decision maker, with a patience factor (defaulting to 0.5). The patience factor of RBP determines how patient a searcher is. 
The closer to 1.0, the deeper the searcher will go.\"\"\"\n <|body_0|>\n\n def decide(self):\n \"\"\"Implements the basic RBP algorithm, using the RBP score computed with the roll of a dice to determine whether the searcher continues or not.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(RBPDecisionMaker, self).__init__(search_context, logger)\n self.__patience = patience\n self.__random = Random()\n self.__random.seed(base_seed + 1024)\n<|end_body_0|>\n\n<|body_start_1|>\n rank = self._search_context.get_current_serp_position()\n rbp_score = self.__patience ** (rank - 1)\n dp = self.__random.random()\n if dp > rbp_score:\n return Actions.QUERY\n return Actions.SNIPPET\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000024", "length_bytes": 1377, "license_type": "permissive", "methods": [{"docstring": "Instantiates the decision maker, with a patience factor (defaulting to 0.5). The patience factor of RBP determines how patient a searcher is. The closer to 1.0, the deeper the searcher will go.", "name": "__init__", "signature": "def __init__(self, search_context, logger, patience=0.5, base_seed=0)"}, {"docstring": "Implements the basic RBP algorithm, using the RBP score computed with the roll of a dice to determine whether the searcher continues or not.", "name": "decide", "signature": "def decide(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003555", "prompt": "Implement the Python class `RBPDecisionMaker` described below.\n\nClass description:\nAn implementation of Rank-Biased Precision, operationalised as a stopping strategy. Uses a stochastic roll of the dice to determine if a searcher continues or not. Implemented as per Moffat and Zobel (2008).\n\nMethod signatures and docstrings:\n- def __init__(self, search_context, logger, patience=0.5, base_seed=0): Instantiates the decision maker, with a patience factor (defaulting to 0.5). The patience factor of RBP determines how patient a searcher is. The closer to 1.0, the deeper the searcher will go.\n- def decide(self): Implements the basic RBP algorithm, using the RBP score computed with the roll of a dice to determine whether the searcher continues or not.", "prompted_full_text": "Implement the Python class `RBPDecisionMaker` described below.\n\nClass description:\nAn implementation of Rank-Biased Precision, operationalised as a stopping strategy. Uses a stochastic roll of the dice to determine if a searcher continues or not. Implemented as per Moffat and Zobel (2008).\n\nMethod signatures and docstrings:\n- def __init__(self, search_context, logger, patience=0.5, base_seed=0): Instantiates the decision maker, with a patience factor (defaulting to 0.5). The patience factor of RBP determines how patient a searcher is. The closer to 1.0, the deeper the searcher will go.\n- def decide(self): Implements the basic RBP algorithm, using the RBP score computed with the roll of a dice to determine whether the searcher continues or not.\n\n<|skeleton|>\nclass RBPDecisionMaker:\n \"\"\"An implementation of Rank-Biased Precision, operationalised as a stopping strategy. Uses a stochastic roll of the dice to determine if a searcher continues or not. Implemented as per Moffat and Zobel (2008).\"\"\"\n\n def __init__(self, search_context, logger, patience=0.5, base_seed=0):\n \"\"\"Instantiates the decision maker, with a patience factor (defaulting to 0.5). The patience factor of RBP determines how patient a searcher is. 
The closer to 1.0, the deeper the searcher will go.\"\"\"\n <|body_0|>\n\n def decide(self):\n \"\"\"Implements the basic RBP algorithm, using the RBP score computed with the roll of a dice to determine whether the searcher continues or not.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(RBPDecisionMaker, self).__init__(search_context, logger)\n self.__patience = patience\n self.__random = Random()\n self.__random.seed(base_seed + 1024)\n<|end_body_0|>\n\n<|body_start_1|>\n rank = self._search_context.get_current_serp_position()\n rbp_score = self.__patience ** (rank - 1)\n dp = self.__random.random()\n if dp > rbp_score:\n return Actions.QUERY\n return Actions.SNIPPET\n<|end_body_1|>\n", "revision_id": "c6f5b48cc9916c29f109d5ef74876ff8c073a44c", "skeleton": "<|skeleton|>\nclass RBPDecisionMaker:\n \"\"\"An implementation of Rank-Biased Precision, operationalised as a stopping strategy. Uses a stochastic roll of the dice to determine if a searcher continues or not. Implemented as per Moffat and Zobel (2008).\"\"\"\n\n def __init__(self, search_context, logger, patience=0.5, base_seed=0):\n \"\"\"Instantiates the decision maker, with a patience factor (defaulting to 0.5). The patience factor of RBP determines how patient a searcher is. The closer to 1.0, the deeper the searcher will go.\"\"\"\n <|body_0|>\n\n def decide(self):\n \"\"\"Implements the basic RBP algorithm, using the RBP score computed with the roll of a dice to determine whether the searcher continues or not.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RBPDecisionMaker:\n \"\"\"An implementation of Rank-Biased Precision, operationalised as a stopping strategy. Uses a stochastic roll of the dice to determine if a searcher continues or not. Implemented as per Moffat and Zobel (2008).\"\"\"\n\n def __init__(self, search_context, logger, patience=0.5, base_seed=0):\n \"\"\"Instantiates the decision maker, with a patience factor (defaulting to 0.5). The patience factor of RBP determines how patient a searcher is. 
The closer to 1.0, the deeper the searcher will go.\"\"\"\n super(RBPDecisionMaker, self).__init__(search_context, logger)\n self.__patience = patience\n self.__random = Random()\n self.__random.seed(base_seed + 1024)\n\n def decide(self):\n \"\"\"Implements the basic RBP algorithm, using the RBP score computed with the roll of a dice to determine whether the searcher continues or not.\"\"\"\n rank = self._search_context.get_current_serp_position()\n rbp_score = self.__patience ** (rank - 1)\n dp = self.__random.random()\n if dp > rbp_score:\n return Actions.QUERY\n return Actions.SNIPPET\n", "source": "the_stack_v2_python_sparse", "source_path": "simiir/stopping_decision_makers/rbp_decision_maker.py", "source_repo": "ArthurCamara/simiir", "split": "val", "star_events_count": 0} {"blob_id": "b6f108f4282d5ad06959fbd37199846101714d23", "bodies": ["if isinstance(image, Image):\n image = image.id\n_ = command\n_ = stdout\n_ = stderr\n_ = remove\n_ = kwargs\nraise NotImplementedError", "if isinstance(image, Image):\n image = image.id\n_ = kwargs\n_ = command\nraise NotImplementedError", "container_id = urllib.parse.quote_plus(container_id)\nresponse = self.client.get(f'/containers/{container_id}/json')\nbody = response.json()\nif response.status_code == 200:\n return self.prepare_model(body)\nif response.status_code == 404:\n raise NotFound(body['cause'], response=response, explanation=body['message'])\nraise APIError(body['cause'], response=response, explanation=body['message'])", "params = {'all': kwargs.get('all', None), 'filters': kwargs.get('filters', dict()), 'limit': kwargs.get('limit', None)}\nif 'before' in kwargs:\n params['filters']['before'] = kwargs.get('before')\nif 'since' in kwargs:\n params['filters']['since'] = kwargs.get('since')\nif len(params['filters']) > 0:\n params['filters'] = api.format_filters(params['filters'])\nresponse = self.client.get('/containers/json', params=params)\nbody = response.json()\nif response.status_code != 200:\n raise APIError(body['cause'], response=response, explanation=body['message'])\ncontainers: List[Container] = []\nfor element in body:\n containers.append(self.prepare_model(element))\nreturn containers", "params = dict()\nif filters is not None:\n params = {'filters': api.format_filters(filters)}\nresponse = self.client.post('/containers/prune', params=params)\nbody = response.json()\nif response.status_code != 200:\n raise APIError(body['cause'], response=response, explanation=body['message'])\nresults = {'ContainersDeleted': [], 'SpaceReclaimed': 0}\nfor entry in body:\n if entry.get('error', None) is not None:\n raise APIError(entry['error'], response=response, explanation=entry['error'])\n results['ContainersDeleted'].append(entry['id'])\n results['SpaceReclaimed'] += entry['space']\nreturn results"], "bodies_text": "<|body_start_0|>\n if isinstance(image, Image):\n image = image.id\n _ = command\n _ = stdout\n _ = stderr\n _ = remove\n _ = kwargs\n raise NotImplementedError\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(image, Image):\n image = image.id\n _ = kwargs\n _ = command\n raise NotImplementedError\n<|end_body_1|>\n\n<|body_start_2|>\n container_id = urllib.parse.quote_plus(container_id)\n response = self.client.get(f'/containers/{container_id}/json')\n body = response.json()\n if response.status_code == 200:\n return self.prepare_model(body)\n if response.status_code == 404:\n raise NotFound(body['cause'], response=response, explanation=body['message'])\n raise APIError(body['cause'], response=response, 
explanation=body['message'])\n<|end_body_2|>\n\n<|body_start_3|>\n params = {'all': kwargs.get('all', None), 'filters': kwargs.get('filters', dict()), 'limit': kwargs.get('limit', None)}\n if 'before' in kwargs:\n params['filters']['before'] = kwargs.get('before')\n if 'since' in kwargs:\n params['filters']['since'] = kwargs.get('since')\n if len(params['filters']) > 0:\n params['filters'] = api.format_filters(params['filters'])\n response = self.client.get('/containers/json', params=params)\n body = response.json()\n if response.status_code != 200:\n raise APIError(body['cause'], response=response, explanation=body['message'])\n containers: List[Container] = []\n for element in body:\n containers.append(self.prepare_model(element))\n return containers\n<|end_body_3|>\n\n<|body_start_4|>\n params = dict()\n if filters is not None:\n params = {'filters': api.format_filters(filters)}\n response = self.client.post('/containers/prune', params=params)\n body = response.json()\n if response.status_code != 200:\n raise APIError(body['cause'], response=response, explanation=body['message'])\n results = {'ContainersDeleted': [], 'SpaceReclaimed': 0}\n for entry in body:\n if entry.get('error', None) is not None:\n raise APIError(entry['error'], response=response, explanation=entry['error'])\n results['ContainersDeleted'].append(entry['id'])\n results['SpaceReclaimed'] += entry['space']\n return results\n<|end_body_4|>\n", "class_docstring": "Specialized Manager for Container resources. Attributes: resource: Container subclass of PodmanResource, factory method will create these.", "class_name": "ContainersManager", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ContainersManager:\n \"\"\"Specialized Manager for Container resources. Attributes: resource: Container subclass of PodmanResource, factory method will create these.\"\"\"\n\n def run(self, image: Union[str, Image], command: Union[str, List[str]]=None, stdout=True, stderr=False, remove: bool=False, **kwargs) -> Union[Container, Sequence[str]]:\n \"\"\"Run container. By default, run() will wait for the container to finish and return its logs. If detach=True, run() will start the container and return a Container object rather than logs. Args: image: Image to run. command: Command to run in the container. stdout: Include stdout. Default: True. stderr: Include stderr. Default: False. remove: Delete container when the container's processes exit. Default: False. Keyword Args: auto_remove (bool): Enable auto-removal of the container on daemon side when the container's process exits. blkio_weight_device (Dict[str, Any]): Block IO weight (relative device weight) in the form of: [{\"Path\": \"device_path\", \"Weight\": weight}]. blkio_weight (int): Block\"\"\"\n <|body_0|>\n\n def create(self, image: Union[Image, str], command: Union[str, List[str]]=None, **kwargs) -> Container:\n \"\"\"Create a container. See Container.run() for arguments. The following are ignored: stdout, stderr, and remove. Raises: ImageNotFound: If given image does not exist. APIError: If service returns an error.\"\"\"\n <|body_1|>\n\n def get(self, container_id: str) -> Container:\n \"\"\"Get container by name or id. Args: container_id: Container name or id. Raises: NotFound: Container does not exist. APIError: Error return by service.\"\"\"\n <|body_2|>\n\n def list(self, **kwargs) -> List[Container]:\n \"\"\"Report on containers. Keyword Args: all: If False, only show running containers. 
Default: False. since: Show containers created after container name or id given. before: Show containers created before container name or id given. limit: Show last N created containers. filters: Filter container reported. Available filters: - exited (int): Only containers with specified exit code - status (str): One of restarting, running, paused, exited - label (Union[str, List[str]]): Format either \"key\", \"key=value\" or a list of such. - id (str): The id of the container. - name (str): The name of the container. - ancestor (str): Filter by container ancestor. Format of <image-name>[:tag], <image-id>, or <image@digest>\"\"\"\n <|body_3|>\n\n def prune(self, filters: Mapping[str, str]=None) -> Dict[str, Any]:\n \"\"\"Delete stopped containers. Args: filters: Dict of criteria for determining containers to remove. Available keys are: - until (str): Delete containers before this time - label (List[str]): Labels associated with containers Returns: List of deleted container id's and the freed disk space in bytes. Raises: APIError: If service reports an error\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(image, Image):\n image = image.id\n _ = command\n _ = stdout\n _ = stderr\n _ = remove\n _ = kwargs\n raise NotImplementedError\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(image, Image):\n image = image.id\n _ = kwargs\n _ = command\n raise NotImplementedError\n<|end_body_1|>\n\n<|body_start_2|>\n container_id = urllib.parse.quote_plus(container_id)\n response = self.client.get(f'/containers/{container_id}/json')\n body = response.json()\n if response.status_code == 200:\n return self.prepare_model(body)\n if response.status_code == 404:\n raise NotFound(body['cause'], response=response, explanation=body['message'])\n raise APIError(body['cause'], response=response, explanation=body['message'])\n<|end_body_2|>\n\n<|body_start_3|>\n params = {'all': kwargs.get('all', None), 'filters': kwargs.get('filters', dict()), 'limit': kwargs.get('limit', None)}\n if 'before' in kwargs:\n params['filters']['before'] = kwargs.get('before')\n if 'since' in kwargs:\n params['filters']['since'] = kwargs.get('since')\n if len(params['filters']) > 0:\n params['filters'] = api.format_filters(params['filters'])\n response = self.client.get('/containers/json', params=params)\n body = response.json()\n if response.status_code != 200:\n raise APIError(body['cause'], response=response, explanation=body['message'])\n containers: List[Container] = []\n for element in body:\n containers.append(self.prepare_model(element))\n return containers\n<|end_body_3|>\n\n<|body_start_4|>\n params = dict()\n if filters is not None:\n params = {'filters': api.format_filters(filters)}\n response = self.client.post('/containers/prune', params=params)\n body = response.json()\n if response.status_code != 200:\n raise APIError(body['cause'], response=response, explanation=body['message'])\n results = {'ContainersDeleted': [], 'SpaceReclaimed': 0}\n for entry in body:\n if entry.get('error', None) is not None:\n raise APIError(entry['error'], response=response, explanation=entry['error'])\n results['ContainersDeleted'].append(entry['id'])\n results['SpaceReclaimed'] += entry['space']\n return results\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000025", "length_bytes": 18303, "license_type": "permissive", "methods": [{"docstring": "Run container. By default, run() will wait for the container to finish and return its logs. If detach=True, run() will start the container and return a Container object rather than logs. 
Args: image: Image to run. command: Command to run in the container. stdout: Include stdout. Default: True. stderr: Include stderr. Default: False. remove: Delete container when the container's processes exit. Default: False. Keyword Args: auto_remove (bool): Enable auto-removal of the container on daemon side when the container's process exits. blkio_weight_device (Dict[str, Any]): Block IO weight (relative device weight) in the form of: [{\"Path\": \"device_path\", \"Weight\": weight}]. blkio_weight (int): Block", "name": "run", "signature": "def run(self, image: Union[str, Image], command: Union[str, List[str]]=None, stdout=True, stderr=False, remove: bool=False, **kwargs) -> Union[Container, Sequence[str]]"}, {"docstring": "Create a container. See Container.run() for arguments. The following are ignored: stdout, stderr, and remove. Raises: ImageNotFound: If given image does not exist. APIError: If service returns an error.", "name": "create", "signature": "def create(self, image: Union[Image, str], command: Union[str, List[str]]=None, **kwargs) -> Container"}, {"docstring": "Get container by name or id. Args: container_id: Container name or id. Raises: NotFound: Container does not exist. APIError: Error return by service.", "name": "get", "signature": "def get(self, container_id: str) -> Container"}, {"docstring": "Report on containers. Keyword Args: all: If False, only show running containers. Default: False. since: Show containers created after container name or id given. before: Show containers created before container name or id given. limit: Show last N created containers. filters: Filter container reported. Available filters: - exited (int): Only containers with specified exit code - status (str): One of restarting, running, paused, exited - label (Union[str, List[str]]): Format either \"key\", \"key=value\" or a list of such. - id (str): The id of the container. - name (str): The name of the container. - ancestor (str): Filter by container ancestor. Format of [:tag], , or ", "name": "list", "signature": "def list(self, **kwargs) -> List[Container]"}, {"docstring": "Delete stopped containers. Args: filters: Dict of criteria for determining containers to remove. Available keys are: - until (str): Delete containers before this time - label (List[str]): Labels associated with containers Returns: List of deleted container id's and the freed disk space in bytes. Raises: APIError: If service reports an error", "name": "prune", "signature": "def prune(self, filters: Mapping[str, str]=None) -> Dict[str, Any]"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_003082", "prompt": "Implement the Python class `ContainersManager` described below.\n\nClass description:\nSpecialized Manager for Container resources. Attributes: resource: Container subclass of PodmanResource, factory method will create these.\n\nMethod signatures and docstrings:\n- def run(self, image: Union[str, Image], command: Union[str, List[str]]=None, stdout=True, stderr=False, remove: bool=False, **kwargs) -> Union[Container, Sequence[str]]: Run container. By default, run() will wait for the container to finish and return its logs. If detach=True, run() will start the container and return a Container object rather than logs. Args: image: Image to run. command: Command to run in the container. stdout: Include stdout. Default: True. stderr: Include stderr. Default: False. remove: Delete container when the container's processes exit. Default: False. 
Keyword Args: auto_remove (bool): Enable auto-removal of the container on daemon side when the container's process exits. blkio_weight_device (Dict[str, Any]): Block IO weight (relative device weight) in the form of: [{\"Path\": \"device_path\", \"Weight\": weight}]. blkio_weight (int): Block\n- def create(self, image: Union[Image, str], command: Union[str, List[str]]=None, **kwargs) -> Container: Create a container. See Container.run() for arguments. The following are ignored: stdout, stderr, and remove. Raises: ImageNotFound: If given image does not exist. APIError: If service returns an error.\n- def get(self, container_id: str) -> Container: Get container by name or id. Args: container_id: Container name or id. Raises: NotFound: Container does not exist. APIError: Error return by service.\n- def list(self, **kwargs) -> List[Container]: Report on containers. Keyword Args: all: If False, only show running containers. Default: False. since: Show containers created after container name or id given. before: Show containers created before container name or id given. limit: Show last N created containers. filters: Filter container reported. Available filters: - exited (int): Only containers with specified exit code - status (str): One of restarting, running, paused, exited - label (Union[str, List[str]]): Format either \"key\", \"key=value\" or a list of such. - id (str): The id of the container. - name (str): The name of the container. - ancestor (str): Filter by container ancestor. Format of [:tag], , or Dict[str, Any]: Delete stopped containers. Args: filters: Dict of criteria for determining containers to remove. Available keys are: - until (str): Delete containers before this time - label (List[str]): Labels associated with containers Returns: List of deleted container id's and the freed disk space in bytes. Raises: APIError: If service reports an error", "prompted_full_text": "Implement the Python class `ContainersManager` described below.\n\nClass description:\nSpecialized Manager for Container resources. Attributes: resource: Container subclass of PodmanResource, factory method will create these.\n\nMethod signatures and docstrings:\n- def run(self, image: Union[str, Image], command: Union[str, List[str]]=None, stdout=True, stderr=False, remove: bool=False, **kwargs) -> Union[Container, Sequence[str]]: Run container. By default, run() will wait for the container to finish and return its logs. If detach=True, run() will start the container and return a Container object rather than logs. Args: image: Image to run. command: Command to run in the container. stdout: Include stdout. Default: True. stderr: Include stderr. Default: False. remove: Delete container when the container's processes exit. Default: False. Keyword Args: auto_remove (bool): Enable auto-removal of the container on daemon side when the container's process exits. blkio_weight_device (Dict[str, Any]): Block IO weight (relative device weight) in the form of: [{\"Path\": \"device_path\", \"Weight\": weight}]. blkio_weight (int): Block\n- def create(self, image: Union[Image, str], command: Union[str, List[str]]=None, **kwargs) -> Container: Create a container. See Container.run() for arguments. The following are ignored: stdout, stderr, and remove. Raises: ImageNotFound: If given image does not exist. APIError: If service returns an error.\n- def get(self, container_id: str) -> Container: Get container by name or id. Args: container_id: Container name or id. Raises: NotFound: Container does not exist. 
APIError: Error return by service.\n- def list(self, **kwargs) -> List[Container]: Report on containers. Keyword Args: all: If False, only show running containers. Default: False. since: Show containers created after container name or id given. before: Show containers created before container name or id given. limit: Show last N created containers. filters: Filter container reported. Available filters: - exited (int): Only containers with specified exit code - status (str): One of restarting, running, paused, exited - label (Union[str, List[str]]): Format either \"key\", \"key=value\" or a list of such. - id (str): The id of the container. - name (str): The name of the container. - ancestor (str): Filter by container ancestor. Format of [:tag], , or Dict[str, Any]: Delete stopped containers. Args: filters: Dict of criteria for determining containers to remove. Available keys are: - until (str): Delete containers before this time - label (List[str]): Labels associated with containers Returns: List of deleted container id's and the freed disk space in bytes. Raises: APIError: If service reports an error\n\n<|skeleton|>\nclass ContainersManager:\n \"\"\"Specialized Manager for Container resources. Attributes: resource: Container subclass of PodmanResource, factory method will create these.\"\"\"\n\n def run(self, image: Union[str, Image], command: Union[str, List[str]]=None, stdout=True, stderr=False, remove: bool=False, **kwargs) -> Union[Container, Sequence[str]]:\n \"\"\"Run container. By default, run() will wait for the container to finish and return its logs. If detach=True, run() will start the container and return a Container object rather than logs. Args: image: Image to run. command: Command to run in the container. stdout: Include stdout. Default: True. stderr: Include stderr. Default: False. remove: Delete container when the container's processes exit. Default: False. Keyword Args: auto_remove (bool): Enable auto-removal of the container on daemon side when the container's process exits. blkio_weight_device (Dict[str, Any]): Block IO weight (relative device weight) in the form of: [{\"Path\": \"device_path\", \"Weight\": weight}]. blkio_weight (int): Block\"\"\"\n <|body_0|>\n\n def create(self, image: Union[Image, str], command: Union[str, List[str]]=None, **kwargs) -> Container:\n \"\"\"Create a container. See Container.run() for arguments. The following are ignored: stdout, stderr, and remove. Raises: ImageNotFound: If given image does not exist. APIError: If service returns an error.\"\"\"\n <|body_1|>\n\n def get(self, container_id: str) -> Container:\n \"\"\"Get container by name or id. Args: container_id: Container name or id. Raises: NotFound: Container does not exist. APIError: Error return by service.\"\"\"\n <|body_2|>\n\n def list(self, **kwargs) -> List[Container]:\n \"\"\"Report on containers. Keyword Args: all: If False, only show running containers. Default: False. since: Show containers created after container name or id given. before: Show containers created before container name or id given. limit: Show last N created containers. filters: Filter container reported. Available filters: - exited (int): Only containers with specified exit code - status (str): One of restarting, running, paused, exited - label (Union[str, List[str]]): Format either \"key\", \"key=value\" or a list of such. - id (str): The id of the container. - name (str): The name of the container. - ancestor (str): Filter by container ancestor. 
Format of [:tag], , or \"\"\"\n <|body_3|>\n\n def prune(self, filters: Mapping[str, str]=None) -> Dict[str, Any]:\n \"\"\"Delete stopped containers. Args: filters: Dict of criteria for determining containers to remove. Available keys are: - until (str): Delete containers before this time - label (List[str]): Labels associated with containers Returns: List of deleted container id's and the freed disk space in bytes. Raises: APIError: If service reports an error\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(image, Image):\n image = image.id\n _ = command\n _ = stdout\n _ = stderr\n _ = remove\n _ = kwargs\n raise NotImplementedError\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(image, Image):\n image = image.id\n _ = kwargs\n _ = command\n raise NotImplementedError\n<|end_body_1|>\n\n<|body_start_2|>\n container_id = urllib.parse.quote_plus(container_id)\n response = self.client.get(f'/containers/{container_id}/json')\n body = response.json()\n if response.status_code == 200:\n return self.prepare_model(body)\n if response.status_code == 404:\n raise NotFound(body['cause'], response=response, explanation=body['message'])\n raise APIError(body['cause'], response=response, explanation=body['message'])\n<|end_body_2|>\n\n<|body_start_3|>\n params = {'all': kwargs.get('all', None), 'filters': kwargs.get('filters', dict()), 'limit': kwargs.get('limit', None)}\n if 'before' in kwargs:\n params['filters']['before'] = kwargs.get('before')\n if 'since' in kwargs:\n params['filters']['since'] = kwargs.get('since')\n if len(params['filters']) > 0:\n params['filters'] = api.format_filters(params['filters'])\n response = self.client.get('/containers/json', params=params)\n body = response.json()\n if response.status_code != 200:\n raise APIError(body['cause'], response=response, explanation=body['message'])\n containers: List[Container] = []\n for element in body:\n containers.append(self.prepare_model(element))\n return containers\n<|end_body_3|>\n\n<|body_start_4|>\n params = dict()\n if filters is not None:\n params = {'filters': api.format_filters(filters)}\n response = self.client.post('/containers/prune', params=params)\n body = response.json()\n if response.status_code != 200:\n raise APIError(body['cause'], response=response, explanation=body['message'])\n results = {'ContainersDeleted': [], 'SpaceReclaimed': 0}\n for entry in body:\n if entry.get('error', None) is not None:\n raise APIError(entry['error'], response=response, explanation=entry['error'])\n results['ContainersDeleted'].append(entry['id'])\n results['SpaceReclaimed'] += entry['space']\n return results\n<|end_body_4|>\n", "revision_id": "2788e93ec49f95461d639c1e8c86fc8857fa2a85", "skeleton": "<|skeleton|>\nclass ContainersManager:\n \"\"\"Specialized Manager for Container resources. Attributes: resource: Container subclass of PodmanResource, factory method will create these.\"\"\"\n\n def run(self, image: Union[str, Image], command: Union[str, List[str]]=None, stdout=True, stderr=False, remove: bool=False, **kwargs) -> Union[Container, Sequence[str]]:\n \"\"\"Run container. By default, run() will wait for the container to finish and return its logs. If detach=True, run() will start the container and return a Container object rather than logs. Args: image: Image to run. command: Command to run in the container. stdout: Include stdout. Default: True. stderr: Include stderr. Default: False. remove: Delete container when the container's processes exit. Default: False. 
Keyword Args: auto_remove (bool): Enable auto-removal of the container on daemon side when the container's process exits. blkio_weight_device (Dict[str, Any]): Block IO weight (relative device weight) in the form of: [{\"Path\": \"device_path\", \"Weight\": weight}]. blkio_weight (int): Block\"\"\"\n <|body_0|>\n\n def create(self, image: Union[Image, str], command: Union[str, List[str]]=None, **kwargs) -> Container:\n \"\"\"Create a container. See Container.run() for arguments. The following are ignored: stdout, stderr, and remove. Raises: ImageNotFound: If given image does not exist. APIError: If service returns an error.\"\"\"\n <|body_1|>\n\n def get(self, container_id: str) -> Container:\n \"\"\"Get container by name or id. Args: container_id: Container name or id. Raises: NotFound: Container does not exist. APIError: Error return by service.\"\"\"\n <|body_2|>\n\n def list(self, **kwargs) -> List[Container]:\n \"\"\"Report on containers. Keyword Args: all: If False, only show running containers. Default: False. since: Show containers created after container name or id given. before: Show containers created before container name or id given. limit: Show last N created containers. filters: Filter container reported. Available filters: - exited (int): Only containers with specified exit code - status (str): One of restarting, running, paused, exited - label (Union[str, List[str]]): Format either \"key\", \"key=value\" or a list of such. - id (str): The id of the container. - name (str): The name of the container. - ancestor (str): Filter by container ancestor. Format of [:tag], , or \"\"\"\n <|body_3|>\n\n def prune(self, filters: Mapping[str, str]=None) -> Dict[str, Any]:\n \"\"\"Delete stopped containers. Args: filters: Dict of criteria for determining containers to remove. Available keys are: - until (str): Delete containers before this time - label (List[str]): Labels associated with containers Returns: List of deleted container id's and the freed disk space in bytes. Raises: APIError: If service reports an error\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ContainersManager:\n \"\"\"Specialized Manager for Container resources. Attributes: resource: Container subclass of PodmanResource, factory method will create these.\"\"\"\n\n def run(self, image: Union[str, Image], command: Union[str, List[str]]=None, stdout=True, stderr=False, remove: bool=False, **kwargs) -> Union[Container, Sequence[str]]:\n \"\"\"Run container. By default, run() will wait for the container to finish and return its logs. If detach=True, run() will start the container and return a Container object rather than logs. Args: image: Image to run. command: Command to run in the container. stdout: Include stdout. Default: True. stderr: Include stderr. Default: False. remove: Delete container when the container's processes exit. Default: False. Keyword Args: auto_remove (bool): Enable auto-removal of the container on daemon side when the container's process exits. blkio_weight_device (Dict[str, Any]): Block IO weight (relative device weight) in the form of: [{\"Path\": \"device_path\", \"Weight\": weight}]. blkio_weight (int): Block\"\"\"\n if isinstance(image, Image):\n image = image.id\n _ = command\n _ = stdout\n _ = stderr\n _ = remove\n _ = kwargs\n raise NotImplementedError\n\n def create(self, image: Union[Image, str], command: Union[str, List[str]]=None, **kwargs) -> Container:\n \"\"\"Create a container. 
See Container.run() for arguments. The following are ignored: stdout, stderr, and remove. Raises: ImageNotFound: If given image does not exist. APIError: If service returns an error.\"\"\"\n if isinstance(image, Image):\n image = image.id\n _ = kwargs\n _ = command\n raise NotImplementedError\n\n def get(self, container_id: str) -> Container:\n \"\"\"Get container by name or id. Args: container_id: Container name or id. Raises: NotFound: Container does not exist. APIError: Error return by service.\"\"\"\n container_id = urllib.parse.quote_plus(container_id)\n response = self.client.get(f'/containers/{container_id}/json')\n body = response.json()\n if response.status_code == 200:\n return self.prepare_model(body)\n if response.status_code == 404:\n raise NotFound(body['cause'], response=response, explanation=body['message'])\n raise APIError(body['cause'], response=response, explanation=body['message'])\n\n def list(self, **kwargs) -> List[Container]:\n \"\"\"Report on containers. Keyword Args: all: If False, only show running containers. Default: False. since: Show containers created after container name or id given. before: Show containers created before container name or id given. limit: Show last N created containers. filters: Filter container reported. Available filters: - exited (int): Only containers with specified exit code - status (str): One of restarting, running, paused, exited - label (Union[str, List[str]]): Format either \"key\", \"key=value\" or a list of such. - id (str): The id of the container. - name (str): The name of the container. - ancestor (str): Filter by container ancestor. Format of [:tag], , or \"\"\"\n params = {'all': kwargs.get('all', None), 'filters': kwargs.get('filters', dict()), 'limit': kwargs.get('limit', None)}\n if 'before' in kwargs:\n params['filters']['before'] = kwargs.get('before')\n if 'since' in kwargs:\n params['filters']['since'] = kwargs.get('since')\n if len(params['filters']) > 0:\n params['filters'] = api.format_filters(params['filters'])\n response = self.client.get('/containers/json', params=params)\n body = response.json()\n if response.status_code != 200:\n raise APIError(body['cause'], response=response, explanation=body['message'])\n containers: List[Container] = []\n for element in body:\n containers.append(self.prepare_model(element))\n return containers\n\n def prune(self, filters: Mapping[str, str]=None) -> Dict[str, Any]:\n \"\"\"Delete stopped containers. Args: filters: Dict of criteria for determining containers to remove. Available keys are: - until (str): Delete containers before this time - label (List[str]): Labels associated with containers Returns: List of deleted container id's and the freed disk space in bytes. 
Raises: APIError: If service reports an error\"\"\"\n params = dict()\n if filters is not None:\n params = {'filters': api.format_filters(filters)}\n response = self.client.post('/containers/prune', params=params)\n body = response.json()\n if response.status_code != 200:\n raise APIError(body['cause'], response=response, explanation=body['message'])\n results = {'ContainersDeleted': [], 'SpaceReclaimed': 0}\n for entry in body:\n if entry.get('error', None) is not None:\n raise APIError(entry['error'], response=response, explanation=entry['error'])\n results['ContainersDeleted'].append(entry['id'])\n results['SpaceReclaimed'] += entry['space']\n return results\n", "source": "the_stack_v2_python_sparse", "source_path": "podman/domain/containers_manager.py", "source_repo": "P-a-t-r-i-c-k/podman-py", "split": "val", "star_events_count": 0} {"blob_id": "f253ab8ff8d84bcd66755d1830066a25aa219732", "bodies": ["tag1 = -1\nfor i in range(len(nums) - 2, -1, -1):\n if nums[i] < nums[i + 1]:\n tag1 = i\n break\nif tag1 != -1:\n for i in range(len(nums) - 1, -1, -1):\n if nums[tag1] < nums[i]:\n tmp = nums[tag1]\n nums[tag1] = nums[i]\n nums[i] = tmp\n break\nelse:\n return 1\nleft, right = (tag1 + 1, len(nums) - 1)\nwhile left < right:\n tmp = nums[left]\n nums[left] = nums[right]\n nums[right] = tmp\n left = left + 1\n right = right - 1", "nums = sorted(nums)\nresult = []\nwhile True:\n result.append(nums[:])\n if self.nextPermutation(nums):\n return result"], "bodies_text": "<|body_start_0|>\n tag1 = -1\n for i in range(len(nums) - 2, -1, -1):\n if nums[i] < nums[i + 1]:\n tag1 = i\n break\n if tag1 != -1:\n for i in range(len(nums) - 1, -1, -1):\n if nums[tag1] < nums[i]:\n tmp = nums[tag1]\n nums[tag1] = nums[i]\n nums[i] = tmp\n break\n else:\n return 1\n left, right = (tag1 + 1, len(nums) - 1)\n while left < right:\n tmp = nums[left]\n nums[left] = nums[right]\n nums[right] = tmp\n left = left + 1\n right = right - 1\n<|end_body_0|>\n\n<|body_start_1|>\n nums = sorted(nums)\n result = []\n while True:\n result.append(nums[:])\n if self.nextPermutation(nums):\n return result\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def nextPermutation(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\"\"\"\n <|body_0|>\n\n def permute(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tag1 = -1\n for i in range(len(nums) - 2, -1, -1):\n if nums[i] < nums[i + 1]:\n tag1 = i\n break\n if tag1 != -1:\n for i in range(len(nums) - 1, -1, -1):\n if nums[tag1] < nums[i]:\n tmp = nums[tag1]\n nums[tag1] = nums[i]\n nums[i] = tmp\n break\n else:\n return 1\n left, right = (tag1 + 1, len(nums) - 1)\n while left < right:\n tmp = nums[left]\n nums[left] = nums[right]\n nums[right] = tmp\n left = left + 1\n right = right - 1\n<|end_body_0|>\n\n<|body_start_1|>\n nums = sorted(nums)\n result = []\n while True:\n result.append(nums[:])\n if self.nextPermutation(nums):\n return result\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000026", "length_bytes": 1183, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.", "name": "nextPermutation", "signature": "def nextPermutation(self, nums)"}, {"docstring": ":type nums: List[int] :rtype: 
List[List[int]]", "name": "permute", "signature": "def permute(self, nums)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001192", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def nextPermutation(self, nums): :type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\n- def permute(self, nums): :type nums: List[int] :rtype: List[List[int]]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def nextPermutation(self, nums): :type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\n- def permute(self, nums): :type nums: List[int] :rtype: List[List[int]]\n\n<|skeleton|>\nclass Solution:\n\n def nextPermutation(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\"\"\"\n <|body_0|>\n\n def permute(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tag1 = -1\n for i in range(len(nums) - 2, -1, -1):\n if nums[i] < nums[i + 1]:\n tag1 = i\n break\n if tag1 != -1:\n for i in range(len(nums) - 1, -1, -1):\n if nums[tag1] < nums[i]:\n tmp = nums[tag1]\n nums[tag1] = nums[i]\n nums[i] = tmp\n break\n else:\n return 1\n left, right = (tag1 + 1, len(nums) - 1)\n while left < right:\n tmp = nums[left]\n nums[left] = nums[right]\n nums[right] = tmp\n left = left + 1\n right = right - 1\n<|end_body_0|>\n\n<|body_start_1|>\n nums = sorted(nums)\n result = []\n while True:\n result.append(nums[:])\n if self.nextPermutation(nums):\n return result\n<|end_body_1|>\n", "revision_id": "c2b01374942dcba7fbbe7865d13d7599bbc083f3", "skeleton": "<|skeleton|>\nclass Solution:\n\n def nextPermutation(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\"\"\"\n <|body_0|>\n\n def permute(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def nextPermutation(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\"\"\"\n tag1 = -1\n for i in range(len(nums) - 2, -1, -1):\n if nums[i] < nums[i + 1]:\n tag1 = i\n break\n if tag1 != -1:\n for i in range(len(nums) - 1, -1, -1):\n if nums[tag1] < nums[i]:\n tmp = nums[tag1]\n nums[tag1] = nums[i]\n nums[i] = tmp\n break\n else:\n return 1\n left, right = (tag1 + 1, len(nums) - 1)\n while left < right:\n tmp = nums[left]\n nums[left] = nums[right]\n nums[right] = tmp\n left = left + 1\n right = right - 1\n\n def permute(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n nums = sorted(nums)\n result = []\n while True:\n result.append(nums[:])\n if self.nextPermutation(nums):\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "P0046.py", "source_repo": "chenjiahui1991/LeetCode", "split": "val", "star_events_count": 0} {"blob_id": "ccc9ea9bcf06f62eeb0b6f8cf9c9d24e71d3f002", "bodies": ["board = self.board_class()\nboard.place_token(1, 1, 'X')\nboard.place_token(0, 0, 'O')\nboard.place_token(1, 0, 'X')\nassert str(board) == 'O|X| \\n |X| \\n | | \\n'", "board = 
self.board_class()\nboard.place_token(1, 1, 'X')\nboard.place_token(0, 0, 'O')\nboard.place_token(1, 0, 'X')\nboard.place_token(0, 2, 'O')\nassert board.calc_winner() is None", "board = self.board_class()\nboard.place_token(1, 1, 'X')\nboard.place_token(0, 0, 'O')\nboard.place_token(1, 0, 'X')\nboard.place_token(0, 2, 'O')\nboard.place_token(1, 2, 'X')\nassert board.calc_winner() == 'X'"], "bodies_text": "<|body_start_0|>\n board = self.board_class()\n board.place_token(1, 1, 'X')\n board.place_token(0, 0, 'O')\n board.place_token(1, 0, 'X')\n assert str(board) == 'O|X| \\n |X| \\n | | \\n'\n<|end_body_0|>\n\n<|body_start_1|>\n board = self.board_class()\n board.place_token(1, 1, 'X')\n board.place_token(0, 0, 'O')\n board.place_token(1, 0, 'X')\n board.place_token(0, 2, 'O')\n assert board.calc_winner() is None\n<|end_body_1|>\n\n<|body_start_2|>\n board = self.board_class()\n board.place_token(1, 1, 'X')\n board.place_token(0, 0, 'O')\n board.place_token(1, 0, 'X')\n board.place_token(0, 2, 'O')\n board.place_token(1, 2, 'X')\n assert board.calc_winner() == 'X'\n<|end_body_2|>\n", "class_docstring": "Test class that will test a board. Store the class to be tested in a board_class class variable.", "class_name": "BaseBoardTest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BaseBoardTest:\n \"\"\"Test class that will test a board. Store the class to be tested in a board_class class variable.\"\"\"\n\n def test_str(self):\n \"\"\"Test that the magic string method on a full board works.\"\"\"\n <|body_0|>\n\n def test_calc_winner_none(self):\n \"\"\"Test that calculating a winner returns None when no winner.\"\"\"\n <|body_1|>\n\n def test_winner_won(self):\n \"\"\"Test that calculating a winner returns the winner.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n board = self.board_class()\n board.place_token(1, 1, 'X')\n board.place_token(0, 0, 'O')\n board.place_token(1, 0, 'X')\n assert str(board) == 'O|X| \\n |X| \\n | | \\n'\n<|end_body_0|>\n\n<|body_start_1|>\n board = self.board_class()\n board.place_token(1, 1, 'X')\n board.place_token(0, 0, 'O')\n board.place_token(1, 0, 'X')\n board.place_token(0, 2, 'O')\n assert board.calc_winner() is None\n<|end_body_1|>\n\n<|body_start_2|>\n board = self.board_class()\n board.place_token(1, 1, 'X')\n board.place_token(0, 0, 'O')\n board.place_token(1, 0, 'X')\n board.place_token(0, 2, 'O')\n board.place_token(1, 2, 'X')\n assert board.calc_winner() == 'X'\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000027", "length_bytes": 1716, "license_type": "no_license", "methods": [{"docstring": "Test that the magic string method on a full board works.", "name": "test_str", "signature": "def test_str(self)"}, {"docstring": "Test that calculating a winner returns None when no winner.", "name": "test_calc_winner_none", "signature": "def test_calc_winner_none(self)"}, {"docstring": "Test that calculating a winner returns the winner.", "name": "test_winner_won", "signature": "def test_winner_won(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000647", "prompt": "Implement the Python class `BaseBoardTest` described below.\n\nClass description:\nTest class that will test a board. 
Store the class to be tested in a board_class class variable.\n\nMethod signatures and docstrings:\n- def test_str(self): Test that the magic string method on a full board works.\n- def test_calc_winner_none(self): Test that calculating a winner returns None when no winner.\n- def test_winner_won(self): Test that calculating a winner returns the winner.", "prompted_full_text": "Implement the Python class `BaseBoardTest` described below.\n\nClass description:\nTest class that will test a board. Store the class to be tested in a board_class class variable.\n\nMethod signatures and docstrings:\n- def test_str(self): Test that the magic string method on a full board works.\n- def test_calc_winner_none(self): Test that calculating a winner returns None when no winner.\n- def test_winner_won(self): Test that calculating a winner returns the winner.\n\n<|skeleton|>\nclass BaseBoardTest:\n \"\"\"Test class that will test a board. Store the class to be tested in a board_class class variable.\"\"\"\n\n def test_str(self):\n \"\"\"Test that the magic string method on a full board works.\"\"\"\n <|body_0|>\n\n def test_calc_winner_none(self):\n \"\"\"Test that calculating a winner returns None when no winner.\"\"\"\n <|body_1|>\n\n def test_winner_won(self):\n \"\"\"Test that calculating a winner returns the winner.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n board = self.board_class()\n board.place_token(1, 1, 'X')\n board.place_token(0, 0, 'O')\n board.place_token(1, 0, 'X')\n assert str(board) == 'O|X| \\n |X| \\n | | \\n'\n<|end_body_0|>\n\n<|body_start_1|>\n board = self.board_class()\n board.place_token(1, 1, 'X')\n board.place_token(0, 0, 'O')\n board.place_token(1, 0, 'X')\n board.place_token(0, 2, 'O')\n assert board.calc_winner() is None\n<|end_body_1|>\n\n<|body_start_2|>\n board = self.board_class()\n board.place_token(1, 1, 'X')\n board.place_token(0, 0, 'O')\n board.place_token(1, 0, 'X')\n board.place_token(0, 2, 'O')\n board.place_token(1, 2, 'X')\n assert board.calc_winner() == 'X'\n<|end_body_2|>\n", "revision_id": "1c77e724b3f11a97998972f5a1f65593257e1c99", "skeleton": "<|skeleton|>\nclass BaseBoardTest:\n \"\"\"Test class that will test a board. Store the class to be tested in a board_class class variable.\"\"\"\n\n def test_str(self):\n \"\"\"Test that the magic string method on a full board works.\"\"\"\n <|body_0|>\n\n def test_calc_winner_none(self):\n \"\"\"Test that calculating a winner returns None when no winner.\"\"\"\n <|body_1|>\n\n def test_winner_won(self):\n \"\"\"Test that calculating a winner returns the winner.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BaseBoardTest:\n \"\"\"Test class that will test a board. 
Store the class to be tested in a board_class class variable.\"\"\"\n\n def test_str(self):\n \"\"\"Test that the magic string method on a full board works.\"\"\"\n board = self.board_class()\n board.place_token(1, 1, 'X')\n board.place_token(0, 0, 'O')\n board.place_token(1, 0, 'X')\n assert str(board) == 'O|X| \\n |X| \\n | | \\n'\n\n def test_calc_winner_none(self):\n \"\"\"Test that calculating a winner returns None when no winner.\"\"\"\n board = self.board_class()\n board.place_token(1, 1, 'X')\n board.place_token(0, 0, 'O')\n board.place_token(1, 0, 'X')\n board.place_token(0, 2, 'O')\n assert board.calc_winner() is None\n\n def test_winner_won(self):\n \"\"\"Test that calculating a winner returns the winner.\"\"\"\n board = self.board_class()\n board.place_token(1, 1, 'X')\n board.place_token(0, 0, 'O')\n board.place_token(1, 0, 'X')\n board.place_token(0, 2, 'O')\n board.place_token(1, 2, 'X')\n assert board.calc_winner() == 'X'\n", "source": "the_stack_v2_python_sparse", "source_path": "practice/ttt-interface/boards_test.py", "source_repo": "PdxCodeGuild/Full-Stack-Day-Class", "split": "val", "star_events_count": 7} {"blob_id": "a416b86ff83ecb463976c984fce39daf1025bf30", "bodies": ["user_id = request.user.id\norder = Order.objects.filter(user_id=user_id)\nserializer = OrderSerializer(order, many=True)\nreturn Response({'result': serializer.data, 'message': 'Done', 'status': True}, status=status.HTTP_200_OK)", "data = request.data\naddress_id = request.query_params['address_id']\ndata['user'] = request.user.id\ndata['address'] = address_id\nserializer = OrderCreateSerializer(data=data)\nif serializer.is_valid():\n serializer.save()\n return Response({'result': serializer.data, 'message': 'Done', 'status': True}, status=status.HTTP_201_CREATED)\nreturn Response({'result': serializer.errors, 'message': 'Done', 'status': False}, status=status.HTTP_400_BAD_REQUEST)"], "bodies_text": "<|body_start_0|>\n user_id = request.user.id\n order = Order.objects.filter(user_id=user_id)\n serializer = OrderSerializer(order, many=True)\n return Response({'result': serializer.data, 'message': 'Done', 'status': True}, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.data\n address_id = request.query_params['address_id']\n data['user'] = request.user.id\n data['address'] = address_id\n serializer = OrderCreateSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response({'result': serializer.data, 'message': 'Done', 'status': True}, status=status.HTTP_201_CREATED)\n return Response({'result': serializer.errors, 'message': 'Done', 'status': False}, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "OrderApi", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OrderApi:\n\n def details(self, request, *args, **kwargs):\n \"\"\"list user orders\"\"\"\n <|body_0|>\n\n def create_order(self, request):\n \"\"\"create order takes address_id as query parameter to be inserted while creating the order\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user_id = request.user.id\n order = Order.objects.filter(user_id=user_id)\n serializer = OrderSerializer(order, many=True)\n return Response({'result': serializer.data, 'message': 'Done', 'status': True}, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.data\n address_id = request.query_params['address_id']\n data['user'] = request.user.id\n data['address'] = 
address_id\n serializer = OrderCreateSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response({'result': serializer.data, 'message': 'Done', 'status': True}, status=status.HTTP_201_CREATED)\n return Response({'result': serializer.errors, 'message': 'Done', 'status': False}, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000028", "length_bytes": 1654, "license_type": "no_license", "methods": [{"docstring": "list user orders", "name": "details", "signature": "def details(self, request, *args, **kwargs)"}, {"docstring": "create order takes address_id as query parameter to be inserted while creating the order", "name": "create_order", "signature": "def create_order(self, request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006179", "prompt": "Implement the Python class `OrderApi` described below.\n\nClass description:\nImplement the OrderApi class.\n\nMethod signatures and docstrings:\n- def details(self, request, *args, **kwargs): list user orders\n- def create_order(self, request): create order takes address_id as query parameter to be inserted while creating the order", "prompted_full_text": "Implement the Python class `OrderApi` described below.\n\nClass description:\nImplement the OrderApi class.\n\nMethod signatures and docstrings:\n- def details(self, request, *args, **kwargs): list user orders\n- def create_order(self, request): create order takes address_id as query parameter to be inserted while creating the order\n\n<|skeleton|>\nclass OrderApi:\n\n def details(self, request, *args, **kwargs):\n \"\"\"list user orders\"\"\"\n <|body_0|>\n\n def create_order(self, request):\n \"\"\"create order takes address_id as query parameter to be inserted while creating the order\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user_id = request.user.id\n order = Order.objects.filter(user_id=user_id)\n serializer = OrderSerializer(order, many=True)\n return Response({'result': serializer.data, 'message': 'Done', 'status': True}, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.data\n address_id = request.query_params['address_id']\n data['user'] = request.user.id\n data['address'] = address_id\n serializer = OrderCreateSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response({'result': serializer.data, 'message': 'Done', 'status': True}, status=status.HTTP_201_CREATED)\n return Response({'result': serializer.errors, 'message': 'Done', 'status': False}, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_1|>\n", "revision_id": "51f87a1ceddde427028f7229efcbe6abd730c655", "skeleton": "<|skeleton|>\nclass OrderApi:\n\n def details(self, request, *args, **kwargs):\n \"\"\"list user orders\"\"\"\n <|body_0|>\n\n def create_order(self, request):\n \"\"\"create order takes address_id as query parameter to be inserted while creating the order\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class OrderApi:\n def details(self, request, *args, **kwargs):\n \"\"\"list user orders\"\"\"\n user_id = request.user.id\n order = Order.objects.filter(user_id=user_id)\n serializer = OrderSerializer(order, many=True)\n return Response({'result': serializer.data, 'message': 'Done', 'status': True}, status=status.HTTP_200_OK)\n\n def create_order(self, request):\n \"\"\"create order takes address_id as query parameter to be inserted 
while creating the order\"\"\"\n data = request.data\n address_id = request.query_params['address_id']\n data['user'] = request.user.id\n data['address'] = address_id\n serializer = OrderCreateSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response({'result': serializer.data, 'message': 'Done', 'status': True}, status=status.HTTP_201_CREATED)\n return Response({'result': serializer.errors, 'message': 'Done', 'status': False}, status=status.HTTP_400_BAD_REQUEST)\n", "source": "the_stack_v2_python_sparse", "source_path": "orders/api/views.py", "source_repo": "mofahmi99/e_commerce", "split": "val", "star_events_count": 0} {"blob_id": "c307b1a541a8541710c5cfa34e4a57b40733e25c", "bodies": ["self.params = {}\n'\\n 我们用标准差为weight_scale的高斯分布初始化参数W,\\n 偏置B的初始化都为0:\\n (其中randn函数是基于零均值和标准差的一个高斯分布)\\n '\nself.params['W1'] = weight_scale * np.random.randn(input_dims, hidden_dims)\nself.params['b1'] = np.zeros((hidden_dims,))\nself.params['W2'] = weight_scale * np.random.randn(hidden_dims, num_classes)\nself.params['b2'] = np.zeros((num_classes,))\n'\\n 可以看到,\\n 隐藏层的参数矩阵行数是3*32*32(即上一层输入层的神经元个数),列数是100,\\n 列数是自己定的没有什么依据;\\n 输出层的参数矩阵行数是100(即上一层隐藏层的神经元个数),列数为10,即种类\\n '", "loss, grads = (0, {})\nh1_out, h1_cache = affine_relu_forward(X, self.params['W1'], self.params['b1'])\nscores, out_cache = affine_forward(h1_out, self.params['W2'], self.params['b2'])\nloss, dout = softmax_loss(scores, y)\ndout, dw2, db2 = affine_backward(dout, out_cache)\ngrads['W2'], grads['b2'] = (dw2, db2)\n_, dw1, db1 = affine_relu_backward(dout, h1_cache)\ngrads['W1'], grads['b1'] = (dw1, db1)\n'\\n 可以看到图片样本的数据梯度dout只起到了带路的作用,\\n 最终会被舍弃掉,我们只要loss关于参数的梯度,\\n 然后保存在字典里。\\n '\nreturn (loss, grads)"], "bodies_text": "<|body_start_0|>\n self.params = {}\n '\\n 我们用标准差为weight_scale的高斯分布初始化参数W,\\n 偏置B的初始化都为0:\\n (其中randn函数是基于零均值和标准差的一个高斯分布)\\n '\n self.params['W1'] = weight_scale * np.random.randn(input_dims, hidden_dims)\n self.params['b1'] = np.zeros((hidden_dims,))\n self.params['W2'] = weight_scale * np.random.randn(hidden_dims, num_classes)\n self.params['b2'] = np.zeros((num_classes,))\n '\\n 可以看到,\\n 隐藏层的参数矩阵行数是3*32*32(即上一层输入层的神经元个数),列数是100,\\n 列数是自己定的没有什么依据;\\n 输出层的参数矩阵行数是100(即上一层隐藏层的神经元个数),列数为10,即种类\\n '\n<|end_body_0|>\n\n<|body_start_1|>\n loss, grads = (0, {})\n h1_out, h1_cache = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n scores, out_cache = affine_forward(h1_out, self.params['W2'], self.params['b2'])\n loss, dout = softmax_loss(scores, y)\n dout, dw2, db2 = affine_backward(dout, out_cache)\n grads['W2'], grads['b2'] = (dw2, db2)\n _, dw1, db1 = affine_relu_backward(dout, h1_cache)\n grads['W1'], grads['b1'] = (dw1, db1)\n '\\n 可以看到图片样本的数据梯度dout只起到了带路的作用,\\n 最终会被舍弃掉,我们只要loss关于参数的梯度,\\n 然后保存在字典里。\\n '\n return (loss, grads)\n<|end_body_1|>\n", "class_docstring": "首先,先初始化我们的神经网络。 毕竟,数据从输入层第一次流入到神经网络里,参数(W,B)不能为空,(w,b)是 一层的参数;(W,B)是所有层参数的统一。参数初始化也不能太大或太小,因此 (W,B)的初始化时很重要的,对整个神经网络的训练影响巨大,但如何proper 的初始化参数还没定论。", "class_name": "TwoLayerNet", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TwoLayerNet:\n \"\"\"首先,先初始化我们的神经网络。 毕竟,数据从输入层第一次流入到神经网络里,参数(W,B)不能为空,(w,b)是 一层的参数;(W,B)是所有层参数的统一。参数初始化也不能太大或太小,因此 (W,B)的初始化时很重要的,对整个神经网络的训练影响巨大,但如何proper 的初始化参数还没定论。\"\"\"\n\n def __init__(self, input_dims=32 * 32 * 3, hidden_dims=100, num_classes=10, weight_scale=0.001):\n \"\"\"我们把需要学习的参数(W,B)都存在self.params字典中, 其中每个元素都是numpy array:\"\"\"\n <|body_0|>\n\n def loss(self, X, y):\n \"\"\"首先,输入的数据X是一个多维的array,shape为(样本图片的个数N * 32*32*3), 
y是与输入数据X对应的正确标签,shape为(N,)。 我们的loss函数目标输出一个损失值loss和一个grads字典, 其中存有loss关于隐层和输出层的参数(W,B)的梯度值:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.params = {}\n '\\n 我们用标准差为weight_scale的高斯分布初始化参数W,\\n 偏置B的初始化都为0:\\n (其中randn函数是基于零均值和标准差的一个高斯分布)\\n '\n self.params['W1'] = weight_scale * np.random.randn(input_dims, hidden_dims)\n self.params['b1'] = np.zeros((hidden_dims,))\n self.params['W2'] = weight_scale * np.random.randn(hidden_dims, num_classes)\n self.params['b2'] = np.zeros((num_classes,))\n '\\n 可以看到,\\n 隐藏层的参数矩阵行数是3*32*32(即上一层输入层的神经元个数),列数是100,\\n 列数是自己定的没有什么依据;\\n 输出层的参数矩阵行数是100(即上一层隐藏层的神经元个数),列数为10,即种类\\n '\n<|end_body_0|>\n\n<|body_start_1|>\n loss, grads = (0, {})\n h1_out, h1_cache = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n scores, out_cache = affine_forward(h1_out, self.params['W2'], self.params['b2'])\n loss, dout = softmax_loss(scores, y)\n dout, dw2, db2 = affine_backward(dout, out_cache)\n grads['W2'], grads['b2'] = (dw2, db2)\n _, dw1, db1 = affine_relu_backward(dout, h1_cache)\n grads['W1'], grads['b1'] = (dw1, db1)\n '\\n 可以看到图片样本的数据梯度dout只起到了带路的作用,\\n 最终会被舍弃掉,我们只要loss关于参数的梯度,\\n 然后保存在字典里。\\n '\n return (loss, grads)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000029", "length_bytes": 36287, "license_type": "no_license", "methods": [{"docstring": "我们把需要学习的参数(W,B)都存在self.params字典中, 其中每个元素都是numpy array:", "name": "__init__", "signature": "def __init__(self, input_dims=32 * 32 * 3, hidden_dims=100, num_classes=10, weight_scale=0.001)"}, {"docstring": "首先,输入的数据X是一个多维的array,shape为(样本图片的个数N * 32*32*3), y是与输入数据X对应的正确标签,shape为(N,)。 我们的loss函数目标输出一个损失值loss和一个grads字典, 其中存有loss关于隐层和输出层的参数(W,B)的梯度值:", "name": "loss", "signature": "def loss(self, X, y)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004398", "prompt": "Implement the Python class `TwoLayerNet` described below.\n\nClass description:\n首先,先初始化我们的神经网络。 毕竟,数据从输入层第一次流入到神经网络里,参数(W,B)不能为空,(w,b)是 一层的参数;(W,B)是所有层参数的统一。参数初始化也不能太大或太小,因此 (W,B)的初始化时很重要的,对整个神经网络的训练影响巨大,但如何proper 的初始化参数还没定论。\n\nMethod signatures and docstrings:\n- def __init__(self, input_dims=32 * 32 * 3, hidden_dims=100, num_classes=10, weight_scale=0.001): 我们把需要学习的参数(W,B)都存在self.params字典中, 其中每个元素都是numpy array:\n- def loss(self, X, y): 首先,输入的数据X是一个多维的array,shape为(样本图片的个数N * 32*32*3), y是与输入数据X对应的正确标签,shape为(N,)。 我们的loss函数目标输出一个损失值loss和一个grads字典, 其中存有loss关于隐层和输出层的参数(W,B)的梯度值:", "prompted_full_text": "Implement the Python class `TwoLayerNet` described below.\n\nClass description:\n首先,先初始化我们的神经网络。 毕竟,数据从输入层第一次流入到神经网络里,参数(W,B)不能为空,(w,b)是 一层的参数;(W,B)是所有层参数的统一。参数初始化也不能太大或太小,因此 (W,B)的初始化时很重要的,对整个神经网络的训练影响巨大,但如何proper 的初始化参数还没定论。\n\nMethod signatures and docstrings:\n- def __init__(self, input_dims=32 * 32 * 3, hidden_dims=100, num_classes=10, weight_scale=0.001): 我们把需要学习的参数(W,B)都存在self.params字典中, 其中每个元素都是numpy array:\n- def loss(self, X, y): 首先,输入的数据X是一个多维的array,shape为(样本图片的个数N * 32*32*3), y是与输入数据X对应的正确标签,shape为(N,)。 我们的loss函数目标输出一个损失值loss和一个grads字典, 
其中存有loss关于隐层和输出层的参数(W,B)的梯度值:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.params = {}\n '\\n 我们用标准差为weight_scale的高斯分布初始化参数W,\\n 偏置B的初始化都为0:\\n (其中randn函数是基于零均值和标准差的一个高斯分布)\\n '\n self.params['W1'] = weight_scale * np.random.randn(input_dims, hidden_dims)\n self.params['b1'] = np.zeros((hidden_dims,))\n self.params['W2'] = weight_scale * np.random.randn(hidden_dims, num_classes)\n self.params['b2'] = np.zeros((num_classes,))\n '\\n 可以看到,\\n 隐藏层的参数矩阵行数是3*32*32(即上一层输入层的神经元个数),列数是100,\\n 列数是自己定的没有什么依据;\\n 输出层的参数矩阵行数是100(即上一层隐藏层的神经元个数),列数为10,即种类\\n '\n<|end_body_0|>\n\n<|body_start_1|>\n loss, grads = (0, {})\n h1_out, h1_cache = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n scores, out_cache = affine_forward(h1_out, self.params['W2'], self.params['b2'])\n loss, dout = softmax_loss(scores, y)\n dout, dw2, db2 = affine_backward(dout, out_cache)\n grads['W2'], grads['b2'] = (dw2, db2)\n _, dw1, db1 = affine_relu_backward(dout, h1_cache)\n grads['W1'], grads['b1'] = (dw1, db1)\n '\\n 可以看到图片样本的数据梯度dout只起到了带路的作用,\\n 最终会被舍弃掉,我们只要loss关于参数的梯度,\\n 然后保存在字典里。\\n '\n return (loss, grads)\n<|end_body_1|>\n", "revision_id": "e82d9577d8a7f4ce9950bc7e5a950592dff34bbd", "skeleton": "<|skeleton|>\nclass TwoLayerNet:\n \"\"\"首先,先初始化我们的神经网络。 毕竟,数据从输入层第一次流入到神经网络里,参数(W,B)不能为空,(w,b)是 一层的参数;(W,B)是所有层参数的统一。参数初始化也不能太大或太小,因此 (W,B)的初始化时很重要的,对整个神经网络的训练影响巨大,但如何proper 的初始化参数还没定论。\"\"\"\n\n def __init__(self, input_dims=32 * 32 * 3, hidden_dims=100, num_classes=10, weight_scale=0.001):\n \"\"\"我们把需要学习的参数(W,B)都存在self.params字典中, 其中每个元素都是numpy array:\"\"\"\n <|body_0|>\n\n def loss(self, X, y):\n \"\"\"首先,输入的数据X是一个多维的array,shape为(样本图片的个数N * 32*32*3), y是与输入数据X对应的正确标签,shape为(N,)。 我们的loss函数目标输出一个损失值loss和一个grads字典, 其中存有loss关于隐层和输出层的参数(W,B)的梯度值:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TwoLayerNet:\n \"\"\"首先,先初始化我们的神经网络。 毕竟,数据从输入层第一次流入到神经网络里,参数(W,B)不能为空,(w,b)是 一层的参数;(W,B)是所有层参数的统一。参数初始化也不能太大或太小,因此 (W,B)的初始化时很重要的,对整个神经网络的训练影响巨大,但如何proper 的初始化参数还没定论。\"\"\"\n\n def __init__(self, input_dims=32 * 32 * 3, hidden_dims=100, num_classes=10, weight_scale=0.001):\n \"\"\"我们把需要学习的参数(W,B)都存在self.params字典中, 其中每个元素都是numpy array:\"\"\"\n self.params = {}\n '\\n 我们用标准差为weight_scale的高斯分布初始化参数W,\\n 偏置B的初始化都为0:\\n (其中randn函数是基于零均值和标准差的一个高斯分布)\\n '\n self.params['W1'] = weight_scale * np.random.randn(input_dims, hidden_dims)\n self.params['b1'] = np.zeros((hidden_dims,))\n self.params['W2'] = weight_scale * np.random.randn(hidden_dims, num_classes)\n self.params['b2'] = np.zeros((num_classes,))\n '\\n 可以看到,\\n 隐藏层的参数矩阵行数是3*32*32(即上一层输入层的神经元个数),列数是100,\\n 列数是自己定的没有什么依据;\\n 输出层的参数矩阵行数是100(即上一层隐藏层的神经元个数),列数为10,即种类\\n '\n\n def loss(self, X, y):\n \"\"\"首先,输入的数据X是一个多维的array,shape为(样本图片的个数N * 32*32*3), y是与输入数据X对应的正确标签,shape为(N,)。 我们的loss函数目标输出一个损失值loss和一个grads字典, 其中存有loss关于隐层和输出层的参数(W,B)的梯度值:\"\"\"\n loss, grads = (0, {})\n h1_out, h1_cache = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n scores, out_cache = affine_forward(h1_out, self.params['W2'], self.params['b2'])\n loss, dout = softmax_loss(scores, y)\n dout, dw2, db2 = affine_backward(dout, out_cache)\n grads['W2'], grads['b2'] = (dw2, db2)\n _, dw1, db1 = affine_relu_backward(dout, h1_cache)\n grads['W1'], grads['b1'] = (dw1, db1)\n '\\n 可以看到图片样本的数据梯度dout只起到了带路的作用,\\n 最终会被舍弃掉,我们只要loss关于参数的梯度,\\n 然后保存在字典里。\\n '\n return (loss, grads)\n", "source": "the_stack_v2_python_sparse", "source_path": "assigment2/full_connect.py", 
"source_repo": "hduyuanfu/SHUQI_SHIYAN", "split": "val", "star_events_count": 0} {"blob_id": "c7ab3cf0404fe5ac98dc54572ddb24dd26a58afa", "bodies": ["self.capacity = capacity\nself.queue = deque()\nself.items = {}", "if key in self.items:\n self.queue.remove(key)\n self.queue.appendleft(key)\n return self.items[key]\nelse:\n return -1", "if key in self.items:\n self.queue.remove(key)\nelif len(self.queue) == self.capacity:\n del self.items[self.queue[-1]]\n self.queue.pop()\nself.queue.appendleft(key)\nself.items[key] = value"], "bodies_text": "<|body_start_0|>\n self.capacity = capacity\n self.queue = deque()\n self.items = {}\n<|end_body_0|>\n\n<|body_start_1|>\n if key in self.items:\n self.queue.remove(key)\n self.queue.appendleft(key)\n return self.items[key]\n else:\n return -1\n<|end_body_1|>\n\n<|body_start_2|>\n if key in self.items:\n self.queue.remove(key)\n elif len(self.queue) == self.capacity:\n del self.items[self.queue[-1]]\n self.queue.pop()\n self.queue.appendleft(key)\n self.items[key] = value\n<|end_body_2|>\n", "class_docstring": "", "class_name": "LRUCache", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LRUCache:\n\n def __init__(self, capacity):\n \"\"\":type capacity: int\"\"\"\n <|body_0|>\n\n def get(self, key):\n \"\"\":type key: int :rtype: int\"\"\"\n <|body_1|>\n\n def put(self, key, value):\n \"\"\":type key: int :type value: int :rtype: None\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.capacity = capacity\n self.queue = deque()\n self.items = {}\n<|end_body_0|>\n\n<|body_start_1|>\n if key in self.items:\n self.queue.remove(key)\n self.queue.appendleft(key)\n return self.items[key]\n else:\n return -1\n<|end_body_1|>\n\n<|body_start_2|>\n if key in self.items:\n self.queue.remove(key)\n elif len(self.queue) == self.capacity:\n del self.items[self.queue[-1]]\n self.queue.pop()\n self.queue.appendleft(key)\n self.items[key] = value\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000030", "length_bytes": 1103, "license_type": "no_license", "methods": [{"docstring": ":type capacity: int", "name": "__init__", "signature": "def __init__(self, capacity)"}, {"docstring": ":type key: int :rtype: int", "name": "get", "signature": "def get(self, key)"}, {"docstring": ":type key: int :type value: int :rtype: None", "name": "put", "signature": "def put(self, key, value)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000844", "prompt": "Implement the Python class `LRUCache` described below.\n\nClass description:\nImplement the LRUCache class.\n\nMethod signatures and docstrings:\n- def __init__(self, capacity): :type capacity: int\n- def get(self, key): :type key: int :rtype: int\n- def put(self, key, value): :type key: int :type value: int :rtype: None", "prompted_full_text": "Implement the Python class `LRUCache` described below.\n\nClass description:\nImplement the LRUCache class.\n\nMethod signatures and docstrings:\n- def __init__(self, capacity): :type capacity: int\n- def get(self, key): :type key: int :rtype: int\n- def put(self, key, value): :type key: int :type value: int :rtype: None\n\n<|skeleton|>\nclass LRUCache:\n\n def __init__(self, capacity):\n \"\"\":type capacity: int\"\"\"\n <|body_0|>\n\n def get(self, key):\n \"\"\":type key: int :rtype: int\"\"\"\n <|body_1|>\n\n def put(self, key, value):\n \"\"\":type key: int :type value: int :rtype: None\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.capacity = capacity\n 
self.queue = deque()\n self.items = {}\n<|end_body_0|>\n\n<|body_start_1|>\n if key in self.items:\n self.queue.remove(key)\n self.queue.appendleft(key)\n return self.items[key]\n else:\n return -1\n<|end_body_1|>\n\n<|body_start_2|>\n if key in self.items:\n self.queue.remove(key)\n elif len(self.queue) == self.capacity:\n del self.items[self.queue[-1]]\n self.queue.pop()\n self.queue.appendleft(key)\n self.items[key] = value\n<|end_body_2|>\n", "revision_id": "3e66f89e02ade703715237722eda2fa2b135bb79", "skeleton": "<|skeleton|>\nclass LRUCache:\n\n def __init__(self, capacity):\n \"\"\":type capacity: int\"\"\"\n <|body_0|>\n\n def get(self, key):\n \"\"\":type key: int :rtype: int\"\"\"\n <|body_1|>\n\n def put(self, key, value):\n \"\"\":type key: int :type value: int :rtype: None\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LRUCache:\n def __init__(self, capacity):\n \"\"\":type capacity: int\"\"\"\n self.capacity = capacity\n self.queue = deque()\n self.items = {}\n\n def get(self, key):\n \"\"\":type key: int :rtype: int\"\"\"\n if key in self.items:\n self.queue.remove(key)\n self.queue.appendleft(key)\n return self.items[key]\n else:\n return -1\n\n def put(self, key, value):\n \"\"\":type key: int :type value: int :rtype: None\"\"\"\n if key in self.items:\n self.queue.remove(key)\n elif len(self.queue) == self.capacity:\n del self.items[self.queue[-1]]\n self.queue.pop()\n self.queue.appendleft(key)\n self.items[key] = value\n", "source": "the_stack_v2_python_sparse", "source_path": "Amazon/Design/LRUCache.py", "source_repo": "sameersaini/hackerank", "split": "val", "star_events_count": 0} {"blob_id": "be32339233a4716666f09b2ab0e5801aba4e918d", "bodies": ["if TurboJPEGSingleton.__instance is None:\n TurboJPEGSingleton()\nreturn TurboJPEGSingleton.__instance", "try:\n from turbojpeg import TurboJPEG\n TurboJPEGSingleton.__instance = TurboJPEG()\nexcept Exception:\n _LOGGER.exception('Error loading libturbojpeg; Cameras may impact HomeKit performance')\n TurboJPEGSingleton.__instance = False"], "bodies_text": "<|body_start_0|>\n if TurboJPEGSingleton.__instance is None:\n TurboJPEGSingleton()\n return TurboJPEGSingleton.__instance\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n from turbojpeg import TurboJPEG\n TurboJPEGSingleton.__instance = TurboJPEG()\n except Exception:\n _LOGGER.exception('Error loading libturbojpeg; Cameras may impact HomeKit performance')\n TurboJPEGSingleton.__instance = False\n<|end_body_1|>\n", "class_docstring": "Load TurboJPEG only once. Ensures we do not log load failures each snapshot since camera image fetches happen every few seconds.", "class_name": "TurboJPEGSingleton", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TurboJPEGSingleton:\n \"\"\"Load TurboJPEG only once. 
Ensures we do not log load failures each snapshot since camera image fetches happen every few seconds.\"\"\"\n\n def instance() -> TurboJPEG | Literal[False] | None:\n \"\"\"Singleton for TurboJPEG.\"\"\"\n <|body_0|>\n\n def __init__(self) -> None:\n \"\"\"Try to create TurboJPEG only once.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if TurboJPEGSingleton.__instance is None:\n TurboJPEGSingleton()\n return TurboJPEGSingleton.__instance\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n from turbojpeg import TurboJPEG\n TurboJPEGSingleton.__instance = TurboJPEG()\n except Exception:\n _LOGGER.exception('Error loading libturbojpeg; Cameras may impact HomeKit performance')\n TurboJPEGSingleton.__instance = False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000031", "length_bytes": 3257, "license_type": "permissive", "methods": [{"docstring": "Singleton for TurboJPEG.", "name": "instance", "signature": "def instance() -> TurboJPEG | Literal[False] | None"}, {"docstring": "Try to create TurboJPEG only once.", "name": "__init__", "signature": "def __init__(self) -> None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003120", "prompt": "Implement the Python class `TurboJPEGSingleton` described below.\n\nClass description:\nLoad TurboJPEG only once. Ensures we do not log load failures each snapshot since camera image fetches happen every few seconds.\n\nMethod signatures and docstrings:\n- def instance() -> TurboJPEG | Literal[False] | None: Singleton for TurboJPEG.\n- def __init__(self) -> None: Try to create TurboJPEG only once.", "prompted_full_text": "Implement the Python class `TurboJPEGSingleton` described below.\n\nClass description:\nLoad TurboJPEG only once. Ensures we do not log load failures each snapshot since camera image fetches happen every few seconds.\n\nMethod signatures and docstrings:\n- def instance() -> TurboJPEG | Literal[False] | None: Singleton for TurboJPEG.\n- def __init__(self) -> None: Try to create TurboJPEG only once.\n\n<|skeleton|>\nclass TurboJPEGSingleton:\n \"\"\"Load TurboJPEG only once. Ensures we do not log load failures each snapshot since camera image fetches happen every few seconds.\"\"\"\n\n def instance() -> TurboJPEG | Literal[False] | None:\n \"\"\"Singleton for TurboJPEG.\"\"\"\n <|body_0|>\n\n def __init__(self) -> None:\n \"\"\"Try to create TurboJPEG only once.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if TurboJPEGSingleton.__instance is None:\n TurboJPEGSingleton()\n return TurboJPEGSingleton.__instance\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n from turbojpeg import TurboJPEG\n TurboJPEGSingleton.__instance = TurboJPEG()\n except Exception:\n _LOGGER.exception('Error loading libturbojpeg; Cameras may impact HomeKit performance')\n TurboJPEGSingleton.__instance = False\n<|end_body_1|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass TurboJPEGSingleton:\n \"\"\"Load TurboJPEG only once. Ensures we do not log load failures each snapshot since camera image fetches happen every few seconds.\"\"\"\n\n def instance() -> TurboJPEG | Literal[False] | None:\n \"\"\"Singleton for TurboJPEG.\"\"\"\n <|body_0|>\n\n def __init__(self) -> None:\n \"\"\"Try to create TurboJPEG only once.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TurboJPEGSingleton:\n \"\"\"Load TurboJPEG only once. 
Ensures we do not log load failures each snapshot since camera image fetches happen every few seconds.\"\"\"\n\n def instance() -> TurboJPEG | Literal[False] | None:\n \"\"\"Singleton for TurboJPEG.\"\"\"\n if TurboJPEGSingleton.__instance is None:\n TurboJPEGSingleton()\n return TurboJPEGSingleton.__instance\n\n def __init__(self) -> None:\n \"\"\"Try to create TurboJPEG only once.\"\"\"\n try:\n from turbojpeg import TurboJPEG\n TurboJPEGSingleton.__instance = TurboJPEG()\n except Exception:\n _LOGGER.exception('Error loading libturbojpeg; Cameras may impact HomeKit performance')\n TurboJPEGSingleton.__instance = False\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/camera/img_util.py", "source_repo": "home-assistant/core", "split": "val", "star_events_count": 35501} {"blob_id": "1b2213f9a2d8af807a4f1fa066d4e902809b182e", "bodies": ["self.sensor = Sensor('127.0.0.1', 8000)\nself.pump = P('127.0.0.1', 8000)\nself.pump.set_state = MagicMock(return_value=True)", "controller = Controller(self.sensor, self.pump, Decider(200, 0.1))\nself.sensor.measure = MagicMock(return_value=250)\nself.pump.get_state = MagicMock(return_value=P.PUMP_OFF)\ncontroller.tick()\nself.pump.set_state.assert_called_with(controller.actions['PUMP_OUT'])\nself.sensor.measure = MagicMock(return_value=215)\nself.pump.get_state = MagicMock(return_value=P.PUMP_OUT)\ncontroller.tick()\nself.pump.set_state.assert_called_with(controller.actions['PUMP_OUT'])\nself.sensor.measure = MagicMock(return_value=199)\nself.pump.get_state = MagicMock(return_value=P.PUMP_OUT)\ncontroller.tick()\nself.pump.set_state.assert_called_with(controller.actions['PUMP_OFF'])", "controller = Controller(self.sensor, self.pump, Decider(400, 0.08))\nself.sensor.measure = MagicMock(return_value=199)\nself.pump.get_state = MagicMock(return_value=P.PUMP_OFF)\ncontroller.tick()\nself.pump.set_state.assert_called_with(controller.actions['PUMP_IN'])\nself.sensor.measure = MagicMock(return_value=380)\nself.pump.get_state = MagicMock(return_value=P.PUMP_IN)\ncontroller.tick()\nself.pump.set_state.assert_called_with(controller.actions['PUMP_IN'])\nself.sensor.measure = MagicMock(return_value=401)\nself.pump.get_state = MagicMock(return_value=P.PUMP_IN)\ncontroller.tick()\nself.pump.set_state.assert_called_with(controller.actions['PUMP_OFF'])\nself.sensor.measure = MagicMock(return_value=401)\nself.pump.get_state = MagicMock(return_value=P.PUMP_OFF)\ncontroller.tick()\nself.pump.set_state.assert_called_with(controller.actions['PUMP_OFF'])"], "bodies_text": "<|body_start_0|>\n self.sensor = Sensor('127.0.0.1', 8000)\n self.pump = P('127.0.0.1', 8000)\n self.pump.set_state = MagicMock(return_value=True)\n<|end_body_0|>\n\n<|body_start_1|>\n controller = Controller(self.sensor, self.pump, Decider(200, 0.1))\n self.sensor.measure = MagicMock(return_value=250)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OFF)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OUT'])\n self.sensor.measure = MagicMock(return_value=215)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OUT)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OUT'])\n self.sensor.measure = MagicMock(return_value=199)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OUT)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OFF'])\n<|end_body_1|>\n\n<|body_start_2|>\n controller = Controller(self.sensor, self.pump, Decider(400, 0.08))\n 
self.sensor.measure = MagicMock(return_value=199)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OFF)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_IN'])\n self.sensor.measure = MagicMock(return_value=380)\n self.pump.get_state = MagicMock(return_value=P.PUMP_IN)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_IN'])\n self.sensor.measure = MagicMock(return_value=401)\n self.pump.get_state = MagicMock(return_value=P.PUMP_IN)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OFF'])\n self.sensor.measure = MagicMock(return_value=401)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OFF)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OFF'])\n<|end_body_2|>\n", "class_docstring": "Module tests for the water-regulation module", "class_name": "ModuleTests", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ModuleTests:\n \"\"\"Module tests for the water-regulation module\"\"\"\n\n def setUp(self):\n \"\"\"Declare the sensor and pump objects for each test, and declare the mock for the pump's state setter method.\"\"\"\n <|body_0|>\n\n def test_run_water_regulator1(self):\n \"\"\"Run the sensor, pump, and controller against random real-life situations.\"\"\"\n <|body_1|>\n\n def test_run_water_regulator2(self):\n \"\"\"Run the sensor, pump, and controller against random real-life situations, now with a changed decider.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.sensor = Sensor('127.0.0.1', 8000)\n self.pump = P('127.0.0.1', 8000)\n self.pump.set_state = MagicMock(return_value=True)\n<|end_body_0|>\n\n<|body_start_1|>\n controller = Controller(self.sensor, self.pump, Decider(200, 0.1))\n self.sensor.measure = MagicMock(return_value=250)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OFF)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OUT'])\n self.sensor.measure = MagicMock(return_value=215)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OUT)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OUT'])\n self.sensor.measure = MagicMock(return_value=199)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OUT)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OFF'])\n<|end_body_1|>\n\n<|body_start_2|>\n controller = Controller(self.sensor, self.pump, Decider(400, 0.08))\n self.sensor.measure = MagicMock(return_value=199)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OFF)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_IN'])\n self.sensor.measure = MagicMock(return_value=380)\n self.pump.get_state = MagicMock(return_value=P.PUMP_IN)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_IN'])\n self.sensor.measure = MagicMock(return_value=401)\n self.pump.get_state = MagicMock(return_value=P.PUMP_IN)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OFF'])\n self.sensor.measure = MagicMock(return_value=401)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OFF)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OFF'])\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000032", "length_bytes": 3148, "license_type": "no_license", "methods": [{"docstring": "Declare the sensor and 
pump objects for each test, and declare the mock for the pump's state setter method.", "name": "setUp", "signature": "def setUp(self)"}, {"docstring": "Run the sensor, pump, and controller against random real-life situations.", "name": "test_run_water_regulator1", "signature": "def test_run_water_regulator1(self)"}, {"docstring": "Run the sensor, pump, and controller against random real-life situations, now with a changed decider.", "name": "test_run_water_regulator2", "signature": "def test_run_water_regulator2(self)"}], "n_methods": 3, "prompt": "Implement the Python class `ModuleTests` described below.\n\nClass description:\nModule tests for the water-regulation module\n\nMethod signatures and docstrings:\n- def setUp(self): Declare the sensor and pump objects for each test, and declare the mock for the pump's state setter method.\n- def test_run_water_regulator1(self): Run the sensor, pump, and controller against random real-life situations.\n- def test_run_water_regulator2(self): Run the sensor, pump, and controller against random real-life situations, now with a changed decider.", "prompted_full_text": "Implement the Python class `ModuleTests` described below.\n\nClass description:\nModule tests for the water-regulation module\n\nMethod signatures and docstrings:\n- def setUp(self): Declare the sensor and pump objects for each test, and declare the mock for the pump's state setter method.\n- def test_run_water_regulator1(self): Run the sensor, pump, and controller against random real-life situations.\n- def test_run_water_regulator2(self): Run the sensor, pump, and controller against random real-life situations, now with a changed decider.\n\n<|skeleton|>\nclass ModuleTests:\n \"\"\"Module tests for the water-regulation module\"\"\"\n\n def setUp(self):\n \"\"\"Declare the sensor and pump objects for each test, and declare the mock for the pump's state setter method.\"\"\"\n <|body_0|>\n\n def test_run_water_regulator1(self):\n \"\"\"Run the sensor, pump, and controller against random real-life situations.\"\"\"\n <|body_1|>\n\n def test_run_water_regulator2(self):\n \"\"\"Run the sensor, pump, and controller against random real-life situations, now with a changed decider.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.sensor = Sensor('127.0.0.1', 8000)\n self.pump = P('127.0.0.1', 8000)\n self.pump.set_state = MagicMock(return_value=True)\n<|end_body_0|>\n\n<|body_start_1|>\n controller = Controller(self.sensor, self.pump, Decider(200, 0.1))\n self.sensor.measure = MagicMock(return_value=250)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OFF)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OUT'])\n self.sensor.measure = MagicMock(return_value=215)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OUT)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OUT'])\n self.sensor.measure = MagicMock(return_value=199)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OUT)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OFF'])\n<|end_body_1|>\n\n<|body_start_2|>\n controller = Controller(self.sensor, self.pump, Decider(400, 0.08))\n self.sensor.measure = MagicMock(return_value=199)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OFF)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_IN'])\n self.sensor.measure = MagicMock(return_value=380)\n self.pump.get_state = MagicMock(return_value=P.PUMP_IN)\n 
controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_IN'])\n self.sensor.measure = MagicMock(return_value=401)\n self.pump.get_state = MagicMock(return_value=P.PUMP_IN)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OFF'])\n self.sensor.measure = MagicMock(return_value=401)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OFF)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OFF'])\n<|end_body_2|>\n", "revision_id": "b1fea0309b3495b3e1dc167d7029bc9e4b6f00f1", "skeleton": "<|skeleton|>\nclass ModuleTests:\n \"\"\"Module tests for the water-regulation module\"\"\"\n\n def setUp(self):\n \"\"\"Declare the sensor and pump objects for each test, and declare the mock for the pump's state setter method.\"\"\"\n <|body_0|>\n\n def test_run_water_regulator1(self):\n \"\"\"Run the sensor, pump, and controller against random real-life situations.\"\"\"\n <|body_1|>\n\n def test_run_water_regulator2(self):\n \"\"\"Run the sensor, pump, and controller against random real-life situations, now with a changed decider.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ModuleTests:\n \"\"\"Module tests for the water-regulation module\"\"\"\n\n def setUp(self):\n \"\"\"Declare the sensor and pump objects for each test, and declare the mock for the pump's state setter method.\"\"\"\n self.sensor = Sensor('127.0.0.1', 8000)\n self.pump = P('127.0.0.1', 8000)\n self.pump.set_state = MagicMock(return_value=True)\n\n def test_run_water_regulator1(self):\n \"\"\"Run the sensor, pump, and controller against random real-life situations.\"\"\"\n controller = Controller(self.sensor, self.pump, Decider(200, 0.1))\n self.sensor.measure = MagicMock(return_value=250)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OFF)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OUT'])\n self.sensor.measure = MagicMock(return_value=215)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OUT)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OUT'])\n self.sensor.measure = MagicMock(return_value=199)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OUT)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OFF'])\n\n def test_run_water_regulator2(self):\n \"\"\"Run the sensor, pump, and controller against random real-life situations, now with a changed decider.\"\"\"\n controller = Controller(self.sensor, self.pump, Decider(400, 0.08))\n self.sensor.measure = MagicMock(return_value=199)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OFF)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_IN'])\n self.sensor.measure = MagicMock(return_value=380)\n self.pump.get_state = MagicMock(return_value=P.PUMP_IN)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_IN'])\n self.sensor.measure = MagicMock(return_value=401)\n self.pump.get_state = MagicMock(return_value=P.PUMP_IN)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OFF'])\n self.sensor.measure = MagicMock(return_value=401)\n self.pump.get_state = MagicMock(return_value=P.PUMP_OFF)\n controller.tick()\n self.pump.set_state.assert_called_with(controller.actions['PUMP_OFF'])\n", "source": "the_stack_v2_python_sparse", "source_path": 
"students/Craig_Morton/Lesson06/water-regulation/waterregulation/integrationtest.py", "source_repo": "UWPCE-PythonCert-ClassRepos/SP_Online_Course2_2018", "split": "val", "star_events_count": 4} {"blob_id": "fcbf5cc791337372a8eaf2bb91a6ec75d4306cbf", "bodies": ["for i in range(1, 664):\n url = 'http://www.xicidaili.com/nt/' + str(i)\n yield Request(url, headers=self.header, callback=self.parse)", "ip_list = response.xpath('//table[@id=\"ip_list\"]/tr')\npre_item = XiciItem()\nfor ip in ip_list[1:]:\n pre_item['ip'] = ip.xpath('td[2]/text()')[0].extract()\n pre_item['port'] = ip.xpath('td[3]/text()')[0].extract()\n pre_item['speed'] = ip.xpath('td[7]/div/@title')[0].extract()\n pre_item['site'] = ip.xpath('string(td[4]/a/text())').extract()[0].strip()\n pre_item['surtime'] = ip.xpath('td[9]/text()')[0].extract()\n print(pre_item)\nreturn pre_item"], "bodies_text": "<|body_start_0|>\n for i in range(1, 664):\n url = 'http://www.xicidaili.com/nt/' + str(i)\n yield Request(url, headers=self.header, callback=self.parse)\n<|end_body_0|>\n\n<|body_start_1|>\n ip_list = response.xpath('//table[@id=\"ip_list\"]/tr')\n pre_item = XiciItem()\n for ip in ip_list[1:]:\n pre_item['ip'] = ip.xpath('td[2]/text()')[0].extract()\n pre_item['port'] = ip.xpath('td[3]/text()')[0].extract()\n pre_item['speed'] = ip.xpath('td[7]/div/@title')[0].extract()\n pre_item['site'] = ip.xpath('string(td[4]/a/text())').extract()[0].strip()\n pre_item['surtime'] = ip.xpath('td[9]/text()')[0].extract()\n print(pre_item)\n return pre_item\n<|end_body_1|>\n", "class_docstring": "", "class_name": "XicispiderSpider", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass XicispiderSpider:\n\n def start_requests(self):\n \"\"\"初始url请求返回reponse给解析函数\"\"\"\n <|body_0|>\n\n def parse(self, response):\n \"\"\"解析每一个url返回的rasponse\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for i in range(1, 664):\n url = 'http://www.xicidaili.com/nt/' + str(i)\n yield Request(url, headers=self.header, callback=self.parse)\n<|end_body_0|>\n\n<|body_start_1|>\n ip_list = response.xpath('//table[@id=\"ip_list\"]/tr')\n pre_item = XiciItem()\n for ip in ip_list[1:]:\n pre_item['ip'] = ip.xpath('td[2]/text()')[0].extract()\n pre_item['port'] = ip.xpath('td[3]/text()')[0].extract()\n pre_item['speed'] = ip.xpath('td[7]/div/@title')[0].extract()\n pre_item['site'] = ip.xpath('string(td[4]/a/text())').extract()[0].strip()\n pre_item['surtime'] = ip.xpath('td[9]/text()')[0].extract()\n print(pre_item)\n return pre_item\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000033", "length_bytes": 1310, "license_type": "no_license", "methods": [{"docstring": "初始url请求返回reponse给解析函数", "name": "start_requests", "signature": "def start_requests(self)"}, {"docstring": "解析每一个url返回的rasponse", "name": "parse", "signature": "def parse(self, response)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001514", "prompt": "Implement the Python class `XicispiderSpider` described below.\n\nClass description:\nImplement the XicispiderSpider class.\n\nMethod signatures and docstrings:\n- def start_requests(self): 初始url请求返回reponse给解析函数\n- def parse(self, response): 解析每一个url返回的rasponse", "prompted_full_text": "Implement the Python class `XicispiderSpider` described below.\n\nClass description:\nImplement the XicispiderSpider class.\n\nMethod signatures and docstrings:\n- def start_requests(self): 初始url请求返回reponse给解析函数\n- def parse(self, response): 
解析每一个url返回的rasponse\n\n<|skeleton|>\nclass XicispiderSpider:\n\n def start_requests(self):\n \"\"\"初始url请求返回reponse给解析函数\"\"\"\n <|body_0|>\n\n def parse(self, response):\n \"\"\"解析每一个url返回的rasponse\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for i in range(1, 664):\n url = 'http://www.xicidaili.com/nt/' + str(i)\n yield Request(url, headers=self.header, callback=self.parse)\n<|end_body_0|>\n\n<|body_start_1|>\n ip_list = response.xpath('//table[@id=\"ip_list\"]/tr')\n pre_item = XiciItem()\n for ip in ip_list[1:]:\n pre_item['ip'] = ip.xpath('td[2]/text()')[0].extract()\n pre_item['port'] = ip.xpath('td[3]/text()')[0].extract()\n pre_item['speed'] = ip.xpath('td[7]/div/@title')[0].extract()\n pre_item['site'] = ip.xpath('string(td[4]/a/text())').extract()[0].strip()\n pre_item['surtime'] = ip.xpath('td[9]/text()')[0].extract()\n print(pre_item)\n return pre_item\n<|end_body_1|>\n", "revision_id": "b2ec55d07736a4bc05be893cdecfef4e01b3cf9d", "skeleton": "<|skeleton|>\nclass XicispiderSpider:\n\n def start_requests(self):\n \"\"\"初始url请求返回reponse给解析函数\"\"\"\n <|body_0|>\n\n def parse(self, response):\n \"\"\"解析每一个url返回的rasponse\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class XicispiderSpider:\n def start_requests(self):\n \"\"\"初始url请求返回reponse给解析函数\"\"\"\n for i in range(1, 664):\n url = 'http://www.xicidaili.com/nt/' + str(i)\n yield Request(url, headers=self.header, callback=self.parse)\n\n def parse(self, response):\n \"\"\"解析每一个url返回的rasponse\"\"\"\n ip_list = response.xpath('//table[@id=\"ip_list\"]/tr')\n pre_item = XiciItem()\n for ip in ip_list[1:]:\n pre_item['ip'] = ip.xpath('td[2]/text()')[0].extract()\n pre_item['port'] = ip.xpath('td[3]/text()')[0].extract()\n pre_item['speed'] = ip.xpath('td[7]/div/@title')[0].extract()\n pre_item['site'] = ip.xpath('string(td[4]/a/text())').extract()[0].strip()\n pre_item['surtime'] = ip.xpath('td[9]/text()')[0].extract()\n print(pre_item)\n return pre_item\n", "source": "the_stack_v2_python_sparse", "source_path": "crawler/xici/xici/spiders/xicispider.py", "source_repo": "PubFork/Myrepo", "split": "val", "star_events_count": 0} {"blob_id": "8a5d39c88322e754dc29eebdcfab791e9aedbdae", "bodies": ["meetup = MeetupModels.get_specific(self, meetupId)\nmeetup = json.loads(meetup)\nif not meetup:\n return ({'status': 404, 'error': 'Meetup {} does not exist'.format(meetupId)}, 404)\nreturn ({'status': 200, 'meetups': meetup}, 200)", "delete_meetup = MeetupModels.delete_specific(self, meetupId)\nif delete_meetup is False:\n return ({'status': 404, 'error': 'Meetup {} does not exist'.format(meetupId)}, 404)\nreturn ({'status': 200, 'data': 'Meetup {} deleted'.format(meetupId)}, 200)"], "bodies_text": "<|body_start_0|>\n meetup = MeetupModels.get_specific(self, meetupId)\n meetup = json.loads(meetup)\n if not meetup:\n return ({'status': 404, 'error': 'Meetup {} does not exist'.format(meetupId)}, 404)\n return ({'status': 200, 'meetups': meetup}, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n delete_meetup = MeetupModels.delete_specific(self, meetupId)\n if delete_meetup is False:\n return ({'status': 404, 'error': 'Meetup {} does not exist'.format(meetupId)}, 404)\n return ({'status': 200, 'data': 'Meetup {} deleted'.format(meetupId)}, 200)\n<|end_body_1|>\n", "class_docstring": "Class for specific meetup", "class_name": "OneMeetup", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", 
"full_text": "<|skeleton|>\nclass OneMeetup:\n \"\"\"Class for specific meetup\"\"\"\n\n def get(self, meetupId, current_user):\n \"\"\"Method to fetch specific meetup\"\"\"\n <|body_0|>\n\n def delete(self, meetupId, current_user):\n \"\"\"Method to delete meetup record\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n meetup = MeetupModels.get_specific(self, meetupId)\n meetup = json.loads(meetup)\n if not meetup:\n return ({'status': 404, 'error': 'Meetup {} does not exist'.format(meetupId)}, 404)\n return ({'status': 200, 'meetups': meetup}, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n delete_meetup = MeetupModels.delete_specific(self, meetupId)\n if delete_meetup is False:\n return ({'status': 404, 'error': 'Meetup {} does not exist'.format(meetupId)}, 404)\n return ({'status': 200, 'data': 'Meetup {} deleted'.format(meetupId)}, 200)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000034", "length_bytes": 5065, "license_type": "no_license", "methods": [{"docstring": "Method to fetch specific meetup", "name": "get", "signature": "def get(self, meetupId, current_user)"}, {"docstring": "Method to delete meetup record", "name": "delete", "signature": "def delete(self, meetupId, current_user)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006079", "prompt": "Implement the Python class `OneMeetup` described below.\n\nClass description:\nClass for specific meetup\n\nMethod signatures and docstrings:\n- def get(self, meetupId, current_user): Method to fetch specific meetup\n- def delete(self, meetupId, current_user): Method to delete meetup record", "prompted_full_text": "Implement the Python class `OneMeetup` described below.\n\nClass description:\nClass for specific meetup\n\nMethod signatures and docstrings:\n- def get(self, meetupId, current_user): Method to fetch specific meetup\n- def delete(self, meetupId, current_user): Method to delete meetup record\n\n<|skeleton|>\nclass OneMeetup:\n \"\"\"Class for specific meetup\"\"\"\n\n def get(self, meetupId, current_user):\n \"\"\"Method to fetch specific meetup\"\"\"\n <|body_0|>\n\n def delete(self, meetupId, current_user):\n \"\"\"Method to delete meetup record\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n meetup = MeetupModels.get_specific(self, meetupId)\n meetup = json.loads(meetup)\n if not meetup:\n return ({'status': 404, 'error': 'Meetup {} does not exist'.format(meetupId)}, 404)\n return ({'status': 200, 'meetups': meetup}, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n delete_meetup = MeetupModels.delete_specific(self, meetupId)\n if delete_meetup is False:\n return ({'status': 404, 'error': 'Meetup {} does not exist'.format(meetupId)}, 404)\n return ({'status': 200, 'data': 'Meetup {} deleted'.format(meetupId)}, 200)\n<|end_body_1|>\n", "revision_id": "93c7aeb54c240b6312e6164859acd2c878e85825", "skeleton": "<|skeleton|>\nclass OneMeetup:\n \"\"\"Class for specific meetup\"\"\"\n\n def get(self, meetupId, current_user):\n \"\"\"Method to fetch specific meetup\"\"\"\n <|body_0|>\n\n def delete(self, meetupId, current_user):\n \"\"\"Method to delete meetup record\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class OneMeetup:\n \"\"\"Class for specific meetup\"\"\"\n\n def get(self, meetupId, current_user):\n \"\"\"Method to fetch specific meetup\"\"\"\n meetup = MeetupModels.get_specific(self, meetupId)\n meetup = json.loads(meetup)\n if not meetup:\n return 
({'status': 404, 'error': 'Meetup {} does not exist'.format(meetupId)}, 404)\n return ({'status': 200, 'meetups': meetup}, 200)\n\n def delete(self, meetupId, current_user):\n \"\"\"Method to delete meetup record\"\"\"\n delete_meetup = MeetupModels.delete_specific(self, meetupId)\n if delete_meetup is False:\n return ({'status': 404, 'error': 'Meetup {} does not exist'.format(meetupId)}, 404)\n return ({'status': 200, 'data': 'Meetup {} deleted'.format(meetupId)}, 200)\n", "source": "the_stack_v2_python_sparse", "source_path": "app/api/v2/views/meetup_views.py", "source_repo": "matthenge/Questioner-api-v2", "split": "val", "star_events_count": 0} {"blob_id": "9a7f270b01d48dea90e25dbcde358c6325408bbe", "bodies": ["super().__init__(debug, config_path, file_list, vt_api_key)\nif self.os_name:\n try:\n self._WORKERS = self.config_dict[self.os_name]['scanner']['yara']['threads']\n self._YARA_STORAGE = self.config_dict[self.os_name]['update']['yara']['storage']\n except KeyError:\n self.logger.log('Could not load configuration for: {}'.format(self.os_name), logtype='error')\n sys.exit(0)\nelse:\n self.logger.log('Could not determine the OS', logtype='error')\n sys.exit(0)", "if yara_status:\n yara_files_list = os.listdir(self._YARA_STORAGE)\n for yara_file in yara_files_list:\n if yara_file.endswith('.yar') or yara_file.endswith('.yara'):\n yara_file_path = os.path.join(self._YARA_STORAGE, yara_file)\n rule_compile = yara.compile(yara_file_path)\n matches = rule_compile.match(file_path)\n if matches:\n self.logger.log('Possible malicious file detected: {0}'.format(file_path), logtype='warning')\n if file_path not in self.malicious_file_list:\n self.malicious_file_list.append(file_path)\n super().check_virus_total(file_path)\n return\n return"], "bodies_text": "<|body_start_0|>\n super().__init__(debug, config_path, file_list, vt_api_key)\n if self.os_name:\n try:\n self._WORKERS = self.config_dict[self.os_name]['scanner']['yara']['threads']\n self._YARA_STORAGE = self.config_dict[self.os_name]['update']['yara']['storage']\n except KeyError:\n self.logger.log('Could not load configuration for: {}'.format(self.os_name), logtype='error')\n sys.exit(0)\n else:\n self.logger.log('Could not determine the OS', logtype='error')\n sys.exit(0)\n<|end_body_0|>\n\n<|body_start_1|>\n if yara_status:\n yara_files_list = os.listdir(self._YARA_STORAGE)\n for yara_file in yara_files_list:\n if yara_file.endswith('.yar') or yara_file.endswith('.yara'):\n yara_file_path = os.path.join(self._YARA_STORAGE, yara_file)\n rule_compile = yara.compile(yara_file_path)\n matches = rule_compile.match(file_path)\n if matches:\n self.logger.log('Possible malicious file detected: {0}'.format(file_path), logtype='warning')\n if file_path not in self.malicious_file_list:\n self.malicious_file_list.append(file_path)\n super().check_virus_total(file_path)\n return\n return\n<|end_body_1|>\n", "class_docstring": "YaraScanner class.", "class_name": "YaraScanner", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass YaraScanner:\n \"\"\"YaraScanner class.\"\"\"\n\n def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None):\n \"\"\"Initialize YaraEngine. Args: debug (bool): Log on terminal or not config_path (str): Configuration JSON file path vt_api_key (str): VirusTotal API Key file_list (list): List of files to scan Raises: None Returns: None\"\"\"\n <|body_0|>\n\n def scan_file(self, file_path):\n \"\"\"Scan file using Yara rules. 
Args: file_path (str): Path of the file to scan Raises: None Returns: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(debug, config_path, file_list, vt_api_key)\n if self.os_name:\n try:\n self._WORKERS = self.config_dict[self.os_name]['scanner']['yara']['threads']\n self._YARA_STORAGE = self.config_dict[self.os_name]['update']['yara']['storage']\n except KeyError:\n self.logger.log('Could not load configuration for: {}'.format(self.os_name), logtype='error')\n sys.exit(0)\n else:\n self.logger.log('Could not determine the OS', logtype='error')\n sys.exit(0)\n<|end_body_0|>\n\n<|body_start_1|>\n if yara_status:\n yara_files_list = os.listdir(self._YARA_STORAGE)\n for yara_file in yara_files_list:\n if yara_file.endswith('.yar') or yara_file.endswith('.yara'):\n yara_file_path = os.path.join(self._YARA_STORAGE, yara_file)\n rule_compile = yara.compile(yara_file_path)\n matches = rule_compile.match(file_path)\n if matches:\n self.logger.log('Possible malicious file detected: {0}'.format(file_path), logtype='warning')\n if file_path not in self.malicious_file_list:\n self.malicious_file_list.append(file_path)\n super().check_virus_total(file_path)\n return\n return\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000035", "length_bytes": 3257, "license_type": "permissive", "methods": [{"docstring": "Initialize YaraEngine. Args: debug (bool): Log on terminal or not config_path (str): Configuration JSON file path vt_api_key (str): VirusTotal API Key file_list (list): List of files to scan Raises: None Returns: None", "name": "__init__", "signature": "def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None)"}, {"docstring": "Scan file using Yara rules. Args: file_path (str): Path of the file to scan Raises: None Returns: None", "name": "scan_file", "signature": "def scan_file(self, file_path)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007369", "prompt": "Implement the Python class `YaraScanner` described below.\n\nClass description:\nYaraScanner class.\n\nMethod signatures and docstrings:\n- def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None): Initialize YaraEngine. Args: debug (bool): Log on terminal or not config_path (str): Configuration JSON file path vt_api_key (str): VirusTotal API Key file_list (list): List of files to scan Raises: None Returns: None\n- def scan_file(self, file_path): Scan file using Yara rules. Args: file_path (str): Path of the file to scan Raises: None Returns: None", "prompted_full_text": "Implement the Python class `YaraScanner` described below.\n\nClass description:\nYaraScanner class.\n\nMethod signatures and docstrings:\n- def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None): Initialize YaraEngine. Args: debug (bool): Log on terminal or not config_path (str): Configuration JSON file path vt_api_key (str): VirusTotal API Key file_list (list): List of files to scan Raises: None Returns: None\n- def scan_file(self, file_path): Scan file using Yara rules. Args: file_path (str): Path of the file to scan Raises: None Returns: None\n\n<|skeleton|>\nclass YaraScanner:\n \"\"\"YaraScanner class.\"\"\"\n\n def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None):\n \"\"\"Initialize YaraEngine. 
Args: debug (bool): Log on terminal or not config_path (str): Configuration JSON file path vt_api_key (str): VirusTotal API Key file_list (list): List of files to scan Raises: None Returns: None\"\"\"\n <|body_0|>\n\n def scan_file(self, file_path):\n \"\"\"Scan file using Yara rules. Args: file_path (str): Path of the file to scan Raises: None Returns: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(debug, config_path, file_list, vt_api_key)\n if self.os_name:\n try:\n self._WORKERS = self.config_dict[self.os_name]['scanner']['yara']['threads']\n self._YARA_STORAGE = self.config_dict[self.os_name]['update']['yara']['storage']\n except KeyError:\n self.logger.log('Could not load configuration for: {}'.format(self.os_name), logtype='error')\n sys.exit(0)\n else:\n self.logger.log('Could not determine the OS', logtype='error')\n sys.exit(0)\n<|end_body_0|>\n\n<|body_start_1|>\n if yara_status:\n yara_files_list = os.listdir(self._YARA_STORAGE)\n for yara_file in yara_files_list:\n if yara_file.endswith('.yar') or yara_file.endswith('.yara'):\n yara_file_path = os.path.join(self._YARA_STORAGE, yara_file)\n rule_compile = yara.compile(yara_file_path)\n matches = rule_compile.match(file_path)\n if matches:\n self.logger.log('Possible malicious file detected: {0}'.format(file_path), logtype='warning')\n if file_path not in self.malicious_file_list:\n self.malicious_file_list.append(file_path)\n super().check_virus_total(file_path)\n return\n return\n<|end_body_1|>\n", "revision_id": "43dec187e5848b9ced8a6b4957b6e9028d4d43cd", "skeleton": "<|skeleton|>\nclass YaraScanner:\n \"\"\"YaraScanner class.\"\"\"\n\n def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None):\n \"\"\"Initialize YaraEngine. Args: debug (bool): Log on terminal or not config_path (str): Configuration JSON file path vt_api_key (str): VirusTotal API Key file_list (list): List of files to scan Raises: None Returns: None\"\"\"\n <|body_0|>\n\n def scan_file(self, file_path):\n \"\"\"Scan file using Yara rules. Args: file_path (str): Path of the file to scan Raises: None Returns: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class YaraScanner:\n \"\"\"YaraScanner class.\"\"\"\n\n def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None):\n \"\"\"Initialize YaraEngine. Args: debug (bool): Log on terminal or not config_path (str): Configuration JSON file path vt_api_key (str): VirusTotal API Key file_list (list): List of files to scan Raises: None Returns: None\"\"\"\n super().__init__(debug, config_path, file_list, vt_api_key)\n if self.os_name:\n try:\n self._WORKERS = self.config_dict[self.os_name]['scanner']['yara']['threads']\n self._YARA_STORAGE = self.config_dict[self.os_name]['update']['yara']['storage']\n except KeyError:\n self.logger.log('Could not load configuration for: {}'.format(self.os_name), logtype='error')\n sys.exit(0)\n else:\n self.logger.log('Could not determine the OS', logtype='error')\n sys.exit(0)\n\n def scan_file(self, file_path):\n \"\"\"Scan file using Yara rules. 
Args: file_path (str): Path of the file to scan Raises: None Returns: None\"\"\"\n if yara_status:\n yara_files_list = os.listdir(self._YARA_STORAGE)\n for yara_file in yara_files_list:\n if yara_file.endswith('.yar') or yara_file.endswith('.yara'):\n yara_file_path = os.path.join(self._YARA_STORAGE, yara_file)\n rule_compile = yara.compile(yara_file_path)\n matches = rule_compile.match(file_path)\n if matches:\n self.logger.log('Possible malicious file detected: {0}'.format(file_path), logtype='warning')\n if file_path not in self.malicious_file_list:\n self.malicious_file_list.append(file_path)\n super().check_virus_total(file_path)\n return\n return\n", "source": "the_stack_v2_python_sparse", "source_path": "securetea/lib/antivirus/scanner/yara_scanner.py", "source_repo": "rejahrehim/SecureTea-Project", "split": "val", "star_events_count": 1} {"blob_id": "e8645b24f89a25162c401501d9a79fc8da214a54", "bodies": ["self._cache = cache\nself._package = package\nself._service_manager = service_manager", "self._service_manager.RecordServices()\ntry:\n self._package.mark_install(True, True, False)\n logging.info('Installing...')\n self._cache.commit()\nexcept (apt.cache.FetchFailedException, SystemError) as e:\n logging.error('Could not install package %s: %s', self._package.name, e)\n raise triggers.TriggerError('Could not install package %s: %s' % (self._package.name, str(e)))\nself._service_manager.RecordNewServices()"], "bodies_text": "<|body_start_0|>\n self._cache = cache\n self._package = package\n self._service_manager = service_manager\n<|end_body_0|>\n\n<|body_start_1|>\n self._service_manager.RecordServices()\n try:\n self._package.mark_install(True, True, False)\n logging.info('Installing...')\n self._cache.commit()\n except (apt.cache.FetchFailedException, SystemError) as e:\n logging.error('Could not install package %s: %s', self._package.name, e)\n raise triggers.TriggerError('Could not install package %s: %s' % (self._package.name, str(e)))\n self._service_manager.RecordNewServices()\n<|end_body_1|>\n", "class_docstring": "A Debian \"install\" trigger.", "class_name": "DebInstall", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DebInstall:\n \"\"\"A Debian \"install\" trigger.\"\"\"\n\n def __init__(self, cache, package, service_manager):\n \"\"\"Constructor. Args: cache: An apt cache. package: A package to be installed. service_manager: A service manager for the application.\"\"\"\n <|body_0|>\n\n def RunTrigger(self):\n \"\"\"Run a Debian \"install\" trigger. This trigger first records services present on the system, then installs the package under test, and finally records the newly installed services. 
Raises: triggers.TriggerError if the package cannot be installed.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._cache = cache\n self._package = package\n self._service_manager = service_manager\n<|end_body_0|>\n\n<|body_start_1|>\n self._service_manager.RecordServices()\n try:\n self._package.mark_install(True, True, False)\n logging.info('Installing...')\n self._cache.commit()\n except (apt.cache.FetchFailedException, SystemError) as e:\n logging.error('Could not install package %s: %s', self._package.name, e)\n raise triggers.TriggerError('Could not install package %s: %s' % (self._package.name, str(e)))\n self._service_manager.RecordNewServices()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000036", "length_bytes": 10965, "license_type": "no_license", "methods": [{"docstring": "Constructor. Args: cache: An apt cache. package: A package to be installed. service_manager: A service manager for the application.", "name": "__init__", "signature": "def __init__(self, cache, package, service_manager)"}, {"docstring": "Run a Debian \"install\" trigger. This trigger first records services present on the system, then installs the package under test, and finally records the newly installed services. Raises: triggers.TriggerError if the package cannot be installed.", "name": "RunTrigger", "signature": "def RunTrigger(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001295", "prompt": "Implement the Python class `DebInstall` described below.\n\nClass description:\nA Debian \"install\" trigger.\n\nMethod signatures and docstrings:\n- def __init__(self, cache, package, service_manager): Constructor. Args: cache: An apt cache. package: A package to be installed. service_manager: A service manager for the application.\n- def RunTrigger(self): Run a Debian \"install\" trigger. This trigger first records services present on the system, then installs the package under test, and finally records the newly installed services. Raises: triggers.TriggerError if the package cannot be installed.", "prompted_full_text": "Implement the Python class `DebInstall` described below.\n\nClass description:\nA Debian \"install\" trigger.\n\nMethod signatures and docstrings:\n- def __init__(self, cache, package, service_manager): Constructor. Args: cache: An apt cache. package: A package to be installed. service_manager: A service manager for the application.\n- def RunTrigger(self): Run a Debian \"install\" trigger. This trigger first records services present on the system, then installs the package under test, and finally records the newly installed services. Raises: triggers.TriggerError if the package cannot be installed.\n\n<|skeleton|>\nclass DebInstall:\n \"\"\"A Debian \"install\" trigger.\"\"\"\n\n def __init__(self, cache, package, service_manager):\n \"\"\"Constructor. Args: cache: An apt cache. package: A package to be installed. service_manager: A service manager for the application.\"\"\"\n <|body_0|>\n\n def RunTrigger(self):\n \"\"\"Run a Debian \"install\" trigger. This trigger first records services present on the system, then installs the package under test, and finally records the newly installed services. 
Raises: triggers.TriggerError if the package cannot be installed.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._cache = cache\n self._package = package\n self._service_manager = service_manager\n<|end_body_0|>\n\n<|body_start_1|>\n self._service_manager.RecordServices()\n try:\n self._package.mark_install(True, True, False)\n logging.info('Installing...')\n self._cache.commit()\n except (apt.cache.FetchFailedException, SystemError) as e:\n logging.error('Could not install package %s: %s', self._package.name, e)\n raise triggers.TriggerError('Could not install package %s: %s' % (self._package.name, str(e)))\n self._service_manager.RecordNewServices()\n<|end_body_1|>\n", "revision_id": "3fa5a9d67eb4eea87d3a54b1af5946cec8b67cca", "skeleton": "<|skeleton|>\nclass DebInstall:\n \"\"\"A Debian \"install\" trigger.\"\"\"\n\n def __init__(self, cache, package, service_manager):\n \"\"\"Constructor. Args: cache: An apt cache. package: A package to be installed. service_manager: A service manager for the application.\"\"\"\n <|body_0|>\n\n def RunTrigger(self):\n \"\"\"Run a Debian \"install\" trigger. This trigger first records services present on the system, then installs the package under test, and finally records the newly installed services. Raises: triggers.TriggerError if the package cannot be installed.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DebInstall:\n \"\"\"A Debian \"install\" trigger.\"\"\"\n\n def __init__(self, cache, package, service_manager):\n \"\"\"Constructor. Args: cache: An apt cache. package: A package to be installed. service_manager: A service manager for the application.\"\"\"\n self._cache = cache\n self._package = package\n self._service_manager = service_manager\n\n def RunTrigger(self):\n \"\"\"Run a Debian \"install\" trigger. This trigger first records services present on the system, then installs the package under test, and finally records the newly installed services. 
Raises: triggers.TriggerError if the package cannot be installed.\"\"\"\n self._service_manager.RecordServices()\n try:\n self._package.mark_install(True, True, False)\n logging.info('Installing...')\n self._cache.commit()\n except (apt.cache.FetchFailedException, SystemError) as e:\n logging.error('Could not install package %s: %s', self._package.name, e)\n raise triggers.TriggerError('Could not install package %s: %s' % (self._package.name, str(e)))\n self._service_manager.RecordNewServices()\n", "source": "the_stack_v2_python_sparse", "source_path": "guest/deb_triggers.py", "source_repo": "tojo2000/wheelbarrow", "split": "val", "star_events_count": 0} {"blob_id": "74d8c37775bcba9cac2315c1d7f9a27e52f89daf", "bodies": ["super().__init__(n_head=n_head, n_feat=n_feat, dropout_rate=dropout_rate, max_cache_len=max_cache_len)\nself.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\nif pos_bias_u is None or pos_bias_v is None:\n self.pos_bias_u = nn.Parameter(torch.FloatTensor(self.h, self.d_k))\n self.pos_bias_v = nn.Parameter(torch.FloatTensor(self.h, self.d_k))\n nn.init.zeros_(self.pos_bias_u)\n nn.init.zeros_(self.pos_bias_v)\nelse:\n self.pos_bias_u = pos_bias_u\n self.pos_bias_v = pos_bias_v", "b, h, qlen, pos_len = x.size()\nx = torch.nn.functional.pad(x, pad=(1, 0))\nx = x.view(b, h, -1, qlen)\nx = x[:, :, 1:].view(b, h, qlen, pos_len)\nreturn x", "key, value, query, cache = self.update_cache(key=key, value=value, query=query, cache=cache)\nif torch.is_autocast_enabled():\n query, key, value = (query.to(torch.float32), key.to(torch.float32), value.to(torch.float32))\nwith avoid_float16_autocast_context():\n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2)\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2)\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n matrix_bd = self.rel_shift(matrix_bd)\n matrix_bd = matrix_bd[:, :, :, :matrix_ac.size(-1)]\n scores = (matrix_ac + matrix_bd) / self.s_d_k\n out = self.forward_attention(v, scores, mask)\nif cache is None:\n return out\nelse:\n return (out, cache)"], "bodies_text": "<|body_start_0|>\n super().__init__(n_head=n_head, n_feat=n_feat, dropout_rate=dropout_rate, max_cache_len=max_cache_len)\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\n if pos_bias_u is None or pos_bias_v is None:\n self.pos_bias_u = nn.Parameter(torch.FloatTensor(self.h, self.d_k))\n self.pos_bias_v = nn.Parameter(torch.FloatTensor(self.h, self.d_k))\n nn.init.zeros_(self.pos_bias_u)\n nn.init.zeros_(self.pos_bias_v)\n else:\n self.pos_bias_u = pos_bias_u\n self.pos_bias_v = pos_bias_v\n<|end_body_0|>\n\n<|body_start_1|>\n b, h, qlen, pos_len = x.size()\n x = torch.nn.functional.pad(x, pad=(1, 0))\n x = x.view(b, h, -1, qlen)\n x = x[:, :, 1:].view(b, h, qlen, pos_len)\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n key, value, query, cache = self.update_cache(key=key, value=value, query=query, cache=cache)\n if torch.is_autocast_enabled():\n query, key, value = (query.to(torch.float32), key.to(torch.float32), value.to(torch.float32))\n with avoid_float16_autocast_context():\n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2)\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2)\n 
q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n matrix_bd = self.rel_shift(matrix_bd)\n matrix_bd = matrix_bd[:, :, :, :matrix_ac.size(-1)]\n scores = (matrix_ac + matrix_bd) / self.s_d_k\n out = self.forward_attention(v, scores, mask)\n if cache is None:\n return out\n else:\n return (out, cache)\n<|end_body_2|>\n", "class_docstring": "Multi-Head Attention layer of Transformer-XL with support of relative positional encoding. Paper: https://arxiv.org/abs/1901.02860 Args: n_head (int): number of heads n_feat (int): size of the features dropout_rate (float): dropout rate", "class_name": "RelPositionMultiHeadAttention", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RelPositionMultiHeadAttention:\n \"\"\"Multi-Head Attention layer of Transformer-XL with support of relative positional encoding. Paper: https://arxiv.org/abs/1901.02860 Args: n_head (int): number of heads n_feat (int): size of the features dropout_rate (float): dropout rate\"\"\"\n\n def __init__(self, n_head, n_feat, dropout_rate, pos_bias_u, pos_bias_v, max_cache_len=0):\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\n <|body_0|>\n\n def rel_shift(self, x):\n \"\"\"Compute relative positional encoding. Args: x (torch.Tensor): (batch, nheads, time, 2*time-1)\"\"\"\n <|body_1|>\n\n def forward(self, query, key, value, mask, pos_emb, cache=None):\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. positional encoding. Args: query (torch.Tensor): (batch, time1, size) key (torch.Tensor): (batch, time2, size) value(torch.Tensor): (batch, time2, size) mask (torch.Tensor): (batch, time1, time2) pos_emb (torch.Tensor) : (batch, time1, size) cache (torch.Tensor) : (batch, time_cache, size) Returns: output (torch.Tensor): transformed `value` (batch, time1, d_model) weighted by the query dot key attention cache (torch.Tensor) : (batch, time_cache_next, size)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(n_head=n_head, n_feat=n_feat, dropout_rate=dropout_rate, max_cache_len=max_cache_len)\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\n if pos_bias_u is None or pos_bias_v is None:\n self.pos_bias_u = nn.Parameter(torch.FloatTensor(self.h, self.d_k))\n self.pos_bias_v = nn.Parameter(torch.FloatTensor(self.h, self.d_k))\n nn.init.zeros_(self.pos_bias_u)\n nn.init.zeros_(self.pos_bias_v)\n else:\n self.pos_bias_u = pos_bias_u\n self.pos_bias_v = pos_bias_v\n<|end_body_0|>\n\n<|body_start_1|>\n b, h, qlen, pos_len = x.size()\n x = torch.nn.functional.pad(x, pad=(1, 0))\n x = x.view(b, h, -1, qlen)\n x = x[:, :, 1:].view(b, h, qlen, pos_len)\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n key, value, query, cache = self.update_cache(key=key, value=value, query=query, cache=cache)\n if torch.is_autocast_enabled():\n query, key, value = (query.to(torch.float32), key.to(torch.float32), value.to(torch.float32))\n with avoid_float16_autocast_context():\n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2)\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2)\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, 
-1))\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n matrix_bd = self.rel_shift(matrix_bd)\n matrix_bd = matrix_bd[:, :, :, :matrix_ac.size(-1)]\n scores = (matrix_ac + matrix_bd) / self.s_d_k\n out = self.forward_attention(v, scores, mask)\n if cache is None:\n return out\n else:\n return (out, cache)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000037", "length_bytes": 45820, "license_type": "permissive", "methods": [{"docstring": "Construct an RelPositionMultiHeadedAttention object.", "name": "__init__", "signature": "def __init__(self, n_head, n_feat, dropout_rate, pos_bias_u, pos_bias_v, max_cache_len=0)"}, {"docstring": "Compute relative positional encoding. Args: x (torch.Tensor): (batch, nheads, time, 2*time-1)", "name": "rel_shift", "signature": "def rel_shift(self, x)"}, {"docstring": "Compute 'Scaled Dot Product Attention' with rel. positional encoding. Args: query (torch.Tensor): (batch, time1, size) key (torch.Tensor): (batch, time2, size) value(torch.Tensor): (batch, time2, size) mask (torch.Tensor): (batch, time1, time2) pos_emb (torch.Tensor) : (batch, time1, size) cache (torch.Tensor) : (batch, time_cache, size) Returns: output (torch.Tensor): transformed `value` (batch, time1, d_model) weighted by the query dot key attention cache (torch.Tensor) : (batch, time_cache_next, size)", "name": "forward", "signature": "def forward(self, query, key, value, mask, pos_emb, cache=None)"}], "n_methods": 3, "prompt": "Implement the Python class `RelPositionMultiHeadAttention` described below.\n\nClass description:\nMulti-Head Attention layer of Transformer-XL with support of relative positional encoding. Paper: https://arxiv.org/abs/1901.02860 Args: n_head (int): number of heads n_feat (int): size of the features dropout_rate (float): dropout rate\n\nMethod signatures and docstrings:\n- def __init__(self, n_head, n_feat, dropout_rate, pos_bias_u, pos_bias_v, max_cache_len=0): Construct an RelPositionMultiHeadedAttention object.\n- def rel_shift(self, x): Compute relative positional encoding. Args: x (torch.Tensor): (batch, nheads, time, 2*time-1)\n- def forward(self, query, key, value, mask, pos_emb, cache=None): Compute 'Scaled Dot Product Attention' with rel. positional encoding. Args: query (torch.Tensor): (batch, time1, size) key (torch.Tensor): (batch, time2, size) value(torch.Tensor): (batch, time2, size) mask (torch.Tensor): (batch, time1, time2) pos_emb (torch.Tensor) : (batch, time1, size) cache (torch.Tensor) : (batch, time_cache, size) Returns: output (torch.Tensor): transformed `value` (batch, time1, d_model) weighted by the query dot key attention cache (torch.Tensor) : (batch, time_cache_next, size)", "prompted_full_text": "Implement the Python class `RelPositionMultiHeadAttention` described below.\n\nClass description:\nMulti-Head Attention layer of Transformer-XL with support of relative positional encoding. Paper: https://arxiv.org/abs/1901.02860 Args: n_head (int): number of heads n_feat (int): size of the features dropout_rate (float): dropout rate\n\nMethod signatures and docstrings:\n- def __init__(self, n_head, n_feat, dropout_rate, pos_bias_u, pos_bias_v, max_cache_len=0): Construct an RelPositionMultiHeadedAttention object.\n- def rel_shift(self, x): Compute relative positional encoding. Args: x (torch.Tensor): (batch, nheads, time, 2*time-1)\n- def forward(self, query, key, value, mask, pos_emb, cache=None): Compute 'Scaled Dot Product Attention' with rel. positional encoding. 
Args: query (torch.Tensor): (batch, time1, size) key (torch.Tensor): (batch, time2, size) value(torch.Tensor): (batch, time2, size) mask (torch.Tensor): (batch, time1, time2) pos_emb (torch.Tensor) : (batch, time1, size) cache (torch.Tensor) : (batch, time_cache, size) Returns: output (torch.Tensor): transformed `value` (batch, time1, d_model) weighted by the query dot key attention cache (torch.Tensor) : (batch, time_cache_next, size)\n\n<|skeleton|>\nclass RelPositionMultiHeadAttention:\n \"\"\"Multi-Head Attention layer of Transformer-XL with support of relative positional encoding. Paper: https://arxiv.org/abs/1901.02860 Args: n_head (int): number of heads n_feat (int): size of the features dropout_rate (float): dropout rate\"\"\"\n\n def __init__(self, n_head, n_feat, dropout_rate, pos_bias_u, pos_bias_v, max_cache_len=0):\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\n <|body_0|>\n\n def rel_shift(self, x):\n \"\"\"Compute relative positional encoding. Args: x (torch.Tensor): (batch, nheads, time, 2*time-1)\"\"\"\n <|body_1|>\n\n def forward(self, query, key, value, mask, pos_emb, cache=None):\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. positional encoding. Args: query (torch.Tensor): (batch, time1, size) key (torch.Tensor): (batch, time2, size) value(torch.Tensor): (batch, time2, size) mask (torch.Tensor): (batch, time1, time2) pos_emb (torch.Tensor) : (batch, time1, size) cache (torch.Tensor) : (batch, time_cache, size) Returns: output (torch.Tensor): transformed `value` (batch, time1, d_model) weighted by the query dot key attention cache (torch.Tensor) : (batch, time_cache_next, size)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(n_head=n_head, n_feat=n_feat, dropout_rate=dropout_rate, max_cache_len=max_cache_len)\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\n if pos_bias_u is None or pos_bias_v is None:\n self.pos_bias_u = nn.Parameter(torch.FloatTensor(self.h, self.d_k))\n self.pos_bias_v = nn.Parameter(torch.FloatTensor(self.h, self.d_k))\n nn.init.zeros_(self.pos_bias_u)\n nn.init.zeros_(self.pos_bias_v)\n else:\n self.pos_bias_u = pos_bias_u\n self.pos_bias_v = pos_bias_v\n<|end_body_0|>\n\n<|body_start_1|>\n b, h, qlen, pos_len = x.size()\n x = torch.nn.functional.pad(x, pad=(1, 0))\n x = x.view(b, h, -1, qlen)\n x = x[:, :, 1:].view(b, h, qlen, pos_len)\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n key, value, query, cache = self.update_cache(key=key, value=value, query=query, cache=cache)\n if torch.is_autocast_enabled():\n query, key, value = (query.to(torch.float32), key.to(torch.float32), value.to(torch.float32))\n with avoid_float16_autocast_context():\n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2)\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2)\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n matrix_bd = self.rel_shift(matrix_bd)\n matrix_bd = matrix_bd[:, :, :, :matrix_ac.size(-1)]\n scores = (matrix_ac + matrix_bd) / self.s_d_k\n out = self.forward_attention(v, scores, mask)\n if cache is None:\n return out\n else:\n return (out, cache)\n<|end_body_2|>\n", "revision_id": "c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7", "skeleton": "<|skeleton|>\nclass RelPositionMultiHeadAttention:\n \"\"\"Multi-Head 
Attention layer of Transformer-XL with support of relative positional encoding. Paper: https://arxiv.org/abs/1901.02860 Args: n_head (int): number of heads n_feat (int): size of the features dropout_rate (float): dropout rate\"\"\"\n\n def __init__(self, n_head, n_feat, dropout_rate, pos_bias_u, pos_bias_v, max_cache_len=0):\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\n <|body_0|>\n\n def rel_shift(self, x):\n \"\"\"Compute relative positional encoding. Args: x (torch.Tensor): (batch, nheads, time, 2*time-1)\"\"\"\n <|body_1|>\n\n def forward(self, query, key, value, mask, pos_emb, cache=None):\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. positional encoding. Args: query (torch.Tensor): (batch, time1, size) key (torch.Tensor): (batch, time2, size) value(torch.Tensor): (batch, time2, size) mask (torch.Tensor): (batch, time1, time2) pos_emb (torch.Tensor) : (batch, time1, size) cache (torch.Tensor) : (batch, time_cache, size) Returns: output (torch.Tensor): transformed `value` (batch, time1, d_model) weighted by the query dot key attention cache (torch.Tensor) : (batch, time_cache_next, size)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RelPositionMultiHeadAttention:\n \"\"\"Multi-Head Attention layer of Transformer-XL with support of relative positional encoding. Paper: https://arxiv.org/abs/1901.02860 Args: n_head (int): number of heads n_feat (int): size of the features dropout_rate (float): dropout rate\"\"\"\n\n def __init__(self, n_head, n_feat, dropout_rate, pos_bias_u, pos_bias_v, max_cache_len=0):\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\n super().__init__(n_head=n_head, n_feat=n_feat, dropout_rate=dropout_rate, max_cache_len=max_cache_len)\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\n if pos_bias_u is None or pos_bias_v is None:\n self.pos_bias_u = nn.Parameter(torch.FloatTensor(self.h, self.d_k))\n self.pos_bias_v = nn.Parameter(torch.FloatTensor(self.h, self.d_k))\n nn.init.zeros_(self.pos_bias_u)\n nn.init.zeros_(self.pos_bias_v)\n else:\n self.pos_bias_u = pos_bias_u\n self.pos_bias_v = pos_bias_v\n\n def rel_shift(self, x):\n \"\"\"Compute relative positional encoding. Args: x (torch.Tensor): (batch, nheads, time, 2*time-1)\"\"\"\n b, h, qlen, pos_len = x.size()\n x = torch.nn.functional.pad(x, pad=(1, 0))\n x = x.view(b, h, -1, qlen)\n x = x[:, :, 1:].view(b, h, qlen, pos_len)\n return x\n\n def forward(self, query, key, value, mask, pos_emb, cache=None):\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. positional encoding. 
Args: query (torch.Tensor): (batch, time1, size) key (torch.Tensor): (batch, time2, size) value(torch.Tensor): (batch, time2, size) mask (torch.Tensor): (batch, time1, time2) pos_emb (torch.Tensor) : (batch, time1, size) cache (torch.Tensor) : (batch, time_cache, size) Returns: output (torch.Tensor): transformed `value` (batch, time1, d_model) weighted by the query dot key attention cache (torch.Tensor) : (batch, time_cache_next, size)\"\"\"\n key, value, query, cache = self.update_cache(key=key, value=value, query=query, cache=cache)\n if torch.is_autocast_enabled():\n query, key, value = (query.to(torch.float32), key.to(torch.float32), value.to(torch.float32))\n with avoid_float16_autocast_context():\n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2)\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2)\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n matrix_bd = self.rel_shift(matrix_bd)\n matrix_bd = matrix_bd[:, :, :, :matrix_ac.size(-1)]\n scores = (matrix_ac + matrix_bd) / self.s_d_k\n out = self.forward_attention(v, scores, mask)\n if cache is None:\n return out\n else:\n return (out, cache)\n", "source": "the_stack_v2_python_sparse", "source_path": "nemo/collections/asr/parts/submodules/multi_head_attention.py", "source_repo": "NVIDIA/NeMo", "split": "val", "star_events_count": 7957} {"blob_id": "e96660cb3378dd4cc6e0dae6ceffa1dcdc064953", "bodies": ["object.__init__(self)\nself.name = name\nself.decls = decls", "for d in self.decls:\n callable_ = getattr(d, self.name)\n callable_(*arguments, **keywords)"], "bodies_text": "<|body_start_0|>\n object.__init__(self)\n self.name = name\n self.decls = decls\n<|end_body_0|>\n\n<|body_start_1|>\n for d in self.decls:\n callable_ = getattr(d, self.name)\n callable_(*arguments, **keywords)\n<|end_body_1|>\n", "class_docstring": "Internal class used to call some function of objects", "class_name": "call_redirector_t", "detected_licenses": ["IJG", "Zlib", "LicenseRef-scancode-proprietary-license", "SMLNJ", "BSD-3-Clause", "BSD-4.3TAHOE", "LicenseRef-scancode-free-unknown", "Spencer-86", "LicenseRef-scancode-llnl", "FSFUL", "Libpng", "libtiff", "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-other-permissive", "LicenseRef-scancode-hdf5", "MIT", "NTP", "LicenseRef-scancode-mit-old-style", "GPL-1.0-or-later", "LicenseRef-scancode-unknown-license-reference", "MPL-2.0", "Apache-2.0", "LicenseRef-scancode-public-domain", "BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass call_redirector_t:\n \"\"\"Internal class used to call some function of objects\"\"\"\n\n def __init__(self, name, decls):\n \"\"\"creates call_redirector_t instance. 
:param name: name of method, to be called on every object in the `decls` list :param decls: list of objects\"\"\"\n <|body_0|>\n\n def __call__(self, *arguments, **keywords):\n \"\"\"calls method :attr:`call_redirector_t.name` on every object within the :attr:`call_redirector_t.decls` list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n object.__init__(self)\n self.name = name\n self.decls = decls\n<|end_body_0|>\n\n<|body_start_1|>\n for d in self.decls:\n callable_ = getattr(d, self.name)\n callable_(*arguments, **keywords)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000038", "length_bytes": 3174, "license_type": "permissive", "methods": [{"docstring": "creates call_redirector_t instance. :param name: name of method, to be called on every object in the `decls` list :param decls: list of objects", "name": "__init__", "signature": "def __init__(self, name, decls)"}, {"docstring": "calls method :attr:`call_redirector_t.name` on every object within the :attr:`call_redirector_t.decls` list", "name": "__call__", "signature": "def __call__(self, *arguments, **keywords)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003331", "prompt": "Implement the Python class `call_redirector_t` described below.\n\nClass description:\nInternal class used to call some function of objects\n\nMethod signatures and docstrings:\n- def __init__(self, name, decls): creates call_redirector_t instance. :param name: name of method, to be called on every object in the `decls` list :param decls: list of objects\n- def __call__(self, *arguments, **keywords): calls method :attr:`call_redirector_t.name` on every object within the :attr:`call_redirector_t.decls` list", "prompted_full_text": "Implement the Python class `call_redirector_t` described below.\n\nClass description:\nInternal class used to call some function of objects\n\nMethod signatures and docstrings:\n- def __init__(self, name, decls): creates call_redirector_t instance. :param name: name of method, to be called on every object in the `decls` list :param decls: list of objects\n- def __call__(self, *arguments, **keywords): calls method :attr:`call_redirector_t.name` on every object within the :attr:`call_redirector_t.decls` list\n\n<|skeleton|>\nclass call_redirector_t:\n \"\"\"Internal class used to call some function of objects\"\"\"\n\n def __init__(self, name, decls):\n \"\"\"creates call_redirector_t instance. :param name: name of method, to be called on every object in the `decls` list :param decls: list of objects\"\"\"\n <|body_0|>\n\n def __call__(self, *arguments, **keywords):\n \"\"\"calls method :attr:`call_redirector_t.name` on every object within the :attr:`call_redirector_t.decls` list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n object.__init__(self)\n self.name = name\n self.decls = decls\n<|end_body_0|>\n\n<|body_start_1|>\n for d in self.decls:\n callable_ = getattr(d, self.name)\n callable_(*arguments, **keywords)\n<|end_body_1|>\n", "revision_id": "3eb8fd7cdfbc5ac2d0c2e5e776848a4cbab3d7e1", "skeleton": "<|skeleton|>\nclass call_redirector_t:\n \"\"\"Internal class used to call some function of objects\"\"\"\n\n def __init__(self, name, decls):\n \"\"\"creates call_redirector_t instance. 
:param name: name of method, to be called on every object in the `decls` list :param decls: list of objects\"\"\"\n <|body_0|>\n\n def __call__(self, *arguments, **keywords):\n \"\"\"calls method :attr:`call_redirector_t.name` on every object within the :attr:`call_redirector_t.decls` list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class call_redirector_t:\n \"\"\"Internal class used to call some function of objects\"\"\"\n\n def __init__(self, name, decls):\n \"\"\"creates call_redirector_t instance. :param name: name of method, to be called on every object in the `decls` list :param decls: list of objects\"\"\"\n object.__init__(self)\n self.name = name\n self.decls = decls\n\n def __call__(self, *arguments, **keywords):\n \"\"\"calls method :attr:`call_redirector_t.name` on every object within the :attr:`call_redirector_t.decls` list\"\"\"\n for d in self.decls:\n callable_ = getattr(d, self.name)\n callable_(*arguments, **keywords)\n", "source": "the_stack_v2_python_sparse", "source_path": "Modules/ThirdParty/pygccxml/src/pygccxml/declarations/mdecl_wrapper.py", "source_repo": "InsightSoftwareConsortium/ITK", "split": "val", "star_events_count": 1229} {"blob_id": "49a56d1af660c07dae8fbedcbca98615c35e1d64", "bodies": ["s = len(matrix)\nfor i in range(s):\n for j in range(s - i):\n matrix[i][j], matrix[s - j - 1][s - i - 1] = (matrix[s - j - 1][s - i - 1], matrix[i][j])\nfor i in range(s / 2):\n for j in range(s):\n matrix[i][j], matrix[s - 1 - i][j] = (matrix[s - 1 - i][j], matrix[i][j])", "lo = 0\nhi = len(matrix) - 1\nwhile lo < hi:\n for i in range(hi - lo):\n temp = matrix[lo][lo + i]\n matrix[lo][lo + i] = matrix[hi - i][lo]\n matrix[hi - i][lo] = matrix[hi][hi - i]\n matrix[hi][hi - i] = matrix[lo + i][hi]\n matrix[lo + i][hi] = temp\n lo += 1\n hi -= 1"], "bodies_text": "<|body_start_0|>\n s = len(matrix)\n for i in range(s):\n for j in range(s - i):\n matrix[i][j], matrix[s - j - 1][s - i - 1] = (matrix[s - j - 1][s - i - 1], matrix[i][j])\n for i in range(s / 2):\n for j in range(s):\n matrix[i][j], matrix[s - 1 - i][j] = (matrix[s - 1 - i][j], matrix[i][j])\n<|end_body_0|>\n\n<|body_start_1|>\n lo = 0\n hi = len(matrix) - 1\n while lo < hi:\n for i in range(hi - lo):\n temp = matrix[lo][lo + i]\n matrix[lo][lo + i] = matrix[hi - i][lo]\n matrix[hi - i][lo] = matrix[hi][hi - i]\n matrix[hi][hi - i] = matrix[lo + i][hi]\n matrix[lo + i][hi] = temp\n lo += 1\n hi -= 1\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def rotate(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_0|>\n\n def rotate_2(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n s = len(matrix)\n for i in range(s):\n for j in range(s - i):\n matrix[i][j], matrix[s - j - 1][s - i - 1] = (matrix[s - j - 1][s - i - 1], matrix[i][j])\n for i in range(s / 2):\n for j in range(s):\n matrix[i][j], matrix[s - 1 - i][j] = (matrix[s - 1 - i][j], matrix[i][j])\n<|end_body_0|>\n\n<|body_start_1|>\n lo = 0\n hi = len(matrix) - 1\n while lo < hi:\n for i in range(hi - lo):\n temp = matrix[lo][lo + i]\n matrix[lo][lo + i] = matrix[hi - i][lo]\n 
matrix[hi - i][lo] = matrix[hi][hi - i]\n matrix[hi][hi - i] = matrix[lo + i][hi]\n matrix[lo + i][hi] = temp\n lo += 1\n hi -= 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000039", "length_bytes": 1160, "license_type": "no_license", "methods": [{"docstring": ":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.", "name": "rotate", "signature": "def rotate(self, matrix)"}, {"docstring": ":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.", "name": "rotate_2", "signature": "def rotate_2(self, matrix)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004177", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def rotate(self, matrix): :type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\n- def rotate_2(self, matrix): :type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def rotate(self, matrix): :type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\n- def rotate_2(self, matrix): :type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\n\n<|skeleton|>\nclass Solution:\n\n def rotate(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_0|>\n\n def rotate_2(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n s = len(matrix)\n for i in range(s):\n for j in range(s - i):\n matrix[i][j], matrix[s - j - 1][s - i - 1] = (matrix[s - j - 1][s - i - 1], matrix[i][j])\n for i in range(s / 2):\n for j in range(s):\n matrix[i][j], matrix[s - 1 - i][j] = (matrix[s - 1 - i][j], matrix[i][j])\n<|end_body_0|>\n\n<|body_start_1|>\n lo = 0\n hi = len(matrix) - 1\n while lo < hi:\n for i in range(hi - lo):\n temp = matrix[lo][lo + i]\n matrix[lo][lo + i] = matrix[hi - i][lo]\n matrix[hi - i][lo] = matrix[hi][hi - i]\n matrix[hi][hi - i] = matrix[lo + i][hi]\n matrix[lo + i][hi] = temp\n lo += 1\n hi -= 1\n<|end_body_1|>\n", "revision_id": "bd8df12c0d4afd048cf1b58b04c27fa1f3622769", "skeleton": "<|skeleton|>\nclass Solution:\n\n def rotate(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_0|>\n\n def rotate_2(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def rotate(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n s = len(matrix)\n for i in range(s):\n for j in range(s - i):\n matrix[i][j], matrix[s - j - 1][s - i - 1] = (matrix[s - j - 1][s - i - 1], matrix[i][j])\n for i in range(s / 2):\n for j in range(s):\n matrix[i][j], matrix[s - 1 - i][j] = (matrix[s - 1 - i][j], matrix[i][j])\n\n def rotate_2(self, 
matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n lo = 0\n hi = len(matrix) - 1\n while lo < hi:\n for i in range(hi - lo):\n temp = matrix[lo][lo + i]\n matrix[lo][lo + i] = matrix[hi - i][lo]\n matrix[hi - i][lo] = matrix[hi][hi - i]\n matrix[hi][hi - i] = matrix[lo + i][hi]\n matrix[lo + i][hi] = temp\n lo += 1\n hi -= 1\n", "source": "the_stack_v2_python_sparse", "source_path": "48_rotate_image.py", "source_repo": "aojugg/leetcode", "split": "val", "star_events_count": 0} {"blob_id": "72dc28be21e84567063e62a7b57fe638d79ba093", "bodies": ["networks = self.sf.query_all(format_soql('SELECT Id FROM Network WHERE Name = {network_name} LIMIT 1', network_name=network_name))\nif not networks['records']:\n raise SalesforceException(f'No Network record found with Name \"{network_name}\"')\nself.logger.info(f'Creating NetworkMemberGroup records for {network_name} Network:')\nreturn networks['records'][0]['Id']", "network_member_group_parent_ids = set()\nfor record in self.sf.query_all(f\"SELECT ParentId FROM NetworkMemberGroup WHERE NetworkId = '{network_id}'\")['records']:\n network_member_group_parent_ids.add(record['ParentId'])\nreturn network_member_group_parent_ids", "parent_ids_by_name = dict(((name, None) for name in record_names))\nif sobject_type == 'PermissionSet':\n field_key = 'Label'\nelse:\n field_key = 'Name'\nfor record in self.sf.query_all(\"SELECT Id, {} FROM {} WHERE {} IN ('{}')\".format(field_key, sobject_type, field_key, \"','\".join(record_names)))['records']:\n record_name = record[field_key]\n parent_ids_by_name[record_name] = record['Id']\nreturn parent_ids_by_name", "if not record_names:\n return\nself.logger.info(f' {sobject_type}:')\nparent_ids_by_name = self._get_parent_ids_by_name(sobject_type, record_names)\nfor parent_name, parent_id in parent_ids_by_name.items():\n self._create_network_member_group(sobject_type, parent_name, parent_id)", "if not parent_id:\n raise CumulusCIException(f'No {sobject_type} record found with Name \"{parent_name}\"')\nif parent_id in self._parent_ids:\n self.logger.warning(f' Already exists for \"{parent_name}\"')\nelse:\n insert_response = self.sf.NetworkMemberGroup.create({'NetworkId': self._network_id, 'ParentId': parent_id})\n if insert_response.get('success') is True:\n self.logger.info(f' \"{parent_name}\"')\n else:\n raise SalesforceException(f'''Error creating NetworkMemberGroup for Network \"{self._network_id}\" for parent {sobject_type} \"{parent_name}\" {parent_id}. 
Errors: {', '.join(insert_response.get('errors') or [])}''')", "self._network_id = self._get_network_id(self.options['network_name'])\nself._parent_ids = self._get_network_member_group_parent_ids(self._network_id)\nfor sobject_type, record_names in {'Profile': process_list_arg(self.options.get('profile_names') or []), 'PermissionSet': process_list_arg(self.options.get('permission_set_names') or [])}.items():\n self._process_parent(sobject_type, record_names)"], "bodies_text": "<|body_start_0|>\n networks = self.sf.query_all(format_soql('SELECT Id FROM Network WHERE Name = {network_name} LIMIT 1', network_name=network_name))\n if not networks['records']:\n raise SalesforceException(f'No Network record found with Name \"{network_name}\"')\n self.logger.info(f'Creating NetworkMemberGroup records for {network_name} Network:')\n return networks['records'][0]['Id']\n<|end_body_0|>\n\n<|body_start_1|>\n network_member_group_parent_ids = set()\n for record in self.sf.query_all(f\"SELECT ParentId FROM NetworkMemberGroup WHERE NetworkId = '{network_id}'\")['records']:\n network_member_group_parent_ids.add(record['ParentId'])\n return network_member_group_parent_ids\n<|end_body_1|>\n\n<|body_start_2|>\n parent_ids_by_name = dict(((name, None) for name in record_names))\n if sobject_type == 'PermissionSet':\n field_key = 'Label'\n else:\n field_key = 'Name'\n for record in self.sf.query_all(\"SELECT Id, {} FROM {} WHERE {} IN ('{}')\".format(field_key, sobject_type, field_key, \"','\".join(record_names)))['records']:\n record_name = record[field_key]\n parent_ids_by_name[record_name] = record['Id']\n return parent_ids_by_name\n<|end_body_2|>\n\n<|body_start_3|>\n if not record_names:\n return\n self.logger.info(f' {sobject_type}:')\n parent_ids_by_name = self._get_parent_ids_by_name(sobject_type, record_names)\n for parent_name, parent_id in parent_ids_by_name.items():\n self._create_network_member_group(sobject_type, parent_name, parent_id)\n<|end_body_3|>\n\n<|body_start_4|>\n if not parent_id:\n raise CumulusCIException(f'No {sobject_type} record found with Name \"{parent_name}\"')\n if parent_id in self._parent_ids:\n self.logger.warning(f' Already exists for \"{parent_name}\"')\n else:\n insert_response = self.sf.NetworkMemberGroup.create({'NetworkId': self._network_id, 'ParentId': parent_id})\n if insert_response.get('success') is True:\n self.logger.info(f' \"{parent_name}\"')\n else:\n raise SalesforceException(f'''Error creating NetworkMemberGroup for Network \"{self._network_id}\" for parent {sobject_type} \"{parent_name}\" {parent_id}. Errors: {', '.join(insert_response.get('errors') or [])}''')\n<|end_body_4|>\n\n<|body_start_5|>\n self._network_id = self._get_network_id(self.options['network_name'])\n self._parent_ids = self._get_network_member_group_parent_ids(self._network_id)\n for sobject_type, record_names in {'Profile': process_list_arg(self.options.get('profile_names') or []), 'PermissionSet': process_list_arg(self.options.get('permission_set_names') or [])}.items():\n self._process_parent(sobject_type, record_names)\n<|end_body_5|>\n", "class_docstring": "Creates NetworkMemberGroup for a Network (Experience Site) for Profiles and Permission Sets that don't already have a corresponding NetworkMemberGroup. 
Raises exceptions if records cannot be found: - Network with Name network_name - Profiles with Names in profile_names - Permission Sets with Names in permission_set_names", "class_name": "CreateNetworkMemberGroups", "detected_licenses": ["LicenseRef-scancode-free-unknown"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CreateNetworkMemberGroups:\n \"\"\"Creates NetworkMemberGroup for a Network (Experience Site) for Profiles and Permission Sets that don't already have a corresponding NetworkMemberGroup. Raises exceptions if records cannot be found: - Network with Name network_name - Profiles with Names in profile_names - Permission Sets with Names in permission_set_names\"\"\"\n\n def _get_network_id(self, network_name: str) -> str:\n \"\"\"Returns Id of Network record with Name network_name. Raises a SalesforceException if no Network is found.\"\"\"\n <|body_0|>\n\n def _get_network_member_group_parent_ids(self, network_id) -> set:\n \"\"\"Collect existing NetworkMemberGroup Parent IDs (associated Profile or Permission Set ID). An excpetion is thrown trying to create a NetworkMemberGroup for a parent who already has a record.\"\"\"\n <|body_1|>\n\n def _get_parent_ids_by_name(self, sobject_type: str, record_names: List[str]):\n \"\"\"Returns a Dict: Name --> ID of records with Name in record_names for sObject_type. Dict value are None for all record_names that do not have corresponding records.\"\"\"\n <|body_2|>\n\n def _process_parent(self, sobject_type, record_names) -> None:\n \"\"\"For a specific sobject_type and record_names, queries all Salesforce IDs corresponding to records of SObjectType sobject_type with Name in record_names. Then, tries to create NetworkMemberGroup for each parent in record_names.\"\"\"\n <|body_3|>\n\n def _create_network_member_group(self, sobject_type, parent_name, parent_id) -> None:\n \"\"\"Processes and logs creating a NetworkMemberGroup for a specific parent. Outcomes: - Raises a CumulusCIException if record_id is None meaning no corresponding record was found in _get_parent_ids_by_name. - Logs a warning that a NetworkMemberGroup already exists is parent_id is in self._parent_ids. 
- Creates a NetworkMemberGroup for parent_id and logs the result.\"\"\"\n <|body_4|>\n\n def _run_task(self):\n \"\"\"Gets required information then tries to create NetworkMemberGroups for Profiles and Permission Sets cooresponding to profile_names and permission_set_names respectively.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n networks = self.sf.query_all(format_soql('SELECT Id FROM Network WHERE Name = {network_name} LIMIT 1', network_name=network_name))\n if not networks['records']:\n raise SalesforceException(f'No Network record found with Name \"{network_name}\"')\n self.logger.info(f'Creating NetworkMemberGroup records for {network_name} Network:')\n return networks['records'][0]['Id']\n<|end_body_0|>\n\n<|body_start_1|>\n network_member_group_parent_ids = set()\n for record in self.sf.query_all(f\"SELECT ParentId FROM NetworkMemberGroup WHERE NetworkId = '{network_id}'\")['records']:\n network_member_group_parent_ids.add(record['ParentId'])\n return network_member_group_parent_ids\n<|end_body_1|>\n\n<|body_start_2|>\n parent_ids_by_name = dict(((name, None) for name in record_names))\n if sobject_type == 'PermissionSet':\n field_key = 'Label'\n else:\n field_key = 'Name'\n for record in self.sf.query_all(\"SELECT Id, {} FROM {} WHERE {} IN ('{}')\".format(field_key, sobject_type, field_key, \"','\".join(record_names)))['records']:\n record_name = record[field_key]\n parent_ids_by_name[record_name] = record['Id']\n return parent_ids_by_name\n<|end_body_2|>\n\n<|body_start_3|>\n if not record_names:\n return\n self.logger.info(f' {sobject_type}:')\n parent_ids_by_name = self._get_parent_ids_by_name(sobject_type, record_names)\n for parent_name, parent_id in parent_ids_by_name.items():\n self._create_network_member_group(sobject_type, parent_name, parent_id)\n<|end_body_3|>\n\n<|body_start_4|>\n if not parent_id:\n raise CumulusCIException(f'No {sobject_type} record found with Name \"{parent_name}\"')\n if parent_id in self._parent_ids:\n self.logger.warning(f' Already exists for \"{parent_name}\"')\n else:\n insert_response = self.sf.NetworkMemberGroup.create({'NetworkId': self._network_id, 'ParentId': parent_id})\n if insert_response.get('success') is True:\n self.logger.info(f' \"{parent_name}\"')\n else:\n raise SalesforceException(f'''Error creating NetworkMemberGroup for Network \"{self._network_id}\" for parent {sobject_type} \"{parent_name}\" {parent_id}. Errors: {', '.join(insert_response.get('errors') or [])}''')\n<|end_body_4|>\n\n<|body_start_5|>\n self._network_id = self._get_network_id(self.options['network_name'])\n self._parent_ids = self._get_network_member_group_parent_ids(self._network_id)\n for sobject_type, record_names in {'Profile': process_list_arg(self.options.get('profile_names') or []), 'PermissionSet': process_list_arg(self.options.get('permission_set_names') or [])}.items():\n self._process_parent(sobject_type, record_names)\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_val_000040", "length_bytes": 6853, "license_type": "permissive", "methods": [{"docstring": "Returns Id of Network record with Name network_name. Raises a SalesforceException if no Network is found.", "name": "_get_network_id", "signature": "def _get_network_id(self, network_name: str) -> str"}, {"docstring": "Collect existing NetworkMemberGroup Parent IDs (associated Profile or Permission Set ID). 
An excpetion is thrown trying to create a NetworkMemberGroup for a parent who already has a record.", "name": "_get_network_member_group_parent_ids", "signature": "def _get_network_member_group_parent_ids(self, network_id) -> set"}, {"docstring": "Returns a Dict: Name --> ID of records with Name in record_names for sObject_type. Dict value are None for all record_names that do not have corresponding records.", "name": "_get_parent_ids_by_name", "signature": "def _get_parent_ids_by_name(self, sobject_type: str, record_names: List[str])"}, {"docstring": "For a specific sobject_type and record_names, queries all Salesforce IDs corresponding to records of SObjectType sobject_type with Name in record_names. Then, tries to create NetworkMemberGroup for each parent in record_names.", "name": "_process_parent", "signature": "def _process_parent(self, sobject_type, record_names) -> None"}, {"docstring": "Processes and logs creating a NetworkMemberGroup for a specific parent. Outcomes: - Raises a CumulusCIException if record_id is None meaning no corresponding record was found in _get_parent_ids_by_name. - Logs a warning that a NetworkMemberGroup already exists is parent_id is in self._parent_ids. - Creates a NetworkMemberGroup for parent_id and logs the result.", "name": "_create_network_member_group", "signature": "def _create_network_member_group(self, sobject_type, parent_name, parent_id) -> None"}, {"docstring": "Gets required information then tries to create NetworkMemberGroups for Profiles and Permission Sets cooresponding to profile_names and permission_set_names respectively.", "name": "_run_task", "signature": "def _run_task(self)"}], "n_methods": 6, "prompt": "Implement the Python class `CreateNetworkMemberGroups` described below.\n\nClass description:\nCreates NetworkMemberGroup for a Network (Experience Site) for Profiles and Permission Sets that don't already have a corresponding NetworkMemberGroup. Raises exceptions if records cannot be found: - Network with Name network_name - Profiles with Names in profile_names - Permission Sets with Names in permission_set_names\n\nMethod signatures and docstrings:\n- def _get_network_id(self, network_name: str) -> str: Returns Id of Network record with Name network_name. Raises a SalesforceException if no Network is found.\n- def _get_network_member_group_parent_ids(self, network_id) -> set: Collect existing NetworkMemberGroup Parent IDs (associated Profile or Permission Set ID). An excpetion is thrown trying to create a NetworkMemberGroup for a parent who already has a record.\n- def _get_parent_ids_by_name(self, sobject_type: str, record_names: List[str]): Returns a Dict: Name --> ID of records with Name in record_names for sObject_type. Dict value are None for all record_names that do not have corresponding records.\n- def _process_parent(self, sobject_type, record_names) -> None: For a specific sobject_type and record_names, queries all Salesforce IDs corresponding to records of SObjectType sobject_type with Name in record_names. Then, tries to create NetworkMemberGroup for each parent in record_names.\n- def _create_network_member_group(self, sobject_type, parent_name, parent_id) -> None: Processes and logs creating a NetworkMemberGroup for a specific parent. Outcomes: - Raises a CumulusCIException if record_id is None meaning no corresponding record was found in _get_parent_ids_by_name. - Logs a warning that a NetworkMemberGroup already exists is parent_id is in self._parent_ids. 
- Creates a NetworkMemberGroup for parent_id and logs the result.\n- def _run_task(self): Gets required information then tries to create NetworkMemberGroups for Profiles and Permission Sets cooresponding to profile_names and permission_set_names respectively.", "prompted_full_text": "Implement the Python class `CreateNetworkMemberGroups` described below.\n\nClass description:\nCreates NetworkMemberGroup for a Network (Experience Site) for Profiles and Permission Sets that don't already have a corresponding NetworkMemberGroup. Raises exceptions if records cannot be found: - Network with Name network_name - Profiles with Names in profile_names - Permission Sets with Names in permission_set_names\n\nMethod signatures and docstrings:\n- def _get_network_id(self, network_name: str) -> str: Returns Id of Network record with Name network_name. Raises a SalesforceException if no Network is found.\n- def _get_network_member_group_parent_ids(self, network_id) -> set: Collect existing NetworkMemberGroup Parent IDs (associated Profile or Permission Set ID). An excpetion is thrown trying to create a NetworkMemberGroup for a parent who already has a record.\n- def _get_parent_ids_by_name(self, sobject_type: str, record_names: List[str]): Returns a Dict: Name --> ID of records with Name in record_names for sObject_type. Dict value are None for all record_names that do not have corresponding records.\n- def _process_parent(self, sobject_type, record_names) -> None: For a specific sobject_type and record_names, queries all Salesforce IDs corresponding to records of SObjectType sobject_type with Name in record_names. Then, tries to create NetworkMemberGroup for each parent in record_names.\n- def _create_network_member_group(self, sobject_type, parent_name, parent_id) -> None: Processes and logs creating a NetworkMemberGroup for a specific parent. Outcomes: - Raises a CumulusCIException if record_id is None meaning no corresponding record was found in _get_parent_ids_by_name. - Logs a warning that a NetworkMemberGroup already exists is parent_id is in self._parent_ids. - Creates a NetworkMemberGroup for parent_id and logs the result.\n- def _run_task(self): Gets required information then tries to create NetworkMemberGroups for Profiles and Permission Sets cooresponding to profile_names and permission_set_names respectively.\n\n<|skeleton|>\nclass CreateNetworkMemberGroups:\n \"\"\"Creates NetworkMemberGroup for a Network (Experience Site) for Profiles and Permission Sets that don't already have a corresponding NetworkMemberGroup. Raises exceptions if records cannot be found: - Network with Name network_name - Profiles with Names in profile_names - Permission Sets with Names in permission_set_names\"\"\"\n\n def _get_network_id(self, network_name: str) -> str:\n \"\"\"Returns Id of Network record with Name network_name. Raises a SalesforceException if no Network is found.\"\"\"\n <|body_0|>\n\n def _get_network_member_group_parent_ids(self, network_id) -> set:\n \"\"\"Collect existing NetworkMemberGroup Parent IDs (associated Profile or Permission Set ID). An excpetion is thrown trying to create a NetworkMemberGroup for a parent who already has a record.\"\"\"\n <|body_1|>\n\n def _get_parent_ids_by_name(self, sobject_type: str, record_names: List[str]):\n \"\"\"Returns a Dict: Name --> ID of records with Name in record_names for sObject_type. 
Dict value are None for all record_names that do not have corresponding records.\"\"\"\n <|body_2|>\n\n def _process_parent(self, sobject_type, record_names) -> None:\n \"\"\"For a specific sobject_type and record_names, queries all Salesforce IDs corresponding to records of SObjectType sobject_type with Name in record_names. Then, tries to create NetworkMemberGroup for each parent in record_names.\"\"\"\n <|body_3|>\n\n def _create_network_member_group(self, sobject_type, parent_name, parent_id) -> None:\n \"\"\"Processes and logs creating a NetworkMemberGroup for a specific parent. Outcomes: - Raises a CumulusCIException if record_id is None meaning no corresponding record was found in _get_parent_ids_by_name. - Logs a warning that a NetworkMemberGroup already exists is parent_id is in self._parent_ids. - Creates a NetworkMemberGroup for parent_id and logs the result.\"\"\"\n <|body_4|>\n\n def _run_task(self):\n \"\"\"Gets required information then tries to create NetworkMemberGroups for Profiles and Permission Sets cooresponding to profile_names and permission_set_names respectively.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n networks = self.sf.query_all(format_soql('SELECT Id FROM Network WHERE Name = {network_name} LIMIT 1', network_name=network_name))\n if not networks['records']:\n raise SalesforceException(f'No Network record found with Name \"{network_name}\"')\n self.logger.info(f'Creating NetworkMemberGroup records for {network_name} Network:')\n return networks['records'][0]['Id']\n<|end_body_0|>\n\n<|body_start_1|>\n network_member_group_parent_ids = set()\n for record in self.sf.query_all(f\"SELECT ParentId FROM NetworkMemberGroup WHERE NetworkId = '{network_id}'\")['records']:\n network_member_group_parent_ids.add(record['ParentId'])\n return network_member_group_parent_ids\n<|end_body_1|>\n\n<|body_start_2|>\n parent_ids_by_name = dict(((name, None) for name in record_names))\n if sobject_type == 'PermissionSet':\n field_key = 'Label'\n else:\n field_key = 'Name'\n for record in self.sf.query_all(\"SELECT Id, {} FROM {} WHERE {} IN ('{}')\".format(field_key, sobject_type, field_key, \"','\".join(record_names)))['records']:\n record_name = record[field_key]\n parent_ids_by_name[record_name] = record['Id']\n return parent_ids_by_name\n<|end_body_2|>\n\n<|body_start_3|>\n if not record_names:\n return\n self.logger.info(f' {sobject_type}:')\n parent_ids_by_name = self._get_parent_ids_by_name(sobject_type, record_names)\n for parent_name, parent_id in parent_ids_by_name.items():\n self._create_network_member_group(sobject_type, parent_name, parent_id)\n<|end_body_3|>\n\n<|body_start_4|>\n if not parent_id:\n raise CumulusCIException(f'No {sobject_type} record found with Name \"{parent_name}\"')\n if parent_id in self._parent_ids:\n self.logger.warning(f' Already exists for \"{parent_name}\"')\n else:\n insert_response = self.sf.NetworkMemberGroup.create({'NetworkId': self._network_id, 'ParentId': parent_id})\n if insert_response.get('success') is True:\n self.logger.info(f' \"{parent_name}\"')\n else:\n raise SalesforceException(f'''Error creating NetworkMemberGroup for Network \"{self._network_id}\" for parent {sobject_type} \"{parent_name}\" {parent_id}. 
Errors: {', '.join(insert_response.get('errors') or [])}''')\n<|end_body_4|>\n\n<|body_start_5|>\n self._network_id = self._get_network_id(self.options['network_name'])\n self._parent_ids = self._get_network_member_group_parent_ids(self._network_id)\n for sobject_type, record_names in {'Profile': process_list_arg(self.options.get('profile_names') or []), 'PermissionSet': process_list_arg(self.options.get('permission_set_names') or [])}.items():\n self._process_parent(sobject_type, record_names)\n<|end_body_5|>\n", "revision_id": "9ccf3c9566f78c6e9102ac214db30470cef660c1", "skeleton": "<|skeleton|>\nclass CreateNetworkMemberGroups:\n \"\"\"Creates NetworkMemberGroup for a Network (Experience Site) for Profiles and Permission Sets that don't already have a corresponding NetworkMemberGroup. Raises exceptions if records cannot be found: - Network with Name network_name - Profiles with Names in profile_names - Permission Sets with Names in permission_set_names\"\"\"\n\n def _get_network_id(self, network_name: str) -> str:\n \"\"\"Returns Id of Network record with Name network_name. Raises a SalesforceException if no Network is found.\"\"\"\n <|body_0|>\n\n def _get_network_member_group_parent_ids(self, network_id) -> set:\n \"\"\"Collect existing NetworkMemberGroup Parent IDs (associated Profile or Permission Set ID). An excpetion is thrown trying to create a NetworkMemberGroup for a parent who already has a record.\"\"\"\n <|body_1|>\n\n def _get_parent_ids_by_name(self, sobject_type: str, record_names: List[str]):\n \"\"\"Returns a Dict: Name --> ID of records with Name in record_names for sObject_type. Dict value are None for all record_names that do not have corresponding records.\"\"\"\n <|body_2|>\n\n def _process_parent(self, sobject_type, record_names) -> None:\n \"\"\"For a specific sobject_type and record_names, queries all Salesforce IDs corresponding to records of SObjectType sobject_type with Name in record_names. Then, tries to create NetworkMemberGroup for each parent in record_names.\"\"\"\n <|body_3|>\n\n def _create_network_member_group(self, sobject_type, parent_name, parent_id) -> None:\n \"\"\"Processes and logs creating a NetworkMemberGroup for a specific parent. Outcomes: - Raises a CumulusCIException if record_id is None meaning no corresponding record was found in _get_parent_ids_by_name. - Logs a warning that a NetworkMemberGroup already exists is parent_id is in self._parent_ids. - Creates a NetworkMemberGroup for parent_id and logs the result.\"\"\"\n <|body_4|>\n\n def _run_task(self):\n \"\"\"Gets required information then tries to create NetworkMemberGroups for Profiles and Permission Sets cooresponding to profile_names and permission_set_names respectively.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CreateNetworkMemberGroups:\n \"\"\"Creates NetworkMemberGroup for a Network (Experience Site) for Profiles and Permission Sets that don't already have a corresponding NetworkMemberGroup. Raises exceptions if records cannot be found: - Network with Name network_name - Profiles with Names in profile_names - Permission Sets with Names in permission_set_names\"\"\"\n\n def _get_network_id(self, network_name: str) -> str:\n \"\"\"Returns Id of Network record with Name network_name. 
Raises a SalesforceException if no Network is found.\"\"\"\n networks = self.sf.query_all(format_soql('SELECT Id FROM Network WHERE Name = {network_name} LIMIT 1', network_name=network_name))\n if not networks['records']:\n raise SalesforceException(f'No Network record found with Name \"{network_name}\"')\n self.logger.info(f'Creating NetworkMemberGroup records for {network_name} Network:')\n return networks['records'][0]['Id']\n\n def _get_network_member_group_parent_ids(self, network_id) -> set:\n \"\"\"Collect existing NetworkMemberGroup Parent IDs (associated Profile or Permission Set ID). An excpetion is thrown trying to create a NetworkMemberGroup for a parent who already has a record.\"\"\"\n network_member_group_parent_ids = set()\n for record in self.sf.query_all(f\"SELECT ParentId FROM NetworkMemberGroup WHERE NetworkId = '{network_id}'\")['records']:\n network_member_group_parent_ids.add(record['ParentId'])\n return network_member_group_parent_ids\n\n def _get_parent_ids_by_name(self, sobject_type: str, record_names: List[str]):\n \"\"\"Returns a Dict: Name --> ID of records with Name in record_names for sObject_type. Dict value are None for all record_names that do not have corresponding records.\"\"\"\n parent_ids_by_name = dict(((name, None) for name in record_names))\n if sobject_type == 'PermissionSet':\n field_key = 'Label'\n else:\n field_key = 'Name'\n for record in self.sf.query_all(\"SELECT Id, {} FROM {} WHERE {} IN ('{}')\".format(field_key, sobject_type, field_key, \"','\".join(record_names)))['records']:\n record_name = record[field_key]\n parent_ids_by_name[record_name] = record['Id']\n return parent_ids_by_name\n\n def _process_parent(self, sobject_type, record_names) -> None:\n \"\"\"For a specific sobject_type and record_names, queries all Salesforce IDs corresponding to records of SObjectType sobject_type with Name in record_names. Then, tries to create NetworkMemberGroup for each parent in record_names.\"\"\"\n if not record_names:\n return\n self.logger.info(f' {sobject_type}:')\n parent_ids_by_name = self._get_parent_ids_by_name(sobject_type, record_names)\n for parent_name, parent_id in parent_ids_by_name.items():\n self._create_network_member_group(sobject_type, parent_name, parent_id)\n\n def _create_network_member_group(self, sobject_type, parent_name, parent_id) -> None:\n \"\"\"Processes and logs creating a NetworkMemberGroup for a specific parent. Outcomes: - Raises a CumulusCIException if record_id is None meaning no corresponding record was found in _get_parent_ids_by_name. - Logs a warning that a NetworkMemberGroup already exists is parent_id is in self._parent_ids. - Creates a NetworkMemberGroup for parent_id and logs the result.\"\"\"\n if not parent_id:\n raise CumulusCIException(f'No {sobject_type} record found with Name \"{parent_name}\"')\n if parent_id in self._parent_ids:\n self.logger.warning(f' Already exists for \"{parent_name}\"')\n else:\n insert_response = self.sf.NetworkMemberGroup.create({'NetworkId': self._network_id, 'ParentId': parent_id})\n if insert_response.get('success') is True:\n self.logger.info(f' \"{parent_name}\"')\n else:\n raise SalesforceException(f'''Error creating NetworkMemberGroup for Network \"{self._network_id}\" for parent {sobject_type} \"{parent_name}\" {parent_id}. 
Errors: {', '.join(insert_response.get('errors') or [])}''')\n\n def _run_task(self):\n \"\"\"Gets required information then tries to create NetworkMemberGroups for Profiles and Permission Sets cooresponding to profile_names and permission_set_names respectively.\"\"\"\n self._network_id = self._get_network_id(self.options['network_name'])\n self._parent_ids = self._get_network_member_group_parent_ids(self._network_id)\n for sobject_type, record_names in {'Profile': process_list_arg(self.options.get('profile_names') or []), 'PermissionSet': process_list_arg(self.options.get('permission_set_names') or [])}.items():\n self._process_parent(sobject_type, record_names)\n", "source": "the_stack_v2_python_sparse", "source_path": "cumulusci/tasks/salesforce/network_member_group.py", "source_repo": "SFDO-Tooling/CumulusCI", "split": "val", "star_events_count": 226} {"blob_id": "f645543120c50c0703a92307df4f470ea97edac9", "bodies": ["try:\n j = nums.index(0)\nexcept ValueError:\n return\nnums_len = len(nums)\ni = j + 1\nwhile i < nums_len and nums[i] == 0:\n i += 1\nwhile i < nums_len:\n nums[j] = nums[i]\n nums[i] = 0\n j += 1\n while i < nums_len and nums[i] == 0:\n i += 1", "i = 0\nfor num in nums:\n if num != 0:\n nums[i] = num\n i += 1\nwhile i < len(nums):\n nums[i] = 0\n i += 1"], "bodies_text": "<|body_start_0|>\n try:\n j = nums.index(0)\n except ValueError:\n return\n nums_len = len(nums)\n i = j + 1\n while i < nums_len and nums[i] == 0:\n i += 1\n while i < nums_len:\n nums[j] = nums[i]\n nums[i] = 0\n j += 1\n while i < nums_len and nums[i] == 0:\n i += 1\n<|end_body_0|>\n\n<|body_start_1|>\n i = 0\n for num in nums:\n if num != 0:\n nums[i] = num\n i += 1\n while i < len(nums):\n nums[i] = 0\n i += 1\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_0|>\n\n def moveZeroes2(self, nums: List[int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n j = nums.index(0)\n except ValueError:\n return\n nums_len = len(nums)\n i = j + 1\n while i < nums_len and nums[i] == 0:\n i += 1\n while i < nums_len:\n nums[j] = nums[i]\n nums[i] = 0\n j += 1\n while i < nums_len and nums[i] == 0:\n i += 1\n<|end_body_0|>\n\n<|body_start_1|>\n i = 0\n for num in nums:\n if num != 0:\n nums[i] = num\n i += 1\n while i < len(nums):\n nums[i] = 0\n i += 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000041", "length_bytes": 882, "license_type": "no_license", "methods": [{"docstring": "Do not return anything, modify nums in-place instead.", "name": "moveZeroes", "signature": "def moveZeroes(self, nums: List[int]) -> None"}, {"docstring": "Do not return anything, modify nums in-place instead.", "name": "moveZeroes2", "signature": "def moveZeroes2(self, nums: List[int]) -> None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006363", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def moveZeroes(self, nums: List[int]) -> None: Do not return anything, modify nums in-place instead.\n- def moveZeroes2(self, nums: List[int]) -> None: Do not return anything, modify nums in-place instead.", "prompted_full_text": "Implement 
the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def moveZeroes(self, nums: List[int]) -> None: Do not return anything, modify nums in-place instead.\n- def moveZeroes2(self, nums: List[int]) -> None: Do not return anything, modify nums in-place instead.\n\n<|skeleton|>\nclass Solution:\n\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_0|>\n\n def moveZeroes2(self, nums: List[int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n j = nums.index(0)\n except ValueError:\n return\n nums_len = len(nums)\n i = j + 1\n while i < nums_len and nums[i] == 0:\n i += 1\n while i < nums_len:\n nums[j] = nums[i]\n nums[i] = 0\n j += 1\n while i < nums_len and nums[i] == 0:\n i += 1\n<|end_body_0|>\n\n<|body_start_1|>\n i = 0\n for num in nums:\n if num != 0:\n nums[i] = num\n i += 1\n while i < len(nums):\n nums[i] = 0\n i += 1\n<|end_body_1|>\n", "revision_id": "0d1015717666ad78d4cf79df51b606feb307c908", "skeleton": "<|skeleton|>\nclass Solution:\n\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_0|>\n\n def moveZeroes2(self, nums: List[int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n try:\n j = nums.index(0)\n except ValueError:\n return\n nums_len = len(nums)\n i = j + 1\n while i < nums_len and nums[i] == 0:\n i += 1\n while i < nums_len:\n nums[j] = nums[i]\n nums[i] = 0\n j += 1\n while i < nums_len and nums[i] == 0:\n i += 1\n\n def moveZeroes2(self, nums: List[int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n i = 0\n for num in nums:\n if num != 0:\n nums[i] = num\n i += 1\n while i < len(nums):\n nums[i] = 0\n i += 1\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode/Interview questions/Easy/Array/8__567_Move_Zeroes/solution.py", "source_repo": "aleksanderprofic/Problem-Solving", "split": "val", "star_events_count": 0} {"blob_id": "4c55d1d11be9471bd623e486fc8554279b5ba68d", "bodies": ["super().__init__(expected=(source,))\nself.temperature = temperature\nself.threshold = threshold", "strengths, = self.extract_inputs(inputs)\nthresholded = nd.threshold(strengths, th=self.threshold)\nprobabilities = nd.boltzmann(thresholded, self.temperature)\nd = nd.draw(probabilities, n=1)\nd = nd.with_default(d, default=0)\nreturn d"], "bodies_text": "<|body_start_0|>\n super().__init__(expected=(source,))\n self.temperature = temperature\n self.threshold = threshold\n<|end_body_0|>\n\n<|body_start_1|>\n strengths, = self.extract_inputs(inputs)\n thresholded = nd.threshold(strengths, th=self.threshold)\n probabilities = nd.boltzmann(thresholded, self.temperature)\n d = nd.draw(probabilities, n=1)\n d = nd.with_default(d, default=0)\n return d\n<|end_body_1|>\n", "class_docstring": "Selects a chunk according to a Boltzmann distribution.", "class_name": "BoltzmannSelector", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BoltzmannSelector:\n 
\"\"\"Selects a chunk according to a Boltzmann distribution.\"\"\"\n\n def __init__(self, source, temperature=0.01, threshold=0.25):\n \"\"\"Initialize a ``BoltzmannSelector`` instance. :param temperature: Temperature of the Boltzmann distribution.\"\"\"\n <|body_0|>\n\n def call(self, inputs):\n \"\"\"Select chunks through an activation-based competition. Selection probabilities vary with chunk strengths according to a Boltzmann distribution.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(expected=(source,))\n self.temperature = temperature\n self.threshold = threshold\n<|end_body_0|>\n\n<|body_start_1|>\n strengths, = self.extract_inputs(inputs)\n thresholded = nd.threshold(strengths, th=self.threshold)\n probabilities = nd.boltzmann(thresholded, self.temperature)\n d = nd.draw(probabilities, n=1)\n d = nd.with_default(d, default=0)\n return d\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000042", "length_bytes": 7721, "license_type": "permissive", "methods": [{"docstring": "Initialize a ``BoltzmannSelector`` instance. :param temperature: Temperature of the Boltzmann distribution.", "name": "__init__", "signature": "def __init__(self, source, temperature=0.01, threshold=0.25)"}, {"docstring": "Select chunks through an activation-based competition. Selection probabilities vary with chunk strengths according to a Boltzmann distribution.", "name": "call", "signature": "def call(self, inputs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002196", "prompt": "Implement the Python class `BoltzmannSelector` described below.\n\nClass description:\nSelects a chunk according to a Boltzmann distribution.\n\nMethod signatures and docstrings:\n- def __init__(self, source, temperature=0.01, threshold=0.25): Initialize a ``BoltzmannSelector`` instance. :param temperature: Temperature of the Boltzmann distribution.\n- def call(self, inputs): Select chunks through an activation-based competition. Selection probabilities vary with chunk strengths according to a Boltzmann distribution.", "prompted_full_text": "Implement the Python class `BoltzmannSelector` described below.\n\nClass description:\nSelects a chunk according to a Boltzmann distribution.\n\nMethod signatures and docstrings:\n- def __init__(self, source, temperature=0.01, threshold=0.25): Initialize a ``BoltzmannSelector`` instance. :param temperature: Temperature of the Boltzmann distribution.\n- def call(self, inputs): Select chunks through an activation-based competition. Selection probabilities vary with chunk strengths according to a Boltzmann distribution.\n\n<|skeleton|>\nclass BoltzmannSelector:\n \"\"\"Selects a chunk according to a Boltzmann distribution.\"\"\"\n\n def __init__(self, source, temperature=0.01, threshold=0.25):\n \"\"\"Initialize a ``BoltzmannSelector`` instance. :param temperature: Temperature of the Boltzmann distribution.\"\"\"\n <|body_0|>\n\n def call(self, inputs):\n \"\"\"Select chunks through an activation-based competition. 
Selection probabilities vary with chunk strengths according to a Boltzmann distribution.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(expected=(source,))\n self.temperature = temperature\n self.threshold = threshold\n<|end_body_0|>\n\n<|body_start_1|>\n strengths, = self.extract_inputs(inputs)\n thresholded = nd.threshold(strengths, th=self.threshold)\n probabilities = nd.boltzmann(thresholded, self.temperature)\n d = nd.draw(probabilities, n=1)\n d = nd.with_default(d, default=0)\n return d\n<|end_body_1|>\n", "revision_id": "d8ff4c545785ec6cddc989dded9c1a9d3dd91514", "skeleton": "<|skeleton|>\nclass BoltzmannSelector:\n \"\"\"Selects a chunk according to a Boltzmann distribution.\"\"\"\n\n def __init__(self, source, temperature=0.01, threshold=0.25):\n \"\"\"Initialize a ``BoltzmannSelector`` instance. :param temperature: Temperature of the Boltzmann distribution.\"\"\"\n <|body_0|>\n\n def call(self, inputs):\n \"\"\"Select chunks through an activation-based competition. Selection probabilities vary with chunk strengths according to a Boltzmann distribution.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BoltzmannSelector:\n \"\"\"Selects a chunk according to a Boltzmann distribution.\"\"\"\n\n def __init__(self, source, temperature=0.01, threshold=0.25):\n \"\"\"Initialize a ``BoltzmannSelector`` instance. :param temperature: Temperature of the Boltzmann distribution.\"\"\"\n super().__init__(expected=(source,))\n self.temperature = temperature\n self.threshold = threshold\n\n def call(self, inputs):\n \"\"\"Select chunks through an activation-based competition. Selection probabilities vary with chunk strengths according to a Boltzmann distribution.\"\"\"\n strengths, = self.extract_inputs(inputs)\n thresholded = nd.threshold(strengths, th=self.threshold)\n probabilities = nd.boltzmann(thresholded, self.temperature)\n d = nd.draw(probabilities, n=1)\n d = nd.with_default(d, default=0)\n return d\n", "source": "the_stack_v2_python_sparse", "source_path": "pyClarion/components/propagators.py", "source_repo": "HZeng3/pyClarion", "split": "val", "star_events_count": 0} {"blob_id": "83e36b97895007fc65a9883476f8c6b25e9b9095", "bodies": ["self.drive_vec = drive_vec\nself.object = object\nself.parent_site = parent_site", "if dictionary is None:\n return None\ndrive_vec = None\nif dictionary.get('driveVec') != None:\n drive_vec = list()\n for structure in dictionary.get('driveVec'):\n drive_vec.append(cohesity_management_sdk.models.restore_site_params_site_owner_drive.RestoreSiteParams_SiteOwner_Drive.from_dictionary(structure))\nobject = cohesity_management_sdk.models.restore_object.RestoreObject.from_dictionary(dictionary.get('object')) if dictionary.get('object') else None\nparent_site = cohesity_management_sdk.models.entity_proto.EntityProto.from_dictionary(dictionary.get('parentSite')) if dictionary.get('parentSite') else None\nreturn cls(drive_vec, object, parent_site)"], "bodies_text": "<|body_start_0|>\n self.drive_vec = drive_vec\n self.object = object\n self.parent_site = parent_site\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n drive_vec = None\n if dictionary.get('driveVec') != None:\n drive_vec = list()\n for structure in dictionary.get('driveVec'):\n drive_vec.append(cohesity_management_sdk.models.restore_site_params_site_owner_drive.RestoreSiteParams_SiteOwner_Drive.from_dictionary(structure))\n 
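The `call` pipeline in the `BoltzmannSelector` record above (threshold → boltzmann → draw → with_default) is written against pyClarion's numerical-dictionary helpers (`nd`). The same selection logic can be sketched with a plain dict and the standard library; treating the threshold as inclusive and returning None for the empty case are assumptions standing in for `nd.threshold` and `nd.with_default`:

import math
import random

def boltzmann_select(strengths, temperature=0.01, threshold=0.25):
    # Drop chunks whose activation falls below the threshold.
    eligible = {k: s for k, s in strengths.items() if s >= threshold}
    if not eligible:
        return None  # stand-in for nd.with_default(..., default=0)
    # Boltzmann distribution: p(k) is proportional to exp(s_k / T).
    # Subtracting the max strength keeps exp() from overflowing at small T.
    top = max(eligible.values())
    weights = {k: math.exp((s - top) / temperature)
               for k, s in eligible.items()}
    # Draw a single chunk (n=1) with probability proportional to weight.
    r = random.uniform(0, sum(weights.values()))
    for k, w in weights.items():
        r -= w
        if r <= 0:
            return k
    return k  # guard against floating-point rounding at the boundary

print(boltzmann_select({'chunk_a': 0.9, 'chunk_b': 0.6, 'chunk_c': 0.1}))

At the default temperature of 0.01 the distribution is sharply peaked, so this almost always selects the strongest surviving chunk.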
object = cohesity_management_sdk.models.restore_object.RestoreObject.from_dictionary(dictionary.get('object')) if dictionary.get('object') else None\n parent_site = cohesity_management_sdk.models.entity_proto.EntityProto.from_dictionary(dictionary.get('parentSite')) if dictionary.get('parentSite') else None\n return cls(drive_vec, object, parent_site)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'RestoreSiteParams_SiteOwner' model. TODO: type description here. Attributes: drive_vec (list of RestoreSiteParams_SiteOwner_Drive): The list of drives that are being restored. object (RestoreObject): This will store the details of the user whose drives is to be restored. parent_site (EntityProto): The entity representing the parent site if we are restoring a subsite.", "class_name": "RestoreSiteParams_SiteOwner", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RestoreSiteParams_SiteOwner:\n \"\"\"Implementation of the 'RestoreSiteParams_SiteOwner' model. TODO: type description here. Attributes: drive_vec (list of RestoreSiteParams_SiteOwner_Drive): The list of drives that are being restored. object (RestoreObject): This will store the details of the user whose drives is to be restored. parent_site (EntityProto): The entity representing the parent site if we are restoring a subsite.\"\"\"\n\n def __init__(self, drive_vec=None, object=None, parent_site=None):\n \"\"\"Constructor for the RestoreSiteParams_SiteOwner class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.drive_vec = drive_vec\n self.object = object\n self.parent_site = parent_site\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n drive_vec = None\n if dictionary.get('driveVec') != None:\n drive_vec = list()\n for structure in dictionary.get('driveVec'):\n drive_vec.append(cohesity_management_sdk.models.restore_site_params_site_owner_drive.RestoreSiteParams_SiteOwner_Drive.from_dictionary(structure))\n object = cohesity_management_sdk.models.restore_object.RestoreObject.from_dictionary(dictionary.get('object')) if dictionary.get('object') else None\n parent_site = cohesity_management_sdk.models.entity_proto.EntityProto.from_dictionary(dictionary.get('parentSite')) if dictionary.get('parentSite') else None\n return cls(drive_vec, object, parent_site)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000043", "length_bytes": 2656, "license_type": "permissive", "methods": [{"docstring": "Constructor for the RestoreSiteParams_SiteOwner class", "name": "__init__", "signature": "def __init__(self, drive_vec=None, object=None, parent_site=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `RestoreSiteParams_SiteOwner` described below.\n\nClass description:\nImplementation of the 'RestoreSiteParams_SiteOwner' model. TODO: type description here. Attributes: drive_vec (list of RestoreSiteParams_SiteOwner_Drive): The list of drives that are being restored. object (RestoreObject): This will store the details of the user whose drives is to be restored. parent_site (EntityProto): The entity representing the parent site if we are restoring a subsite.\n\nMethod signatures and docstrings:\n- def __init__(self, drive_vec=None, object=None, parent_site=None): Constructor for the RestoreSiteParams_SiteOwner class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `RestoreSiteParams_SiteOwner` described below.\n\nClass description:\nImplementation of the 'RestoreSiteParams_SiteOwner' model. TODO: type description here. Attributes: drive_vec (list of RestoreSiteParams_SiteOwner_Drive): The list of drives that are being restored. object (RestoreObject): This will store the details of the user whose drives is to be restored. parent_site (EntityProto): The entity representing the parent site if we are restoring a subsite.\n\nMethod signatures and docstrings:\n- def __init__(self, drive_vec=None, object=None, parent_site=None): Constructor for the RestoreSiteParams_SiteOwner class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass RestoreSiteParams_SiteOwner:\n \"\"\"Implementation of the 'RestoreSiteParams_SiteOwner' model. TODO: type description here. Attributes: drive_vec (list of RestoreSiteParams_SiteOwner_Drive): The list of drives that are being restored. object (RestoreObject): This will store the details of the user whose drives is to be restored. parent_site (EntityProto): The entity representing the parent site if we are restoring a subsite.\"\"\"\n\n def __init__(self, drive_vec=None, object=None, parent_site=None):\n \"\"\"Constructor for the RestoreSiteParams_SiteOwner class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.drive_vec = drive_vec\n self.object = object\n self.parent_site = parent_site\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n drive_vec = None\n if dictionary.get('driveVec') != None:\n drive_vec = list()\n for structure in dictionary.get('driveVec'):\n drive_vec.append(cohesity_management_sdk.models.restore_site_params_site_owner_drive.RestoreSiteParams_SiteOwner_Drive.from_dictionary(structure))\n object = cohesity_management_sdk.models.restore_object.RestoreObject.from_dictionary(dictionary.get('object')) if dictionary.get('object') else None\n parent_site = cohesity_management_sdk.models.entity_proto.EntityProto.from_dictionary(dictionary.get('parentSite')) if dictionary.get('parentSite') else None\n return cls(drive_vec, object, parent_site)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass RestoreSiteParams_SiteOwner:\n \"\"\"Implementation of the 'RestoreSiteParams_SiteOwner' model. TODO: type description here. Attributes: drive_vec (list of RestoreSiteParams_SiteOwner_Drive): The list of drives that are being restored. object (RestoreObject): This will store the details of the user whose drives is to be restored. parent_site (EntityProto): The entity representing the parent site if we are restoring a subsite.\"\"\"\n\n def __init__(self, drive_vec=None, object=None, parent_site=None):\n \"\"\"Constructor for the RestoreSiteParams_SiteOwner class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RestoreSiteParams_SiteOwner:\n \"\"\"Implementation of the 'RestoreSiteParams_SiteOwner' model. TODO: type description here. Attributes: drive_vec (list of RestoreSiteParams_SiteOwner_Drive): The list of drives that are being restored. object (RestoreObject): This will store the details of the user whose drives is to be restored. parent_site (EntityProto): The entity representing the parent site if we are restoring a subsite.\"\"\"\n\n def __init__(self, drive_vec=None, object=None, parent_site=None):\n \"\"\"Constructor for the RestoreSiteParams_SiteOwner class\"\"\"\n self.drive_vec = drive_vec\n self.object = object\n self.parent_site = parent_site\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n drive_vec = None\n if dictionary.get('driveVec') != None:\n drive_vec = list()\n for structure in dictionary.get('driveVec'):\n drive_vec.append(cohesity_management_sdk.models.restore_site_params_site_owner_drive.RestoreSiteParams_SiteOwner_Drive.from_dictionary(structure))\n object = cohesity_management_sdk.models.restore_object.RestoreObject.from_dictionary(dictionary.get('object')) if dictionary.get('object') else None\n parent_site = cohesity_management_sdk.models.entity_proto.EntityProto.from_dictionary(dictionary.get('parentSite')) if dictionary.get('parentSite') else None\n return cls(drive_vec, object, parent_site)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/restore_site_params_site_owner.py", "source_repo": "cohesity/management-sdk-python", "split": "val", "star_events_count": 24} {"blob_id": "49e5220274d75bbb125b4b58d8416ae13a137503", "bodies": ["self.num_size = len(nums)\nself.sum_num = {0: 0}\nsum_num = 0\nfor i in range(self.num_size):\n sum_num += nums[i]\n self.sum_num[i + 1] = sum_num\nprint(self.sum_num)", "i = i if i > 0 else 0\nj = j if j < self.num_size else self.num_size\nreturn self.sum_num[j + 1] - self.sum_num[i]"], "bodies_text": "<|body_start_0|>\n self.num_size = len(nums)\n self.sum_num = {0: 0}\n sum_num = 0\n for i in range(self.num_size):\n sum_num += nums[i]\n self.sum_num[i + 1] = sum_num\n print(self.sum_num)\n<|end_body_0|>\n\n<|body_start_1|>\n i = i if i > 0 else 0\n j = j if j < self.num_size else self.num_size\n return self.sum_num[j + 1] - self.sum_num[i]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "NumArray", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NumArray:\n\n def __init__(self, nums):\n \"\"\":type nums: List[int]\"\"\"\n <|body_0|>\n\n def sumRange(self, i, j):\n \"\"\":type i: int :type j: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.num_size = len(nums)\n self.sum_num = {0: 0}\n sum_num = 0\n for i in range(self.num_size):\n sum_num += nums[i]\n self.sum_num[i + 1] = sum_num\n print(self.sum_num)\n<|end_body_0|>\n\n<|body_start_1|>\n i = i if i > 0 else 0\n j = j if j < self.num_size else self.num_size\n return self.sum_num[j + 1] - self.sum_num[i]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000044", "length_bytes": 1047, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int]", "name": "__init__", "signature": "def __init__(self, nums)"}, {"docstring": ":type i: int :type j: int :rtype: int", "name": "sumRange", "signature": "def sumRange(self, i, j)"}], "n_methods": 2, "prompt": "Implement the Python class `NumArray` described below.\n\nClass description:\nImplement the NumArray class.\n\nMethod signatures and docstrings:\n- def __init__(self, nums): :type nums: List[int]\n- def sumRange(self, i, j): :type i: int :type j: int :rtype: int", "prompted_full_text": "Implement the Python class `NumArray` described below.\n\nClass description:\nImplement the NumArray class.\n\nMethod signatures and docstrings:\n- def __init__(self, nums): :type nums: List[int]\n- def sumRange(self, i, j): :type i: int :type j: int :rtype: int\n\n<|skeleton|>\nclass NumArray:\n\n def __init__(self, nums):\n \"\"\":type nums: List[int]\"\"\"\n <|body_0|>\n\n def sumRange(self, i, j):\n \"\"\":type i: int :type j: int :rtype: int\"\"\"\n 
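The `RestoreSiteParams_SiteOwner` record above follows the generated-model pattern used throughout the Cohesity SDK: camelCase wire keys ('driveVec', 'parentSite') map to snake_case attributes, nested objects recurse through their own `from_dictionary`, and a missing dictionary deserializes to None. Since the skeleton's signature is `from_dictionary(cls, dictionary)`, the original presumably carries a `@classmethod` decorator that this extraction format does not preserve. A self-contained sketch of the pattern with two hypothetical models:

class DriveInfo:
    # Hypothetical leaf model illustrating the nested deserialization step.
    def __init__(self, drive_id=None):
        self.drive_id = drive_id

    @classmethod
    def from_dictionary(cls, dictionary):
        if dictionary is None:
            return None
        return cls(drive_id=dictionary.get('driveId'))

class SiteOwner:
    # Hypothetical container model: a list-valued field recurses per element.
    def __init__(self, drive_vec=None):
        self.drive_vec = drive_vec

    @classmethod
    def from_dictionary(cls, dictionary):
        if dictionary is None:
            return None
        drive_vec = None
        if dictionary.get('driveVec') is not None:
            drive_vec = [DriveInfo.from_dictionary(d)
                         for d in dictionary['driveVec']]
        return cls(drive_vec)

owner = SiteOwner.from_dictionary({'driveVec': [{'driveId': 'd1'}]})
print(owner.drive_vec[0].drive_id)  # d1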
<|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.num_size = len(nums)\n self.sum_num = {0: 0}\n sum_num = 0\n for i in range(self.num_size):\n sum_num += nums[i]\n self.sum_num[i + 1] = sum_num\n print(self.sum_num)\n<|end_body_0|>\n\n<|body_start_1|>\n i = i if i > 0 else 0\n j = j if j < self.num_size else self.num_size\n return self.sum_num[j + 1] - self.sum_num[i]\n<|end_body_1|>\n", "revision_id": "157cbaeeff74130e5105e58a6b4cdf66403a8a6f", "skeleton": "<|skeleton|>\nclass NumArray:\n\n def __init__(self, nums):\n \"\"\":type nums: List[int]\"\"\"\n <|body_0|>\n\n def sumRange(self, i, j):\n \"\"\":type i: int :type j: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NumArray:\n def __init__(self, nums):\n \"\"\":type nums: List[int]\"\"\"\n self.num_size = len(nums)\n self.sum_num = {0: 0}\n sum_num = 0\n for i in range(self.num_size):\n sum_num += nums[i]\n self.sum_num[i + 1] = sum_num\n print(self.sum_num)\n\n def sumRange(self, i, j):\n \"\"\":type i: int :type j: int :rtype: int\"\"\"\n i = i if i > 0 else 0\n j = j if j < self.num_size else self.num_size\n return self.sum_num[j + 1] - self.sum_num[i]\n", "source": "the_stack_v2_python_sparse", "source_path": "Leetcode/303. Range Sum Query - Immutable.py", "source_repo": "xiaohuanlin/Algorithms", "split": "val", "star_events_count": 1} {"blob_id": "5f5c9015bfe5d81e4f9b193024f9317e73164efa", "bodies": ["if regrid_mode not in self.REGRID_REQUIRES_LANDMASK:\n msg = 'Unrecognised regrid mode {}'\n raise ValueError(msg.format(regrid_mode))\nif landmask is None and self.REGRID_REQUIRES_LANDMASK[regrid_mode]:\n msg = 'Regrid mode {} requires an input landmask cube'\n raise ValueError(msg.format(regrid_mode))\nself.regrid_mode = regrid_mode\nself.extrapolation_mode = extrapolation_mode\nself.landmask_source_grid = landmask\nself.landmask_vicinity = None if landmask is None else landmask_vicinity\nself.landmask_name = 'land_binary_mask'", "if regrid_mode in ('nearest-with-mask', 'nearest-with-mask-2', 'bilinear-with-mask-2'):\n if self.landmask_name not in self.landmask_source_grid.name():\n msg = 'Expected {} in input_landmask cube but found {}'.format(self.landmask_name, repr(self.landmask_source_grid))\n warnings.warn(msg)\n if self.landmask_name not in target_grid.name():\n msg = 'Expected {} in target_grid cube but found {}'.format(self.landmask_name, repr(target_grid))\n warnings.warn(msg)\nif regrid_mode in ('bilinear', 'nearest', 'nearest-with-mask'):\n if 'nearest' in regrid_mode:\n regridder = Nearest(extrapolation_mode=self.extrapolation_mode)\n else:\n regridder = Linear(extrapolation_mode=self.extrapolation_mode)\n cube = cube.regrid(target_grid, regridder)\n if self.REGRID_REQUIRES_LANDMASK[regrid_mode]:\n cube = AdjustLandSeaPoints(vicinity_radius=self.landmask_vicinity, extrapolation_mode=self.extrapolation_mode)(cube, self.landmask_source_grid, target_grid)\nelif regrid_mode in ('nearest-2', 'nearest-with-mask-2', 'bilinear-2', 'bilinear-with-mask-2'):\n cube = RegridWithLandSeaMask(regrid_mode=regrid_mode, vicinity_radius=self.landmask_vicinity)(cube, self.landmask_source_grid, target_grid)\nrequired_grid_attributes = [attr for attr in cube.attributes if attr in MOSG_GRID_ATTRIBUTES]\nfor key in required_grid_attributes:\n if key in target_grid.attributes:\n cube.attributes[key] = target_grid.attributes[key]\n else:\n cube.attributes.pop(key)\ncube.attributes['title'] = 
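The `NumArray` record above answers range-sum queries in O(1) by precomputing prefix sums: with prefix[k] holding the sum of the first k elements, sum(nums[i..j]) equals prefix[j + 1] - prefix[i]. Two caveats about the stored body: the `print(self.sum_num)` line is leftover debugging, and the guard `j = j if j < self.num_size else self.num_size` followed by `self.sum_num[j + 1]` raises KeyError for any out-of-range j, because the dict's largest key is num_size; the problem guarantees valid indices, so no clamping is needed at all. A cleaner list-based sketch:

from itertools import accumulate

class NumArray:
    def __init__(self, nums):
        # prefix[k] = sum of nums[:k], so prefix has len(nums) + 1 entries.
        self.prefix = [0] + list(accumulate(nums))

    def sumRange(self, i, j):
        # Sum over the inclusive range nums[i..j].
        return self.prefix[j + 1] - self.prefix[i]

na = NumArray([-2, 0, 3, -5, 2, -1])
assert na.sumRange(0, 2) == 1
assert na.sumRange(2, 5) == -1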
MANDATORY_ATTRIBUTE_DEFAULTS['title'] if regridded_title is None else regridded_title\nreturn cube", "if self.REGRID_REQUIRES_LANDMASK[self.regrid_mode]:\n if not grid_contains_cutout(self.landmask_source_grid, cube):\n raise ValueError('Source landmask does not match input grid')\nreturn self._regrid_to_target(cube, target_grid, regridded_title, self.regrid_mode)"], "bodies_text": "<|body_start_0|>\n if regrid_mode not in self.REGRID_REQUIRES_LANDMASK:\n msg = 'Unrecognised regrid mode {}'\n raise ValueError(msg.format(regrid_mode))\n if landmask is None and self.REGRID_REQUIRES_LANDMASK[regrid_mode]:\n msg = 'Regrid mode {} requires an input landmask cube'\n raise ValueError(msg.format(regrid_mode))\n self.regrid_mode = regrid_mode\n self.extrapolation_mode = extrapolation_mode\n self.landmask_source_grid = landmask\n self.landmask_vicinity = None if landmask is None else landmask_vicinity\n self.landmask_name = 'land_binary_mask'\n<|end_body_0|>\n\n<|body_start_1|>\n if regrid_mode in ('nearest-with-mask', 'nearest-with-mask-2', 'bilinear-with-mask-2'):\n if self.landmask_name not in self.landmask_source_grid.name():\n msg = 'Expected {} in input_landmask cube but found {}'.format(self.landmask_name, repr(self.landmask_source_grid))\n warnings.warn(msg)\n if self.landmask_name not in target_grid.name():\n msg = 'Expected {} in target_grid cube but found {}'.format(self.landmask_name, repr(target_grid))\n warnings.warn(msg)\n if regrid_mode in ('bilinear', 'nearest', 'nearest-with-mask'):\n if 'nearest' in regrid_mode:\n regridder = Nearest(extrapolation_mode=self.extrapolation_mode)\n else:\n regridder = Linear(extrapolation_mode=self.extrapolation_mode)\n cube = cube.regrid(target_grid, regridder)\n if self.REGRID_REQUIRES_LANDMASK[regrid_mode]:\n cube = AdjustLandSeaPoints(vicinity_radius=self.landmask_vicinity, extrapolation_mode=self.extrapolation_mode)(cube, self.landmask_source_grid, target_grid)\n elif regrid_mode in ('nearest-2', 'nearest-with-mask-2', 'bilinear-2', 'bilinear-with-mask-2'):\n cube = RegridWithLandSeaMask(regrid_mode=regrid_mode, vicinity_radius=self.landmask_vicinity)(cube, self.landmask_source_grid, target_grid)\n required_grid_attributes = [attr for attr in cube.attributes if attr in MOSG_GRID_ATTRIBUTES]\n for key in required_grid_attributes:\n if key in target_grid.attributes:\n cube.attributes[key] = target_grid.attributes[key]\n else:\n cube.attributes.pop(key)\n cube.attributes['title'] = MANDATORY_ATTRIBUTE_DEFAULTS['title'] if regridded_title is None else regridded_title\n return cube\n<|end_body_1|>\n\n<|body_start_2|>\n if self.REGRID_REQUIRES_LANDMASK[self.regrid_mode]:\n if not grid_contains_cutout(self.landmask_source_grid, cube):\n raise ValueError('Source landmask does not match input grid')\n return self._regrid_to_target(cube, target_grid, regridded_title, self.regrid_mode)\n<|end_body_2|>\n", "class_docstring": "Nearest-neighbour and bilinear regridding with or without land-sea mask awareness. When land-sea mask considered, surface-type-mismatched source points are excluded from field regridding calculation for target points. 
For example, for regridding a field using nearest-neighbour approach with land-sea awareness, regridded land points always take values from a land point on the source grid, and vice versa for sea points.", "class_name": "RegridLandSea", "detected_licenses": ["BSD-3-Clause", "LicenseRef-scancode-proprietary-license"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RegridLandSea:\n \"\"\"Nearest-neighbour and bilinear regridding with or without land-sea mask awareness. When land-sea mask considered, surface-type-mismatched source points are excluded from field regridding calculation for target points. For example, for regridding a field using nearest-neighbour approach with land-sea awareness, regridded land points always take values from a land point on the source grid, and vice versa for sea points.\"\"\"\n\n def __init__(self, regrid_mode: str='bilinear', extrapolation_mode: str='nanmask', landmask: Optional[Cube]=None, landmask_vicinity: float=25000):\n \"\"\"Initialise regridding parameters. Args: regrid_mode: Mode of interpolation in regridding. Valid options are \"bilinear\", \"nearest\", \"nearest-with-mask\", \"bilinear-2\",\"nearest-2\", \"nearest-with-mask-2\" or \"bilinear-with-mask-2\". \"***-with-mask**\" option triggers adjustment of regridded points to match source points in terms of land / sea type. extrapolation_mode: Mode to fill regions outside the domain in regridding. landmask: Land-sea mask (\"land_binary_mask\") on the input cube grid, with land points set to one and sea points set to zero. Required for \"nearest-with-mask\" regridding option. landmask_vicinity: Radius of vicinity to search for a coastline, in metres.\"\"\"\n <|body_0|>\n\n def _regrid_to_target(self, cube: Cube, target_grid: Cube, regridded_title: Optional[str], regrid_mode: str) -> Cube:\n \"\"\"Regrid cube to target_grid, inherit grid attributes and update title Args: cube: Cube to be regridded target_grid: Data on the target grid. If regridding with mask, this cube should contain land-sea mask data to be used in adjusting land and sea points after regridding. regridded_title: New value for the \"title\" attribute to be used after regridding. If not set, a default value is used. regrid_mode: \"bilinear\",\"nearest\",\"nearest-with-mask\", \"nearest-2\",\"nearest-with-mask-2\",\"bilinear-2\",\"bilinear-with-mask-2\" Returns: Regridded cube with updated attributes.\"\"\"\n <|body_1|>\n\n def process(self, cube: Cube, target_grid: Cube, regridded_title: Optional[str]=None) -> Cube:\n \"\"\"Regrids cube onto spatial grid provided by target_grid. Args: cube: Cube to be regridded. target_grid: Data on the target grid. If regridding with mask, this cube should contain land-sea mask data to be used in adjusting land and sea points after regridding. regridded_title: New value for the \"title\" attribute to be used after regridding. If not set, a default value is used. 
Returns: Regridded cube with updated attributes.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if regrid_mode not in self.REGRID_REQUIRES_LANDMASK:\n msg = 'Unrecognised regrid mode {}'\n raise ValueError(msg.format(regrid_mode))\n if landmask is None and self.REGRID_REQUIRES_LANDMASK[regrid_mode]:\n msg = 'Regrid mode {} requires an input landmask cube'\n raise ValueError(msg.format(regrid_mode))\n self.regrid_mode = regrid_mode\n self.extrapolation_mode = extrapolation_mode\n self.landmask_source_grid = landmask\n self.landmask_vicinity = None if landmask is None else landmask_vicinity\n self.landmask_name = 'land_binary_mask'\n<|end_body_0|>\n\n<|body_start_1|>\n if regrid_mode in ('nearest-with-mask', 'nearest-with-mask-2', 'bilinear-with-mask-2'):\n if self.landmask_name not in self.landmask_source_grid.name():\n msg = 'Expected {} in input_landmask cube but found {}'.format(self.landmask_name, repr(self.landmask_source_grid))\n warnings.warn(msg)\n if self.landmask_name not in target_grid.name():\n msg = 'Expected {} in target_grid cube but found {}'.format(self.landmask_name, repr(target_grid))\n warnings.warn(msg)\n if regrid_mode in ('bilinear', 'nearest', 'nearest-with-mask'):\n if 'nearest' in regrid_mode:\n regridder = Nearest(extrapolation_mode=self.extrapolation_mode)\n else:\n regridder = Linear(extrapolation_mode=self.extrapolation_mode)\n cube = cube.regrid(target_grid, regridder)\n if self.REGRID_REQUIRES_LANDMASK[regrid_mode]:\n cube = AdjustLandSeaPoints(vicinity_radius=self.landmask_vicinity, extrapolation_mode=self.extrapolation_mode)(cube, self.landmask_source_grid, target_grid)\n elif regrid_mode in ('nearest-2', 'nearest-with-mask-2', 'bilinear-2', 'bilinear-with-mask-2'):\n cube = RegridWithLandSeaMask(regrid_mode=regrid_mode, vicinity_radius=self.landmask_vicinity)(cube, self.landmask_source_grid, target_grid)\n required_grid_attributes = [attr for attr in cube.attributes if attr in MOSG_GRID_ATTRIBUTES]\n for key in required_grid_attributes:\n if key in target_grid.attributes:\n cube.attributes[key] = target_grid.attributes[key]\n else:\n cube.attributes.pop(key)\n cube.attributes['title'] = MANDATORY_ATTRIBUTE_DEFAULTS['title'] if regridded_title is None else regridded_title\n return cube\n<|end_body_1|>\n\n<|body_start_2|>\n if self.REGRID_REQUIRES_LANDMASK[self.regrid_mode]:\n if not grid_contains_cutout(self.landmask_source_grid, cube):\n raise ValueError('Source landmask does not match input grid')\n return self._regrid_to_target(cube, target_grid, regridded_title, self.regrid_mode)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000045", "length_bytes": 17882, "license_type": "permissive", "methods": [{"docstring": "Initialise regridding parameters. Args: regrid_mode: Mode of interpolation in regridding. Valid options are \"bilinear\", \"nearest\", \"nearest-with-mask\", \"bilinear-2\",\"nearest-2\", \"nearest-with-mask-2\" or \"bilinear-with-mask-2\". \"***-with-mask**\" option triggers adjustment of regridded points to match source points in terms of land / sea type. extrapolation_mode: Mode to fill regions outside the domain in regridding. landmask: Land-sea mask (\"land_binary_mask\") on the input cube grid, with land points set to one and sea points set to zero. Required for \"nearest-with-mask\" regridding option. 
landmask_vicinity: Radius of vicinity to search for a coastline, in metres.", "name": "__init__", "signature": "def __init__(self, regrid_mode: str='bilinear', extrapolation_mode: str='nanmask', landmask: Optional[Cube]=None, landmask_vicinity: float=25000)"}, {"docstring": "Regrid cube to target_grid, inherit grid attributes and update title Args: cube: Cube to be regridded target_grid: Data on the target grid. If regridding with mask, this cube should contain land-sea mask data to be used in adjusting land and sea points after regridding. regridded_title: New value for the \"title\" attribute to be used after regridding. If not set, a default value is used. regrid_mode: \"bilinear\",\"nearest\",\"nearest-with-mask\", \"nearest-2\",\"nearest-with-mask-2\",\"bilinear-2\",\"bilinear-with-mask-2\" Returns: Regridded cube with updated attributes.", "name": "_regrid_to_target", "signature": "def _regrid_to_target(self, cube: Cube, target_grid: Cube, regridded_title: Optional[str], regrid_mode: str) -> Cube"}, {"docstring": "Regrids cube onto spatial grid provided by target_grid. Args: cube: Cube to be regridded. target_grid: Data on the target grid. If regridding with mask, this cube should contain land-sea mask data to be used in adjusting land and sea points after regridding. regridded_title: New value for the \"title\" attribute to be used after regridding. If not set, a default value is used. Returns: Regridded cube with updated attributes.", "name": "process", "signature": "def process(self, cube: Cube, target_grid: Cube, regridded_title: Optional[str]=None) -> Cube"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006761", "prompt": "Implement the Python class `RegridLandSea` described below.\n\nClass description:\nNearest-neighbour and bilinear regridding with or without land-sea mask awareness. When land-sea mask considered, surface-type-mismatched source points are excluded from field regridding calculation for target points. For example, for regridding a field using nearest-neighbour approach with land-sea awareness, regridded land points always take values from a land point on the source grid, and vice versa for sea points.\n\nMethod signatures and docstrings:\n- def __init__(self, regrid_mode: str='bilinear', extrapolation_mode: str='nanmask', landmask: Optional[Cube]=None, landmask_vicinity: float=25000): Initialise regridding parameters. Args: regrid_mode: Mode of interpolation in regridding. Valid options are \"bilinear\", \"nearest\", \"nearest-with-mask\", \"bilinear-2\",\"nearest-2\", \"nearest-with-mask-2\" or \"bilinear-with-mask-2\". \"***-with-mask**\" option triggers adjustment of regridded points to match source points in terms of land / sea type. extrapolation_mode: Mode to fill regions outside the domain in regridding. landmask: Land-sea mask (\"land_binary_mask\") on the input cube grid, with land points set to one and sea points set to zero. Required for \"nearest-with-mask\" regridding option. landmask_vicinity: Radius of vicinity to search for a coastline, in metres.\n- def _regrid_to_target(self, cube: Cube, target_grid: Cube, regridded_title: Optional[str], regrid_mode: str) -> Cube: Regrid cube to target_grid, inherit grid attributes and update title Args: cube: Cube to be regridded target_grid: Data on the target grid. If regridding with mask, this cube should contain land-sea mask data to be used in adjusting land and sea points after regridding. regridded_title: New value for the \"title\" attribute to be used after regridding. 
If not set, a default value is used. regrid_mode: \"bilinear\",\"nearest\",\"nearest-with-mask\", \"nearest-2\",\"nearest-with-mask-2\",\"bilinear-2\",\"bilinear-with-mask-2\" Returns: Regridded cube with updated attributes.\n- def process(self, cube: Cube, target_grid: Cube, regridded_title: Optional[str]=None) -> Cube: Regrids cube onto spatial grid provided by target_grid. Args: cube: Cube to be regridded. target_grid: Data on the target grid. If regridding with mask, this cube should contain land-sea mask data to be used in adjusting land and sea points after regridding. regridded_title: New value for the \"title\" attribute to be used after regridding. If not set, a default value is used. Returns: Regridded cube with updated attributes.", "prompted_full_text": "Implement the Python class `RegridLandSea` described below.\n\nClass description:\nNearest-neighbour and bilinear regridding with or without land-sea mask awareness. When land-sea mask considered, surface-type-mismatched source points are excluded from field regridding calculation for target points. For example, for regridding a field using nearest-neighbour approach with land-sea awareness, regridded land points always take values from a land point on the source grid, and vice versa for sea points.\n\nMethod signatures and docstrings:\n- def __init__(self, regrid_mode: str='bilinear', extrapolation_mode: str='nanmask', landmask: Optional[Cube]=None, landmask_vicinity: float=25000): Initialise regridding parameters. Args: regrid_mode: Mode of interpolation in regridding. Valid options are \"bilinear\", \"nearest\", \"nearest-with-mask\", \"bilinear-2\",\"nearest-2\", \"nearest-with-mask-2\" or \"bilinear-with-mask-2\". \"***-with-mask**\" option triggers adjustment of regridded points to match source points in terms of land / sea type. extrapolation_mode: Mode to fill regions outside the domain in regridding. landmask: Land-sea mask (\"land_binary_mask\") on the input cube grid, with land points set to one and sea points set to zero. Required for \"nearest-with-mask\" regridding option. landmask_vicinity: Radius of vicinity to search for a coastline, in metres.\n- def _regrid_to_target(self, cube: Cube, target_grid: Cube, regridded_title: Optional[str], regrid_mode: str) -> Cube: Regrid cube to target_grid, inherit grid attributes and update title Args: cube: Cube to be regridded target_grid: Data on the target grid. If regridding with mask, this cube should contain land-sea mask data to be used in adjusting land and sea points after regridding. regridded_title: New value for the \"title\" attribute to be used after regridding. If not set, a default value is used. regrid_mode: \"bilinear\",\"nearest\",\"nearest-with-mask\", \"nearest-2\",\"nearest-with-mask-2\",\"bilinear-2\",\"bilinear-with-mask-2\" Returns: Regridded cube with updated attributes.\n- def process(self, cube: Cube, target_grid: Cube, regridded_title: Optional[str]=None) -> Cube: Regrids cube onto spatial grid provided by target_grid. Args: cube: Cube to be regridded. target_grid: Data on the target grid. If regridding with mask, this cube should contain land-sea mask data to be used in adjusting land and sea points after regridding. regridded_title: New value for the \"title\" attribute to be used after regridding. If not set, a default value is used. Returns: Regridded cube with updated attributes.\n\n<|skeleton|>\nclass RegridLandSea:\n \"\"\"Nearest-neighbour and bilinear regridding with or without land-sea mask awareness. 
When land-sea mask considered, surface-type-mismatched source points are excluded from field regridding calculation for target points. For example, for regridding a field using nearest-neighbour approach with land-sea awareness, regridded land points always take values from a land point on the source grid, and vice versa for sea points.\"\"\"\n\n def __init__(self, regrid_mode: str='bilinear', extrapolation_mode: str='nanmask', landmask: Optional[Cube]=None, landmask_vicinity: float=25000):\n \"\"\"Initialise regridding parameters. Args: regrid_mode: Mode of interpolation in regridding. Valid options are \"bilinear\", \"nearest\", \"nearest-with-mask\", \"bilinear-2\",\"nearest-2\", \"nearest-with-mask-2\" or \"bilinear-with-mask-2\". \"***-with-mask**\" option triggers adjustment of regridded points to match source points in terms of land / sea type. extrapolation_mode: Mode to fill regions outside the domain in regridding. landmask: Land-sea mask (\"land_binary_mask\") on the input cube grid, with land points set to one and sea points set to zero. Required for \"nearest-with-mask\" regridding option. landmask_vicinity: Radius of vicinity to search for a coastline, in metres.\"\"\"\n <|body_0|>\n\n def _regrid_to_target(self, cube: Cube, target_grid: Cube, regridded_title: Optional[str], regrid_mode: str) -> Cube:\n \"\"\"Regrid cube to target_grid, inherit grid attributes and update title Args: cube: Cube to be regridded target_grid: Data on the target grid. If regridding with mask, this cube should contain land-sea mask data to be used in adjusting land and sea points after regridding. regridded_title: New value for the \"title\" attribute to be used after regridding. If not set, a default value is used. regrid_mode: \"bilinear\",\"nearest\",\"nearest-with-mask\", \"nearest-2\",\"nearest-with-mask-2\",\"bilinear-2\",\"bilinear-with-mask-2\" Returns: Regridded cube with updated attributes.\"\"\"\n <|body_1|>\n\n def process(self, cube: Cube, target_grid: Cube, regridded_title: Optional[str]=None) -> Cube:\n \"\"\"Regrids cube onto spatial grid provided by target_grid. Args: cube: Cube to be regridded. target_grid: Data on the target grid. If regridding with mask, this cube should contain land-sea mask data to be used in adjusting land and sea points after regridding. regridded_title: New value for the \"title\" attribute to be used after regridding. If not set, a default value is used. 
Returns: Regridded cube with updated attributes.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if regrid_mode not in self.REGRID_REQUIRES_LANDMASK:\n msg = 'Unrecognised regrid mode {}'\n raise ValueError(msg.format(regrid_mode))\n if landmask is None and self.REGRID_REQUIRES_LANDMASK[regrid_mode]:\n msg = 'Regrid mode {} requires an input landmask cube'\n raise ValueError(msg.format(regrid_mode))\n self.regrid_mode = regrid_mode\n self.extrapolation_mode = extrapolation_mode\n self.landmask_source_grid = landmask\n self.landmask_vicinity = None if landmask is None else landmask_vicinity\n self.landmask_name = 'land_binary_mask'\n<|end_body_0|>\n\n<|body_start_1|>\n if regrid_mode in ('nearest-with-mask', 'nearest-with-mask-2', 'bilinear-with-mask-2'):\n if self.landmask_name not in self.landmask_source_grid.name():\n msg = 'Expected {} in input_landmask cube but found {}'.format(self.landmask_name, repr(self.landmask_source_grid))\n warnings.warn(msg)\n if self.landmask_name not in target_grid.name():\n msg = 'Expected {} in target_grid cube but found {}'.format(self.landmask_name, repr(target_grid))\n warnings.warn(msg)\n if regrid_mode in ('bilinear', 'nearest', 'nearest-with-mask'):\n if 'nearest' in regrid_mode:\n regridder = Nearest(extrapolation_mode=self.extrapolation_mode)\n else:\n regridder = Linear(extrapolation_mode=self.extrapolation_mode)\n cube = cube.regrid(target_grid, regridder)\n if self.REGRID_REQUIRES_LANDMASK[regrid_mode]:\n cube = AdjustLandSeaPoints(vicinity_radius=self.landmask_vicinity, extrapolation_mode=self.extrapolation_mode)(cube, self.landmask_source_grid, target_grid)\n elif regrid_mode in ('nearest-2', 'nearest-with-mask-2', 'bilinear-2', 'bilinear-with-mask-2'):\n cube = RegridWithLandSeaMask(regrid_mode=regrid_mode, vicinity_radius=self.landmask_vicinity)(cube, self.landmask_source_grid, target_grid)\n required_grid_attributes = [attr for attr in cube.attributes if attr in MOSG_GRID_ATTRIBUTES]\n for key in required_grid_attributes:\n if key in target_grid.attributes:\n cube.attributes[key] = target_grid.attributes[key]\n else:\n cube.attributes.pop(key)\n cube.attributes['title'] = MANDATORY_ATTRIBUTE_DEFAULTS['title'] if regridded_title is None else regridded_title\n return cube\n<|end_body_1|>\n\n<|body_start_2|>\n if self.REGRID_REQUIRES_LANDMASK[self.regrid_mode]:\n if not grid_contains_cutout(self.landmask_source_grid, cube):\n raise ValueError('Source landmask does not match input grid')\n return self._regrid_to_target(cube, target_grid, regridded_title, self.regrid_mode)\n<|end_body_2|>\n", "revision_id": "cd2c9019944345df1e703bf8f625db537ad9f559", "skeleton": "<|skeleton|>\nclass RegridLandSea:\n \"\"\"Nearest-neighbour and bilinear regridding with or without land-sea mask awareness. When land-sea mask considered, surface-type-mismatched source points are excluded from field regridding calculation for target points. For example, for regridding a field using nearest-neighbour approach with land-sea awareness, regridded land points always take values from a land point on the source grid, and vice versa for sea points.\"\"\"\n\n def __init__(self, regrid_mode: str='bilinear', extrapolation_mode: str='nanmask', landmask: Optional[Cube]=None, landmask_vicinity: float=25000):\n \"\"\"Initialise regridding parameters. Args: regrid_mode: Mode of interpolation in regridding. Valid options are \"bilinear\", \"nearest\", \"nearest-with-mask\", \"bilinear-2\",\"nearest-2\", \"nearest-with-mask-2\" or \"bilinear-with-mask-2\". 
\"***-with-mask**\" option triggers adjustment of regridded points to match source points in terms of land / sea type. extrapolation_mode: Mode to fill regions outside the domain in regridding. landmask: Land-sea mask (\"land_binary_mask\") on the input cube grid, with land points set to one and sea points set to zero. Required for \"nearest-with-mask\" regridding option. landmask_vicinity: Radius of vicinity to search for a coastline, in metres.\"\"\"\n <|body_0|>\n\n def _regrid_to_target(self, cube: Cube, target_grid: Cube, regridded_title: Optional[str], regrid_mode: str) -> Cube:\n \"\"\"Regrid cube to target_grid, inherit grid attributes and update title Args: cube: Cube to be regridded target_grid: Data on the target grid. If regridding with mask, this cube should contain land-sea mask data to be used in adjusting land and sea points after regridding. regridded_title: New value for the \"title\" attribute to be used after regridding. If not set, a default value is used. regrid_mode: \"bilinear\",\"nearest\",\"nearest-with-mask\", \"nearest-2\",\"nearest-with-mask-2\",\"bilinear-2\",\"bilinear-with-mask-2\" Returns: Regridded cube with updated attributes.\"\"\"\n <|body_1|>\n\n def process(self, cube: Cube, target_grid: Cube, regridded_title: Optional[str]=None) -> Cube:\n \"\"\"Regrids cube onto spatial grid provided by target_grid. Args: cube: Cube to be regridded. target_grid: Data on the target grid. If regridding with mask, this cube should contain land-sea mask data to be used in adjusting land and sea points after regridding. regridded_title: New value for the \"title\" attribute to be used after regridding. If not set, a default value is used. Returns: Regridded cube with updated attributes.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RegridLandSea:\n \"\"\"Nearest-neighbour and bilinear regridding with or without land-sea mask awareness. When land-sea mask considered, surface-type-mismatched source points are excluded from field regridding calculation for target points. For example, for regridding a field using nearest-neighbour approach with land-sea awareness, regridded land points always take values from a land point on the source grid, and vice versa for sea points.\"\"\"\n\n def __init__(self, regrid_mode: str='bilinear', extrapolation_mode: str='nanmask', landmask: Optional[Cube]=None, landmask_vicinity: float=25000):\n \"\"\"Initialise regridding parameters. Args: regrid_mode: Mode of interpolation in regridding. Valid options are \"bilinear\", \"nearest\", \"nearest-with-mask\", \"bilinear-2\",\"nearest-2\", \"nearest-with-mask-2\" or \"bilinear-with-mask-2\". \"***-with-mask**\" option triggers adjustment of regridded points to match source points in terms of land / sea type. extrapolation_mode: Mode to fill regions outside the domain in regridding. landmask: Land-sea mask (\"land_binary_mask\") on the input cube grid, with land points set to one and sea points set to zero. Required for \"nearest-with-mask\" regridding option. 
landmask_vicinity: Radius of vicinity to search for a coastline, in metres.\"\"\"\n if regrid_mode not in self.REGRID_REQUIRES_LANDMASK:\n msg = 'Unrecognised regrid mode {}'\n raise ValueError(msg.format(regrid_mode))\n if landmask is None and self.REGRID_REQUIRES_LANDMASK[regrid_mode]:\n msg = 'Regrid mode {} requires an input landmask cube'\n raise ValueError(msg.format(regrid_mode))\n self.regrid_mode = regrid_mode\n self.extrapolation_mode = extrapolation_mode\n self.landmask_source_grid = landmask\n self.landmask_vicinity = None if landmask is None else landmask_vicinity\n self.landmask_name = 'land_binary_mask'\n\n def _regrid_to_target(self, cube: Cube, target_grid: Cube, regridded_title: Optional[str], regrid_mode: str) -> Cube:\n \"\"\"Regrid cube to target_grid, inherit grid attributes and update title Args: cube: Cube to be regridded target_grid: Data on the target grid. If regridding with mask, this cube should contain land-sea mask data to be used in adjusting land and sea points after regridding. regridded_title: New value for the \"title\" attribute to be used after regridding. If not set, a default value is used. regrid_mode: \"bilinear\",\"nearest\",\"nearest-with-mask\", \"nearest-2\",\"nearest-with-mask-2\",\"bilinear-2\",\"bilinear-with-mask-2\" Returns: Regridded cube with updated attributes.\"\"\"\n if regrid_mode in ('nearest-with-mask', 'nearest-with-mask-2', 'bilinear-with-mask-2'):\n if self.landmask_name not in self.landmask_source_grid.name():\n msg = 'Expected {} in input_landmask cube but found {}'.format(self.landmask_name, repr(self.landmask_source_grid))\n warnings.warn(msg)\n if self.landmask_name not in target_grid.name():\n msg = 'Expected {} in target_grid cube but found {}'.format(self.landmask_name, repr(target_grid))\n warnings.warn(msg)\n if regrid_mode in ('bilinear', 'nearest', 'nearest-with-mask'):\n if 'nearest' in regrid_mode:\n regridder = Nearest(extrapolation_mode=self.extrapolation_mode)\n else:\n regridder = Linear(extrapolation_mode=self.extrapolation_mode)\n cube = cube.regrid(target_grid, regridder)\n if self.REGRID_REQUIRES_LANDMASK[regrid_mode]:\n cube = AdjustLandSeaPoints(vicinity_radius=self.landmask_vicinity, extrapolation_mode=self.extrapolation_mode)(cube, self.landmask_source_grid, target_grid)\n elif regrid_mode in ('nearest-2', 'nearest-with-mask-2', 'bilinear-2', 'bilinear-with-mask-2'):\n cube = RegridWithLandSeaMask(regrid_mode=regrid_mode, vicinity_radius=self.landmask_vicinity)(cube, self.landmask_source_grid, target_grid)\n required_grid_attributes = [attr for attr in cube.attributes if attr in MOSG_GRID_ATTRIBUTES]\n for key in required_grid_attributes:\n if key in target_grid.attributes:\n cube.attributes[key] = target_grid.attributes[key]\n else:\n cube.attributes.pop(key)\n cube.attributes['title'] = MANDATORY_ATTRIBUTE_DEFAULTS['title'] if regridded_title is None else regridded_title\n return cube\n\n def process(self, cube: Cube, target_grid: Cube, regridded_title: Optional[str]=None) -> Cube:\n \"\"\"Regrids cube onto spatial grid provided by target_grid. Args: cube: Cube to be regridded. target_grid: Data on the target grid. If regridding with mask, this cube should contain land-sea mask data to be used in adjusting land and sea points after regridding. regridded_title: New value for the \"title\" attribute to be used after regridding. If not set, a default value is used. 
Returns: Regridded cube with updated attributes.\"\"\"\n if self.REGRID_REQUIRES_LANDMASK[self.regrid_mode]:\n if not grid_contains_cutout(self.landmask_source_grid, cube):\n raise ValueError('Source landmask does not match input grid')\n return self._regrid_to_target(cube, target_grid, regridded_title, self.regrid_mode)\n", "source": "the_stack_v2_python_sparse", "source_path": "improver/regrid/landsea.py", "source_repo": "metoppv/improver", "split": "val", "star_events_count": 101} {"blob_id": "e0d76d8e7de4146209052d64f5327b3b8011f036", "bodies": ["count = 0\nwhile n > 0:\n count += n & 1\n n >>= 1\nreturn count", "bits = []\nwhile n > 0:\n bits.append(n % 2)\n n = n // 2\nreturn sum(bits)"], "bodies_text": "<|body_start_0|>\n count = 0\n while n > 0:\n count += n & 1\n n >>= 1\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n bits = []\n while n > 0:\n bits.append(n % 2)\n n = n // 2\n return sum(bits)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def hammingWeight(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def hammingWeight_v2(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n count = 0\n while n > 0:\n count += n & 1\n n >>= 1\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n bits = []\n while n > 0:\n bits.append(n % 2)\n n = n // 2\n return sum(bits)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000046", "length_bytes": 1446, "license_type": "no_license", "methods": [{"docstring": ":type n: int :rtype: int", "name": "hammingWeight", "signature": "def hammingWeight(self, n)"}, {"docstring": ":type n: int :rtype: int", "name": "hammingWeight_v2", "signature": "def hammingWeight_v2(self, n)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000944", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def hammingWeight(self, n): :type n: int :rtype: int\n- def hammingWeight_v2(self, n): :type n: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def hammingWeight(self, n): :type n: int :rtype: int\n- def hammingWeight_v2(self, n): :type n: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def hammingWeight(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def hammingWeight_v2(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n count = 0\n while n > 0:\n count += n & 1\n n >>= 1\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n bits = []\n while n > 0:\n bits.append(n % 2)\n n = n // 2\n return sum(bits)\n<|end_body_1|>\n", "revision_id": "e60ba45fe2f2e5e3b3abfecec3db76f5ce1fde59", "skeleton": "<|skeleton|>\nclass Solution:\n\n def hammingWeight(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def hammingWeight_v2(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def hammingWeight(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n count = 0\n while n > 0:\n count += n & 1\n n >>= 1\n return count\n\n def hammingWeight_v2(self, 
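`RegridLandSea.__init__` above validates its arguments against a class-level `REGRID_REQUIRES_LANDMASK` dict that the skeleton does not show: the dict's keys enumerate the legal regrid modes, and its values flag which modes need a landmask cube, so one table drives both checks. A stripped-down, hypothetical sketch of that registry-validation pattern:

from typing import Optional

class ModalProcessor:
    # Hypothetical stand-in: the registry both lists the legal modes and
    # records which of them require an auxiliary (landmask-like) input.
    REQUIRES_AUX = {
        'bilinear': False,
        'nearest': False,
        'nearest-with-mask': True,
    }

    def __init__(self, mode: str = 'bilinear',
                 aux: Optional[object] = None):
        if mode not in self.REQUIRES_AUX:
            raise ValueError('Unrecognised mode {}'.format(mode))
        if aux is None and self.REQUIRES_AUX[mode]:
            raise ValueError('Mode {} requires an auxiliary input'.format(mode))
        self.mode = mode
        self.aux = aux

ModalProcessor('nearest')                  # accepted
try:
    ModalProcessor('nearest-with-mask')    # rejected: no landmask supplied
except ValueError as err:
    print(err)

Keeping the mode list in one dict means adding a mode (as the '-2' variants do) touches a single table rather than several scattered conditionals.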
n):\n \"\"\":type n: int :rtype: int\"\"\"\n bits = []\n while n > 0:\n bits.append(n % 2)\n n = n // 2\n return sum(bits)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/lt_191.py", "source_repo": "oxhead/CodingYourWay", "split": "val", "star_events_count": 0} {"blob_id": "2e75f3f70ab13799d3b163d4f2873035a0de5839", "bodies": ["self.text_color = text_color\nLabel.__init__(self, name, text, pygame.rect.Rect((0, 0), (0, 0)))\nreturn", "if self.text != self.cached_text:\n font_surface = BOLD_FONT.render(self.text, True, (0, 0, 0))\n target_surface = pygame.Surface(font_surface.get_rect().inflate(2, 2).size, flags=pygame.SRCALPHA)\n target_surface.blit(font_surface, (0, 0))\n target_surface.blit(font_surface, (1, 0))\n target_surface.blit(font_surface, (2, 0))\n target_surface.blit(font_surface, (0, 1))\n target_surface.blit(font_surface, (2, 1))\n target_surface.blit(font_surface, (0, 2))\n target_surface.blit(font_surface, (1, 2))\n target_surface.blit(font_surface, (2, 2))\n font_surface = BOLD_FONT.render(self.text, True, self.text_color)\n target_surface.blit(font_surface, (1, 1))\n center = self.rect.center\n self.image = target_surface\n self.rect = target_surface.get_rect()\n if center != (0, 0):\n self.rect.center = center\n self.last_rect = None\n self.cached_text = self.text\nreturn"], "bodies_text": "<|body_start_0|>\n self.text_color = text_color\n Label.__init__(self, name, text, pygame.rect.Rect((0, 0), (0, 0)))\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if self.text != self.cached_text:\n font_surface = BOLD_FONT.render(self.text, True, (0, 0, 0))\n target_surface = pygame.Surface(font_surface.get_rect().inflate(2, 2).size, flags=pygame.SRCALPHA)\n target_surface.blit(font_surface, (0, 0))\n target_surface.blit(font_surface, (1, 0))\n target_surface.blit(font_surface, (2, 0))\n target_surface.blit(font_surface, (0, 1))\n target_surface.blit(font_surface, (2, 1))\n target_surface.blit(font_surface, (0, 2))\n target_surface.blit(font_surface, (1, 2))\n target_surface.blit(font_surface, (2, 2))\n font_surface = BOLD_FONT.render(self.text, True, self.text_color)\n target_surface.blit(font_surface, (1, 1))\n center = self.rect.center\n self.image = target_surface\n self.rect = target_surface.get_rect()\n if center != (0, 0):\n self.rect.center = center\n self.last_rect = None\n self.cached_text = self.text\n return\n<|end_body_1|>\n", "class_docstring": "A Label with outlined text and a transparent background. Additional attributes: OutlinedText.text_color A tuple (R, G, B) holding the color of the text.", "class_name": "OutlinedText", "detected_licenses": ["Unlicense"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OutlinedText:\n \"\"\"A Label with outlined text and a transparent background. Additional attributes: OutlinedText.text_color A tuple (R, G, B) holding the color of the text.\"\"\"\n\n def __init__(self, name, text, text_color=(255, 255, 255)):\n \"\"\"Initialise the OutlinedText. text is the text to be written on the Label. 
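Both `hammingWeight` bodies in the record above peel off one bit per iteration, so they loop once per bit position of n. Kernighan's trick loops only once per set bit, since n & (n - 1) clears the lowest set bit; a short sketch:

def hamming_weight(n: int) -> int:
    # Each n & (n - 1) turns off exactly the lowest set bit, so the loop
    # body executes popcount(n) times rather than bit_length(n) times.
    count = 0
    while n:
        n &= n - 1
        count += 1
    return count

assert hamming_weight(0b1011) == 3
assert hamming_weight(0) == 0
# On Python 3.10+ this is built in: (0b1011).bit_count() == 3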
If text is None, it is replaced by an empty string.\"\"\"\n <|body_0|>\n\n def redraw(self):\n \"\"\"Redraw the Label if necessary.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.text_color = text_color\n Label.__init__(self, name, text, pygame.rect.Rect((0, 0), (0, 0)))\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if self.text != self.cached_text:\n font_surface = BOLD_FONT.render(self.text, True, (0, 0, 0))\n target_surface = pygame.Surface(font_surface.get_rect().inflate(2, 2).size, flags=pygame.SRCALPHA)\n target_surface.blit(font_surface, (0, 0))\n target_surface.blit(font_surface, (1, 0))\n target_surface.blit(font_surface, (2, 0))\n target_surface.blit(font_surface, (0, 1))\n target_surface.blit(font_surface, (2, 1))\n target_surface.blit(font_surface, (0, 2))\n target_surface.blit(font_surface, (1, 2))\n target_surface.blit(font_surface, (2, 2))\n font_surface = BOLD_FONT.render(self.text, True, self.text_color)\n target_surface.blit(font_surface, (1, 1))\n center = self.rect.center\n self.image = target_surface\n self.rect = target_surface.get_rect()\n if center != (0, 0):\n self.rect.center = center\n self.last_rect = None\n self.cached_text = self.text\n return\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000047", "length_bytes": 27668, "license_type": "permissive", "methods": [{"docstring": "Initialise the OutlinedText. text is the text to be written on the Label. If text is None, it is replaced by an empty string.", "name": "__init__", "signature": "def __init__(self, name, text, text_color=(255, 255, 255))"}, {"docstring": "Redraw the Label if necessary.", "name": "redraw", "signature": "def redraw(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000212", "prompt": "Implement the Python class `OutlinedText` described below.\n\nClass description:\nA Label with outlined text and a transparent background. Additional attributes: OutlinedText.text_color A tuple (R, G, B) holding the color of the text.\n\nMethod signatures and docstrings:\n- def __init__(self, name, text, text_color=(255, 255, 255)): Initialise the OutlinedText. text is the text to be written on the Label. If text is None, it is replaced by an empty string.\n- def redraw(self): Redraw the Label if necessary.", "prompted_full_text": "Implement the Python class `OutlinedText` described below.\n\nClass description:\nA Label with outlined text and a transparent background. Additional attributes: OutlinedText.text_color A tuple (R, G, B) holding the color of the text.\n\nMethod signatures and docstrings:\n- def __init__(self, name, text, text_color=(255, 255, 255)): Initialise the OutlinedText. text is the text to be written on the Label. If text is None, it is replaced by an empty string.\n- def redraw(self): Redraw the Label if necessary.\n\n<|skeleton|>\nclass OutlinedText:\n \"\"\"A Label with outlined text and a transparent background. Additional attributes: OutlinedText.text_color A tuple (R, G, B) holding the color of the text.\"\"\"\n\n def __init__(self, name, text, text_color=(255, 255, 255)):\n \"\"\"Initialise the OutlinedText. text is the text to be written on the Label. 
If text is None, it is replaced by an empty string.\"\"\"\n <|body_0|>\n\n def redraw(self):\n \"\"\"Redraw the Label if necessary.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.text_color = text_color\n Label.__init__(self, name, text, pygame.rect.Rect((0, 0), (0, 0)))\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if self.text != self.cached_text:\n font_surface = BOLD_FONT.render(self.text, True, (0, 0, 0))\n target_surface = pygame.Surface(font_surface.get_rect().inflate(2, 2).size, flags=pygame.SRCALPHA)\n target_surface.blit(font_surface, (0, 0))\n target_surface.blit(font_surface, (1, 0))\n target_surface.blit(font_surface, (2, 0))\n target_surface.blit(font_surface, (0, 1))\n target_surface.blit(font_surface, (2, 1))\n target_surface.blit(font_surface, (0, 2))\n target_surface.blit(font_surface, (1, 2))\n target_surface.blit(font_surface, (2, 2))\n font_surface = BOLD_FONT.render(self.text, True, self.text_color)\n target_surface.blit(font_surface, (1, 1))\n center = self.rect.center\n self.image = target_surface\n self.rect = target_surface.get_rect()\n if center != (0, 0):\n self.rect.center = center\n self.last_rect = None\n self.cached_text = self.text\n return\n<|end_body_1|>\n", "revision_id": "c2fc3d4e9beedb8487cfa4bfa13bdf55ec36af97", "skeleton": "<|skeleton|>\nclass OutlinedText:\n \"\"\"A Label with outlined text and a transparent background. Additional attributes: OutlinedText.text_color A tuple (R, G, B) holding the color of the text.\"\"\"\n\n def __init__(self, name, text, text_color=(255, 255, 255)):\n \"\"\"Initialise the OutlinedText. text is the text to be written on the Label. If text is None, it is replaced by an empty string.\"\"\"\n <|body_0|>\n\n def redraw(self):\n \"\"\"Redraw the Label if necessary.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class OutlinedText:\n \"\"\"A Label with outlined text and a transparent background. Additional attributes: OutlinedText.text_color A tuple (R, G, B) holding the color of the text.\"\"\"\n\n def __init__(self, name, text, text_color=(255, 255, 255)):\n \"\"\"Initialise the OutlinedText. text is the text to be written on the Label. 
If text is None, it is replaced by an empty string.\"\"\"\n self.text_color = text_color\n Label.__init__(self, name, text, pygame.rect.Rect((0, 0), (0, 0)))\n return\n\n def redraw(self):\n \"\"\"Redraw the Label if necessary.\"\"\"\n if self.text != self.cached_text:\n font_surface = BOLD_FONT.render(self.text, True, (0, 0, 0))\n target_surface = pygame.Surface(font_surface.get_rect().inflate(2, 2).size, flags=pygame.SRCALPHA)\n target_surface.blit(font_surface, (0, 0))\n target_surface.blit(font_surface, (1, 0))\n target_surface.blit(font_surface, (2, 0))\n target_surface.blit(font_surface, (0, 1))\n target_surface.blit(font_surface, (2, 1))\n target_surface.blit(font_surface, (0, 2))\n target_surface.blit(font_surface, (1, 2))\n target_surface.blit(font_surface, (2, 2))\n font_surface = BOLD_FONT.render(self.text, True, self.text_color)\n target_surface.blit(font_surface, (1, 1))\n center = self.rect.center\n self.image = target_surface\n self.rect = target_surface.get_rect()\n if center != (0, 0):\n self.rect.center = center\n self.last_rect = None\n self.cached_text = self.text\n return\n", "source": "the_stack_v2_python_sparse", "source_path": "reference_scripts/clickndrag-0.4.1/clickndrag/gui.py", "source_repo": "stivosaurus/rpi-snippets", "split": "val", "star_events_count": 1} {"blob_id": "dd7e00413d68a10694d6948d05a646c9fcb96217", "bodies": ["pins = pins or PINS\nself.touch = []\nfor gpio in pins:\n self.touch.append(Button(gpio))", "for item in self.touch:\n if item.is_pressed:\n return True\nreturn False"], "bodies_text": "<|body_start_0|>\n pins = pins or PINS\n self.touch = []\n for gpio in pins:\n self.touch.append(Button(gpio))\n<|end_body_0|>\n\n<|body_start_1|>\n for item in self.touch:\n if item.is_pressed:\n return True\n return False\n<|end_body_1|>\n", "class_docstring": "Sensor class", "class_name": "Sensor", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Sensor:\n \"\"\"Sensor class\"\"\"\n\n def __init__(self, pins=None):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def check_sensor(self):\n \"\"\"Check sensor state\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pins = pins or PINS\n self.touch = []\n for gpio in pins:\n self.touch.append(Button(gpio))\n<|end_body_0|>\n\n<|body_start_1|>\n for item in self.touch:\n if item.is_pressed:\n return True\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000048", "length_bytes": 1508, "license_type": "permissive", "methods": [{"docstring": "Constructor", "name": "__init__", "signature": "def __init__(self, pins=None)"}, {"docstring": "Check sensor state", "name": "check_sensor", "signature": "def check_sensor(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002365", "prompt": "Implement the Python class `Sensor` described below.\n\nClass description:\nSensor class\n\nMethod signatures and docstrings:\n- def __init__(self, pins=None): Constructor\n- def check_sensor(self): Check sensor state", "prompted_full_text": "Implement the Python class `Sensor` described below.\n\nClass description:\nSensor class\n\nMethod signatures and docstrings:\n- def __init__(self, pins=None): Constructor\n- def check_sensor(self): Check sensor state\n\n<|skeleton|>\nclass Sensor:\n \"\"\"Sensor class\"\"\"\n\n def __init__(self, pins=None):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def check_sensor(self):\n \"\"\"Check sensor state\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pins = pins or 
PINS\n self.touch = []\n for gpio in pins:\n self.touch.append(Button(gpio))\n<|end_body_0|>\n\n<|body_start_1|>\n for item in self.touch:\n if item.is_pressed:\n return True\n return False\n<|end_body_1|>\n", "revision_id": "cfba2860145978904d1dd427f2326efeccfc561a", "skeleton": "<|skeleton|>\nclass Sensor:\n \"\"\"Sensor class\"\"\"\n\n def __init__(self, pins=None):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def check_sensor(self):\n \"\"\"Check sensor state\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Sensor:\n \"\"\"Sensor class\"\"\"\n\n def __init__(self, pins=None):\n \"\"\"Constructor\"\"\"\n pins = pins or PINS\n self.touch = []\n for gpio in pins:\n self.touch.append(Button(gpio))\n\n def check_sensor(self):\n \"\"\"Check sensor state\"\"\"\n for item in self.touch:\n if item.is_pressed:\n return True\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "chapter_12/avoidance.py", "source_repo": "packtjaniceg/Raspberry-Pi-4-Cookbook-for-Python-Programmers-Fourth-Edition", "split": "val", "star_events_count": 0} {"blob_id": "019acd180f9ea97c0406f9698e498a47565b3660", "bodies": ["super().__init__()\nself.orig_obs_space = obs_space\nself.embedding_size = self.orig_obs_space['doc']['0'].shape[0]\nself.num_candidates = len(self.orig_obs_space['doc'])\nassert self.orig_obs_space['user'].shape[0] == self.embedding_size\nself.q_nets = nn.ModuleList()\nfor i in range(self.num_candidates):\n layers = nn.Sequential()\n ins = 2 * self.embedding_size\n for j, h in enumerate(fcnet_hiddens_per_candidate):\n layers.add_module(f'q_layer_{i}_{j}', SlimFC(in_size=ins, out_size=h, activation_fn='relu'))\n ins = h\n layers.add_module(f'q_out_{i}', SlimFC(ins, 1, activation_fn=None))\n self.q_nets.append(layers)", "q_outs = []\nfor i in range(self.num_candidates):\n user_cat_doc = torch.cat([user, docs[i]], dim=1)\n q_outs.append(self.q_nets[i](user_cat_doc))\nreturn torch.cat(q_outs, dim=1)"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.orig_obs_space = obs_space\n self.embedding_size = self.orig_obs_space['doc']['0'].shape[0]\n self.num_candidates = len(self.orig_obs_space['doc'])\n assert self.orig_obs_space['user'].shape[0] == self.embedding_size\n self.q_nets = nn.ModuleList()\n for i in range(self.num_candidates):\n layers = nn.Sequential()\n ins = 2 * self.embedding_size\n for j, h in enumerate(fcnet_hiddens_per_candidate):\n layers.add_module(f'q_layer_{i}_{j}', SlimFC(in_size=ins, out_size=h, activation_fn='relu'))\n ins = h\n layers.add_module(f'q_out_{i}', SlimFC(ins, 1, activation_fn=None))\n self.q_nets.append(layers)\n<|end_body_0|>\n\n<|body_start_1|>\n q_outs = []\n for i in range(self.num_candidates):\n user_cat_doc = torch.cat([user, docs[i]], dim=1)\n q_outs.append(self.q_nets[i](user_cat_doc))\n return torch.cat(q_outs, dim=1)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "QValueModel", "detected_licenses": ["MIT", "BSD-3-Clause", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass QValueModel:\n\n def __init__(self, obs_space: gym.spaces.Space, fcnet_hiddens_per_candidate=(256, 32)):\n \"\"\"Initializes a QValueModel instance. Each document candidate receives one full Q-value stack, defined by `fcnet_hiddens_per_candidate`. The input to each of these Q-value stacks is always {[user] concat [document[i]] for i in document_candidates}. 
Extra model kwargs: fcnet_hiddens_per_candidate: List of layer-sizes for each(!) of the candidate documents.\"\"\"\n <|body_0|>\n\n def forward(self, user: TensorType, docs: List[TensorType]) -> TensorType:\n \"\"\"Returns Q-values, 1 for each candidate document, given user and doc tensors. Args: user: [B x u] where u=embedding of user features. docs: List[[B x d]] where d=embedding of doc features. Each item in the list represents one document candidate. Returns: Tensor ([batch, num candidates) of Q-values. 1 Q-value per document candidate.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.orig_obs_space = obs_space\n self.embedding_size = self.orig_obs_space['doc']['0'].shape[0]\n self.num_candidates = len(self.orig_obs_space['doc'])\n assert self.orig_obs_space['user'].shape[0] == self.embedding_size\n self.q_nets = nn.ModuleList()\n for i in range(self.num_candidates):\n layers = nn.Sequential()\n ins = 2 * self.embedding_size\n for j, h in enumerate(fcnet_hiddens_per_candidate):\n layers.add_module(f'q_layer_{i}_{j}', SlimFC(in_size=ins, out_size=h, activation_fn='relu'))\n ins = h\n layers.add_module(f'q_out_{i}', SlimFC(ins, 1, activation_fn=None))\n self.q_nets.append(layers)\n<|end_body_0|>\n\n<|body_start_1|>\n q_outs = []\n for i in range(self.num_candidates):\n user_cat_doc = torch.cat([user, docs[i]], dim=1)\n q_outs.append(self.q_nets[i](user_cat_doc))\n return torch.cat(q_outs, dim=1)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000049", "length_bytes": 6840, "license_type": "permissive", "methods": [{"docstring": "Initializes a QValueModel instance. Each document candidate receives one full Q-value stack, defined by `fcnet_hiddens_per_candidate`. The input to each of these Q-value stacks is always {[user] concat [document[i]] for i in document_candidates}. Extra model kwargs: fcnet_hiddens_per_candidate: List of layer-sizes for each(!) of the candidate documents.", "name": "__init__", "signature": "def __init__(self, obs_space: gym.spaces.Space, fcnet_hiddens_per_candidate=(256, 32))"}, {"docstring": "Returns Q-values, 1 for each candidate document, given user and doc tensors. Args: user: [B x u] where u=embedding of user features. docs: List[[B x d]] where d=embedding of doc features. Each item in the list represents one document candidate. Returns: Tensor ([batch, num candidates) of Q-values. 1 Q-value per document candidate.", "name": "forward", "signature": "def forward(self, user: TensorType, docs: List[TensorType]) -> TensorType"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004927", "prompt": "Implement the Python class `QValueModel` described below.\n\nClass description:\nImplement the QValueModel class.\n\nMethod signatures and docstrings:\n- def __init__(self, obs_space: gym.spaces.Space, fcnet_hiddens_per_candidate=(256, 32)): Initializes a QValueModel instance. Each document candidate receives one full Q-value stack, defined by `fcnet_hiddens_per_candidate`. The input to each of these Q-value stacks is always {[user] concat [document[i]] for i in document_candidates}. Extra model kwargs: fcnet_hiddens_per_candidate: List of layer-sizes for each(!) of the candidate documents.\n- def forward(self, user: TensorType, docs: List[TensorType]) -> TensorType: Returns Q-values, 1 for each candidate document, given user and doc tensors. Args: user: [B x u] where u=embedding of user features. docs: List[[B x d]] where d=embedding of doc features. 
Each item in the list represents one document candidate. Returns: Tensor ([batch, num candidates) of Q-values. 1 Q-value per document candidate.", "prompted_full_text": "Implement the Python class `QValueModel` described below.\n\nClass description:\nImplement the QValueModel class.\n\nMethod signatures and docstrings:\n- def __init__(self, obs_space: gym.spaces.Space, fcnet_hiddens_per_candidate=(256, 32)): Initializes a QValueModel instance. Each document candidate receives one full Q-value stack, defined by `fcnet_hiddens_per_candidate`. The input to each of these Q-value stacks is always {[user] concat [document[i]] for i in document_candidates}. Extra model kwargs: fcnet_hiddens_per_candidate: List of layer-sizes for each(!) of the candidate documents.\n- def forward(self, user: TensorType, docs: List[TensorType]) -> TensorType: Returns Q-values, 1 for each candidate document, given user and doc tensors. Args: user: [B x u] where u=embedding of user features. docs: List[[B x d]] where d=embedding of doc features. Each item in the list represents one document candidate. Returns: Tensor ([batch, num candidates) of Q-values. 1 Q-value per document candidate.\n\n<|skeleton|>\nclass QValueModel:\n\n def __init__(self, obs_space: gym.spaces.Space, fcnet_hiddens_per_candidate=(256, 32)):\n \"\"\"Initializes a QValueModel instance. Each document candidate receives one full Q-value stack, defined by `fcnet_hiddens_per_candidate`. The input to each of these Q-value stacks is always {[user] concat [document[i]] for i in document_candidates}. Extra model kwargs: fcnet_hiddens_per_candidate: List of layer-sizes for each(!) of the candidate documents.\"\"\"\n <|body_0|>\n\n def forward(self, user: TensorType, docs: List[TensorType]) -> TensorType:\n \"\"\"Returns Q-values, 1 for each candidate document, given user and doc tensors. Args: user: [B x u] where u=embedding of user features. docs: List[[B x d]] where d=embedding of doc features. Each item in the list represents one document candidate. Returns: Tensor ([batch, num candidates) of Q-values. 1 Q-value per document candidate.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.orig_obs_space = obs_space\n self.embedding_size = self.orig_obs_space['doc']['0'].shape[0]\n self.num_candidates = len(self.orig_obs_space['doc'])\n assert self.orig_obs_space['user'].shape[0] == self.embedding_size\n self.q_nets = nn.ModuleList()\n for i in range(self.num_candidates):\n layers = nn.Sequential()\n ins = 2 * self.embedding_size\n for j, h in enumerate(fcnet_hiddens_per_candidate):\n layers.add_module(f'q_layer_{i}_{j}', SlimFC(in_size=ins, out_size=h, activation_fn='relu'))\n ins = h\n layers.add_module(f'q_out_{i}', SlimFC(ins, 1, activation_fn=None))\n self.q_nets.append(layers)\n<|end_body_0|>\n\n<|body_start_1|>\n q_outs = []\n for i in range(self.num_candidates):\n user_cat_doc = torch.cat([user, docs[i]], dim=1)\n q_outs.append(self.q_nets[i](user_cat_doc))\n return torch.cat(q_outs, dim=1)\n<|end_body_1|>\n", "revision_id": "edba68c3e7cf255d1d6479329f305adb7fa4c3ed", "skeleton": "<|skeleton|>\nclass QValueModel:\n\n def __init__(self, obs_space: gym.spaces.Space, fcnet_hiddens_per_candidate=(256, 32)):\n \"\"\"Initializes a QValueModel instance. Each document candidate receives one full Q-value stack, defined by `fcnet_hiddens_per_candidate`. The input to each of these Q-value stacks is always {[user] concat [document[i]] for i in document_candidates}. 
Extra model kwargs: fcnet_hiddens_per_candidate: List of layer-sizes for each(!) of the candidate documents.\"\"\"\n <|body_0|>\n\n def forward(self, user: TensorType, docs: List[TensorType]) -> TensorType:\n \"\"\"Returns Q-values, 1 for each candidate document, given user and doc tensors. Args: user: [B x u] where u=embedding of user features. docs: List[[B x d]] where d=embedding of doc features. Each item in the list represents one document candidate. Returns: Tensor ([batch, num candidates) of Q-values. 1 Q-value per document candidate.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class QValueModel:\n def __init__(self, obs_space: gym.spaces.Space, fcnet_hiddens_per_candidate=(256, 32)):\n \"\"\"Initializes a QValueModel instance. Each document candidate receives one full Q-value stack, defined by `fcnet_hiddens_per_candidate`. The input to each of these Q-value stacks is always {[user] concat [document[i]] for i in document_candidates}. Extra model kwargs: fcnet_hiddens_per_candidate: List of layer-sizes for each(!) of the candidate documents.\"\"\"\n super().__init__()\n self.orig_obs_space = obs_space\n self.embedding_size = self.orig_obs_space['doc']['0'].shape[0]\n self.num_candidates = len(self.orig_obs_space['doc'])\n assert self.orig_obs_space['user'].shape[0] == self.embedding_size\n self.q_nets = nn.ModuleList()\n for i in range(self.num_candidates):\n layers = nn.Sequential()\n ins = 2 * self.embedding_size\n for j, h in enumerate(fcnet_hiddens_per_candidate):\n layers.add_module(f'q_layer_{i}_{j}', SlimFC(in_size=ins, out_size=h, activation_fn='relu'))\n ins = h\n layers.add_module(f'q_out_{i}', SlimFC(ins, 1, activation_fn=None))\n self.q_nets.append(layers)\n\n def forward(self, user: TensorType, docs: List[TensorType]) -> TensorType:\n \"\"\"Returns Q-values, 1 for each candidate document, given user and doc tensors. Args: user: [B x u] where u=embedding of user features. docs: List[[B x d]] where d=embedding of doc features. Each item in the list represents one document candidate. Returns: Tensor ([batch, num candidates) of Q-values. 
1 Q-value per document candidate.\"\"\"\n q_outs = []\n for i in range(self.num_candidates):\n user_cat_doc = torch.cat([user, docs[i]], dim=1)\n q_outs.append(self.q_nets[i](user_cat_doc))\n return torch.cat(q_outs, dim=1)\n", "source": "the_stack_v2_python_sparse", "source_path": "rllib/algorithms/slateq/slateq_torch_model.py", "source_repo": "ray-project/ray", "split": "val", "star_events_count": 29482} {"blob_id": "8bde023e3b8bb84b45cb14da24585f8a5fe013b0", "bodies": ["cls.flags = magic.MAGIC_NONE\nif mime:\n cls.flags |= magic.MAGIC_MIME\nif mime_encoding:\n cls.flags |= magic.MAGIC_MIME_ENCODING\nif keep_going:\n cls.flags |= magic.MAGIC_CONTINUE\ncls.old_api = True\ntry:\n cls.cookie = magic.open(cls.flags)\n if magic_file and os.path.exists(magic_file):\n cls.cookie.load(magic_file)\n else:\n cls.cookie.load()\nexcept AttributeError:\n cls.old_api = False\n cls.cookie = magic.Magic(mime=mime, magic_file=magic_file, mime_encoding=mime_encoding, keep_going=keep_going)\n cls.cookie.file = cls.cookie.from_file\n cls.cookie.buffer = cls.cookie.from_buffer", "cls._initialize(**kwargs)\ntry:\n filetype = cls.cookie.buffer(buf)\n if cls.old_api:\n cls.cookie.close()\nexcept magic.MagicException:\n filetype = None\nreturn filetype", "cls._initialize(**kwargs)\ntry:\n filetype = cls.cookie.file(filename)\n if cls.old_api:\n cls.cookie.close()\nexcept magic.MagicException:\n filetype = None\nreturn filetype"], "bodies_text": "<|body_start_0|>\n cls.flags = magic.MAGIC_NONE\n if mime:\n cls.flags |= magic.MAGIC_MIME\n if mime_encoding:\n cls.flags |= magic.MAGIC_MIME_ENCODING\n if keep_going:\n cls.flags |= magic.MAGIC_CONTINUE\n cls.old_api = True\n try:\n cls.cookie = magic.open(cls.flags)\n if magic_file and os.path.exists(magic_file):\n cls.cookie.load(magic_file)\n else:\n cls.cookie.load()\n except AttributeError:\n cls.old_api = False\n cls.cookie = magic.Magic(mime=mime, magic_file=magic_file, mime_encoding=mime_encoding, keep_going=keep_going)\n cls.cookie.file = cls.cookie.from_file\n cls.cookie.buffer = cls.cookie.from_buffer\n<|end_body_0|>\n\n<|body_start_1|>\n cls._initialize(**kwargs)\n try:\n filetype = cls.cookie.buffer(buf)\n if cls.old_api:\n cls.cookie.close()\n except magic.MagicException:\n filetype = None\n return filetype\n<|end_body_1|>\n\n<|body_start_2|>\n cls._initialize(**kwargs)\n try:\n filetype = cls.cookie.file(filename)\n if cls.old_api:\n cls.cookie.close()\n except magic.MagicException:\n filetype = None\n return filetype\n<|end_body_2|>\n", "class_docstring": "Factory class for python-magic", "class_name": "Magic", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Magic:\n \"\"\"Factory class for python-magic\"\"\"\n\n def _initialize(cls, magic_file=None, mime=False, mime_encoding=False, keep_going=False):\n \"\"\"Initialize python-magic\"\"\"\n <|body_0|>\n\n def from_buffer(cls, buf, **kwargs):\n \"\"\"Compute mimetype from a buffer :param buf: buffer from where to get data\"\"\"\n <|body_1|>\n\n def from_file(cls, filename, **kwargs):\n \"\"\"Compute mimetype from file :param filename: name of a file from where to get data\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cls.flags = magic.MAGIC_NONE\n if mime:\n cls.flags |= magic.MAGIC_MIME\n if mime_encoding:\n cls.flags |= magic.MAGIC_MIME_ENCODING\n if keep_going:\n cls.flags |= magic.MAGIC_CONTINUE\n cls.old_api = True\n try:\n cls.cookie = magic.open(cls.flags)\n if magic_file and os.path.exists(magic_file):\n 
cls.cookie.load(magic_file)\n else:\n cls.cookie.load()\n except AttributeError:\n cls.old_api = False\n cls.cookie = magic.Magic(mime=mime, magic_file=magic_file, mime_encoding=mime_encoding, keep_going=keep_going)\n cls.cookie.file = cls.cookie.from_file\n cls.cookie.buffer = cls.cookie.from_buffer\n<|end_body_0|>\n\n<|body_start_1|>\n cls._initialize(**kwargs)\n try:\n filetype = cls.cookie.buffer(buf)\n if cls.old_api:\n cls.cookie.close()\n except magic.MagicException:\n filetype = None\n return filetype\n<|end_body_1|>\n\n<|body_start_2|>\n cls._initialize(**kwargs)\n try:\n filetype = cls.cookie.file(filename)\n if cls.old_api:\n cls.cookie.close()\n except magic.MagicException:\n filetype = None\n return filetype\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000050", "length_bytes": 3113, "license_type": "permissive", "methods": [{"docstring": "Initialize python-magic", "name": "_initialize", "signature": "def _initialize(cls, magic_file=None, mime=False, mime_encoding=False, keep_going=False)"}, {"docstring": "Compute mimetype from a buffer :param buf: buffer from where to get data", "name": "from_buffer", "signature": "def from_buffer(cls, buf, **kwargs)"}, {"docstring": "Compute mimetype from file :param filename: name of a file from where to get data", "name": "from_file", "signature": "def from_file(cls, filename, **kwargs)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003692", "prompt": "Implement the Python class `Magic` described below.\n\nClass description:\nFactory class for python-magic\n\nMethod signatures and docstrings:\n- def _initialize(cls, magic_file=None, mime=False, mime_encoding=False, keep_going=False): Initialize python-magic\n- def from_buffer(cls, buf, **kwargs): Compute mimetype from a buffer :param buf: buffer from where to get data\n- def from_file(cls, filename, **kwargs): Compute mimetype from file :param filename: name of a file from where to get data", "prompted_full_text": "Implement the Python class `Magic` described below.\n\nClass description:\nFactory class for python-magic\n\nMethod signatures and docstrings:\n- def _initialize(cls, magic_file=None, mime=False, mime_encoding=False, keep_going=False): Initialize python-magic\n- def from_buffer(cls, buf, **kwargs): Compute mimetype from a buffer :param buf: buffer from where to get data\n- def from_file(cls, filename, **kwargs): Compute mimetype from file :param filename: name of a file from where to get data\n\n<|skeleton|>\nclass Magic:\n \"\"\"Factory class for python-magic\"\"\"\n\n def _initialize(cls, magic_file=None, mime=False, mime_encoding=False, keep_going=False):\n \"\"\"Initialize python-magic\"\"\"\n <|body_0|>\n\n def from_buffer(cls, buf, **kwargs):\n \"\"\"Compute mimetype from a buffer :param buf: buffer from where to get data\"\"\"\n <|body_1|>\n\n def from_file(cls, filename, **kwargs):\n \"\"\"Compute mimetype from file :param filename: name of a file from where to get data\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cls.flags = magic.MAGIC_NONE\n if mime:\n cls.flags |= magic.MAGIC_MIME\n if mime_encoding:\n cls.flags |= magic.MAGIC_MIME_ENCODING\n if keep_going:\n cls.flags |= magic.MAGIC_CONTINUE\n cls.old_api = True\n try:\n cls.cookie = magic.open(cls.flags)\n if magic_file and os.path.exists(magic_file):\n cls.cookie.load(magic_file)\n else:\n cls.cookie.load()\n except AttributeError:\n cls.old_api = False\n cls.cookie = magic.Magic(mime=mime, magic_file=magic_file, mime_encoding=mime_encoding, 
keep_going=keep_going)\n cls.cookie.file = cls.cookie.from_file\n cls.cookie.buffer = cls.cookie.from_buffer\n<|end_body_0|>\n\n<|body_start_1|>\n cls._initialize(**kwargs)\n try:\n filetype = cls.cookie.buffer(buf)\n if cls.old_api:\n cls.cookie.close()\n except magic.MagicException:\n filetype = None\n return filetype\n<|end_body_1|>\n\n<|body_start_2|>\n cls._initialize(**kwargs)\n try:\n filetype = cls.cookie.file(filename)\n if cls.old_api:\n cls.cookie.close()\n except magic.MagicException:\n filetype = None\n return filetype\n<|end_body_2|>\n", "revision_id": "4e3e2c0fa82e352a1a7a7fd02381a4d84bed9f09", "skeleton": "<|skeleton|>\nclass Magic:\n \"\"\"Factory class for python-magic\"\"\"\n\n def _initialize(cls, magic_file=None, mime=False, mime_encoding=False, keep_going=False):\n \"\"\"Initialize python-magic\"\"\"\n <|body_0|>\n\n def from_buffer(cls, buf, **kwargs):\n \"\"\"Compute mimetype from a buffer :param buf: buffer from where to get data\"\"\"\n <|body_1|>\n\n def from_file(cls, filename, **kwargs):\n \"\"\"Compute mimetype from file :param filename: name of a file from where to get data\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Magic:\n \"\"\"Factory class for python-magic\"\"\"\n\n def _initialize(cls, magic_file=None, mime=False, mime_encoding=False, keep_going=False):\n \"\"\"Initialize python-magic\"\"\"\n cls.flags = magic.MAGIC_NONE\n if mime:\n cls.flags |= magic.MAGIC_MIME\n if mime_encoding:\n cls.flags |= magic.MAGIC_MIME_ENCODING\n if keep_going:\n cls.flags |= magic.MAGIC_CONTINUE\n cls.old_api = True\n try:\n cls.cookie = magic.open(cls.flags)\n if magic_file and os.path.exists(magic_file):\n cls.cookie.load(magic_file)\n else:\n cls.cookie.load()\n except AttributeError:\n cls.old_api = False\n cls.cookie = magic.Magic(mime=mime, magic_file=magic_file, mime_encoding=mime_encoding, keep_going=keep_going)\n cls.cookie.file = cls.cookie.from_file\n cls.cookie.buffer = cls.cookie.from_buffer\n\n def from_buffer(cls, buf, **kwargs):\n \"\"\"Compute mimetype from a buffer :param buf: buffer from where to get data\"\"\"\n cls._initialize(**kwargs)\n try:\n filetype = cls.cookie.buffer(buf)\n if cls.old_api:\n cls.cookie.close()\n except magic.MagicException:\n filetype = None\n return filetype\n\n def from_file(cls, filename, **kwargs):\n \"\"\"Compute mimetype from file :param filename: name of a file from where to get data\"\"\"\n cls._initialize(**kwargs)\n try:\n filetype = cls.cookie.file(filename)\n if cls.old_api:\n cls.cookie.close()\n except magic.MagicException:\n filetype = None\n return filetype\n", "source": "the_stack_v2_python_sparse", "source_path": "common/src/utils/mimetypes.py", "source_repo": "quarkslab/irma", "split": "val", "star_events_count": 267} {"blob_id": "6ced3c0472633753126be4992303a1f4c315f026", "bodies": ["self.directory = None\nself.cuda = True\nself.augmentation = None\nself.loss = None\nself.summary_gradients = None\nself.trainloader = None\nself.testloader = None\nself.epochs = None\nself.snapshot = None\nself.get_writer = None\nself.get_optimizer = None\nself.get_scheduler = None\nself.get_model = None", "assert self.directory is not None\nassert len(self.directory) > 0\nassert self.augmentation is None or isinstance(self.augmentation, iaa.meta.Augmenter)\nassert isinstance(self.trainloader, torch.utils.data.DataLoader)\nassert len(self.trainloader) > 0\nassert isinstance(self.testloader, 
torch.utils.data.DataLoader)\nassert len(self.testloader) > 0\nassert self.epochs > 0\nassert self.snapshot is None or self.snapshot > 0\nassert callable(self.get_optimizer)\nassert callable(self.get_scheduler)\nassert callable(self.get_model)\nassert callable(self.get_writer)"], "bodies_text": "<|body_start_0|>\n self.directory = None\n self.cuda = True\n self.augmentation = None\n self.loss = None\n self.summary_gradients = None\n self.trainloader = None\n self.testloader = None\n self.epochs = None\n self.snapshot = None\n self.get_writer = None\n self.get_optimizer = None\n self.get_scheduler = None\n self.get_model = None\n<|end_body_0|>\n\n<|body_start_1|>\n assert self.directory is not None\n assert len(self.directory) > 0\n assert self.augmentation is None or isinstance(self.augmentation, iaa.meta.Augmenter)\n assert isinstance(self.trainloader, torch.utils.data.DataLoader)\n assert len(self.trainloader) > 0\n assert isinstance(self.testloader, torch.utils.data.DataLoader)\n assert len(self.testloader) > 0\n assert self.epochs > 0\n assert self.snapshot is None or self.snapshot > 0\n assert callable(self.get_optimizer)\n assert callable(self.get_scheduler)\n assert callable(self.get_model)\n assert callable(self.get_writer)\n<|end_body_1|>\n", "class_docstring": "Configuration for normal training.", "class_name": "NormalTrainingConfig", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NormalTrainingConfig:\n \"\"\"Configuration for normal training.\"\"\"\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def validate(self):\n \"\"\"Check validity.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.directory = None\n self.cuda = True\n self.augmentation = None\n self.loss = None\n self.summary_gradients = None\n self.trainloader = None\n self.testloader = None\n self.epochs = None\n self.snapshot = None\n self.get_writer = None\n self.get_optimizer = None\n self.get_scheduler = None\n self.get_model = None\n<|end_body_0|>\n\n<|body_start_1|>\n assert self.directory is not None\n assert len(self.directory) > 0\n assert self.augmentation is None or isinstance(self.augmentation, iaa.meta.Augmenter)\n assert isinstance(self.trainloader, torch.utils.data.DataLoader)\n assert len(self.trainloader) > 0\n assert isinstance(self.testloader, torch.utils.data.DataLoader)\n assert len(self.testloader) > 0\n assert self.epochs > 0\n assert self.snapshot is None or self.snapshot > 0\n assert callable(self.get_optimizer)\n assert callable(self.get_scheduler)\n assert callable(self.get_model)\n assert callable(self.get_writer)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000051", "length_bytes": 16771, "license_type": "no_license", "methods": [{"docstring": "Constructor.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Check validity.", "name": "validate", "signature": "def validate(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003948", "prompt": "Implement the Python class `NormalTrainingConfig` described below.\n\nClass description:\nConfiguration for normal training.\n\nMethod signatures and docstrings:\n- def __init__(self): Constructor.\n- def validate(self): Check validity.", "prompted_full_text": "Implement the Python class `NormalTrainingConfig` described below.\n\nClass description:\nConfiguration for normal training.\n\nMethod signatures and docstrings:\n- def __init__(self): Constructor.\n- def validate(self): Check 
validity.\n\n<|skeleton|>\nclass NormalTrainingConfig:\n \"\"\"Configuration for normal training.\"\"\"\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def validate(self):\n \"\"\"Check validity.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.directory = None\n self.cuda = True\n self.augmentation = None\n self.loss = None\n self.summary_gradients = None\n self.trainloader = None\n self.testloader = None\n self.epochs = None\n self.snapshot = None\n self.get_writer = None\n self.get_optimizer = None\n self.get_scheduler = None\n self.get_model = None\n<|end_body_0|>\n\n<|body_start_1|>\n assert self.directory is not None\n assert len(self.directory) > 0\n assert self.augmentation is None or isinstance(self.augmentation, iaa.meta.Augmenter)\n assert isinstance(self.trainloader, torch.utils.data.DataLoader)\n assert len(self.trainloader) > 0\n assert isinstance(self.testloader, torch.utils.data.DataLoader)\n assert len(self.testloader) > 0\n assert self.epochs > 0\n assert self.snapshot is None or self.snapshot > 0\n assert callable(self.get_optimizer)\n assert callable(self.get_scheduler)\n assert callable(self.get_model)\n assert callable(self.get_writer)\n<|end_body_1|>\n", "revision_id": "736c99b55a77d0c650eae5ced2d8312d13af0baf", "skeleton": "<|skeleton|>\nclass NormalTrainingConfig:\n \"\"\"Configuration for normal training.\"\"\"\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def validate(self):\n \"\"\"Check validity.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NormalTrainingConfig:\n \"\"\"Configuration for normal training.\"\"\"\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n self.directory = None\n self.cuda = True\n self.augmentation = None\n self.loss = None\n self.summary_gradients = None\n self.trainloader = None\n self.testloader = None\n self.epochs = None\n self.snapshot = None\n self.get_writer = None\n self.get_optimizer = None\n self.get_scheduler = None\n self.get_model = None\n\n def validate(self):\n \"\"\"Check validity.\"\"\"\n assert self.directory is not None\n assert len(self.directory) > 0\n assert self.augmentation is None or isinstance(self.augmentation, iaa.meta.Augmenter)\n assert isinstance(self.trainloader, torch.utils.data.DataLoader)\n assert len(self.trainloader) > 0\n assert isinstance(self.testloader, torch.utils.data.DataLoader)\n assert len(self.testloader) > 0\n assert self.epochs > 0\n assert self.snapshot is None or self.snapshot > 0\n assert callable(self.get_optimizer)\n assert callable(self.get_scheduler)\n assert callable(self.get_model)\n assert callable(self.get_writer)\n", "source": "the_stack_v2_python_sparse", "source_path": "common/experiments.py", "source_repo": "Adversarial-Intelligence-Group/color-adversarial-training", "split": "val", "star_events_count": 0} {"blob_id": "fb00bdb3e18dfac7ceb21663aae124cdb418fe15", "bodies": ["if '_xml_ns' in kwargs:\n self._xml_ns = kwargs['_xml_ns']\nif '_xml_ns_key' in kwargs:\n self._xml_ns_key = kwargs['_xml_ns_key']\nself.DCXPoly = DCXPoly\nself.DCYPoly = DCYPoly\nsuper(EBType, self).__init__(**kwargs)", "if self.DCXPoly is None or self.DCYPoly is None:\n return None\nreturn numpy.array([self.DCXPoly(t), self.DCYPoly(t)])"], "bodies_text": "<|body_start_0|>\n if '_xml_ns' in kwargs:\n self._xml_ns = kwargs['_xml_ns']\n if '_xml_ns_key' in kwargs:\n self._xml_ns_key = kwargs['_xml_ns_key']\n self.DCXPoly = 
DCXPoly\n self.DCYPoly = DCYPoly\n super(EBType, self).__init__(**kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.DCXPoly is None or self.DCYPoly is None:\n return None\n return numpy.array([self.DCXPoly(t), self.DCYPoly(t)])\n<|end_body_1|>\n", "class_docstring": "Electrical boresight (EB) steering directions for an electronically steered array.", "class_name": "EBType", "detected_licenses": ["MIT", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-public-domain"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EBType:\n \"\"\"Electrical boresight (EB) steering directions for an electronically steered array.\"\"\"\n\n def __init__(self, DCXPoly: Union[Poly1DType, numpy.ndarray, list, tuple]=None, DCYPoly: Union[Poly1DType, numpy.ndarray, list, tuple]=None, **kwargs):\n \"\"\"Parameters ---------- DCXPoly : Poly1DType|numpy.ndarray|list|tuple DCYPoly : Poly1DType|numpy.ndarray|list|tuple kwargs\"\"\"\n <|body_0|>\n\n def __call__(self, t: Union[float, int, numpy.ndarray]):\n \"\"\"Evaluate the polynomial at points `t`. This passes `t` straight through to :func:`polyval` of `numpy.polynomial.polynomial` for each of `DCXPoly,DCYPoly` components. If any of `DCXPoly,DCYPoly` is not populated, then `None` is returned. Parameters ---------- t : float|int|numpy.ndarray The point(s) at which to evaluate. Returns ------- None|numpy.ndarray\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if '_xml_ns' in kwargs:\n self._xml_ns = kwargs['_xml_ns']\n if '_xml_ns_key' in kwargs:\n self._xml_ns_key = kwargs['_xml_ns_key']\n self.DCXPoly = DCXPoly\n self.DCYPoly = DCYPoly\n super(EBType, self).__init__(**kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.DCXPoly is None or self.DCYPoly is None:\n return None\n return numpy.array([self.DCXPoly(t), self.DCYPoly(t)])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000052", "length_bytes": 9216, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- DCXPoly : Poly1DType|numpy.ndarray|list|tuple DCYPoly : Poly1DType|numpy.ndarray|list|tuple kwargs", "name": "__init__", "signature": "def __init__(self, DCXPoly: Union[Poly1DType, numpy.ndarray, list, tuple]=None, DCYPoly: Union[Poly1DType, numpy.ndarray, list, tuple]=None, **kwargs)"}, {"docstring": "Evaluate the polynomial at points `t`. This passes `t` straight through to :func:`polyval` of `numpy.polynomial.polynomial` for each of `DCXPoly,DCYPoly` components. If any of `DCXPoly,DCYPoly` is not populated, then `None` is returned. Parameters ---------- t : float|int|numpy.ndarray The point(s) at which to evaluate. Returns ------- None|numpy.ndarray", "name": "__call__", "signature": "def __call__(self, t: Union[float, int, numpy.ndarray])"}], "n_methods": 2, "prompt": "Implement the Python class `EBType` described below.\n\nClass description:\nElectrical boresight (EB) steering directions for an electronically steered array.\n\nMethod signatures and docstrings:\n- def __init__(self, DCXPoly: Union[Poly1DType, numpy.ndarray, list, tuple]=None, DCYPoly: Union[Poly1DType, numpy.ndarray, list, tuple]=None, **kwargs): Parameters ---------- DCXPoly : Poly1DType|numpy.ndarray|list|tuple DCYPoly : Poly1DType|numpy.ndarray|list|tuple kwargs\n- def __call__(self, t: Union[float, int, numpy.ndarray]): Evaluate the polynomial at points `t`. This passes `t` straight through to :func:`polyval` of `numpy.polynomial.polynomial` for each of `DCXPoly,DCYPoly` components. 
If any of `DCXPoly,DCYPoly` is not populated, then `None` is returned. Parameters ---------- t : float|int|numpy.ndarray The point(s) at which to evaluate. Returns ------- None|numpy.ndarray", "prompted_full_text": "Implement the Python class `EBType` described below.\n\nClass description:\nElectrical boresight (EB) steering directions for an electronically steered array.\n\nMethod signatures and docstrings:\n- def __init__(self, DCXPoly: Union[Poly1DType, numpy.ndarray, list, tuple]=None, DCYPoly: Union[Poly1DType, numpy.ndarray, list, tuple]=None, **kwargs): Parameters ---------- DCXPoly : Poly1DType|numpy.ndarray|list|tuple DCYPoly : Poly1DType|numpy.ndarray|list|tuple kwargs\n- def __call__(self, t: Union[float, int, numpy.ndarray]): Evaluate the polynomial at points `t`. This passes `t` straight through to :func:`polyval` of `numpy.polynomial.polynomial` for each of `DCXPoly,DCYPoly` components. If any of `DCXPoly,DCYPoly` is not populated, then `None` is returned. Parameters ---------- t : float|int|numpy.ndarray The point(s) at which to evaluate. Returns ------- None|numpy.ndarray\n\n<|skeleton|>\nclass EBType:\n \"\"\"Electrical boresight (EB) steering directions for an electronically steered array.\"\"\"\n\n def __init__(self, DCXPoly: Union[Poly1DType, numpy.ndarray, list, tuple]=None, DCYPoly: Union[Poly1DType, numpy.ndarray, list, tuple]=None, **kwargs):\n \"\"\"Parameters ---------- DCXPoly : Poly1DType|numpy.ndarray|list|tuple DCYPoly : Poly1DType|numpy.ndarray|list|tuple kwargs\"\"\"\n <|body_0|>\n\n def __call__(self, t: Union[float, int, numpy.ndarray]):\n \"\"\"Evaluate the polynomial at points `t`. This passes `t` straight through to :func:`polyval` of `numpy.polynomial.polynomial` for each of `DCXPoly,DCYPoly` components. If any of `DCXPoly,DCYPoly` is not populated, then `None` is returned. Parameters ---------- t : float|int|numpy.ndarray The point(s) at which to evaluate. Returns ------- None|numpy.ndarray\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if '_xml_ns' in kwargs:\n self._xml_ns = kwargs['_xml_ns']\n if '_xml_ns_key' in kwargs:\n self._xml_ns_key = kwargs['_xml_ns_key']\n self.DCXPoly = DCXPoly\n self.DCYPoly = DCYPoly\n super(EBType, self).__init__(**kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.DCXPoly is None or self.DCYPoly is None:\n return None\n return numpy.array([self.DCXPoly(t), self.DCYPoly(t)])\n<|end_body_1|>\n", "revision_id": "de1b1886f161a83b6c89aadc7a2c7cfc4892ef81", "skeleton": "<|skeleton|>\nclass EBType:\n \"\"\"Electrical boresight (EB) steering directions for an electronically steered array.\"\"\"\n\n def __init__(self, DCXPoly: Union[Poly1DType, numpy.ndarray, list, tuple]=None, DCYPoly: Union[Poly1DType, numpy.ndarray, list, tuple]=None, **kwargs):\n \"\"\"Parameters ---------- DCXPoly : Poly1DType|numpy.ndarray|list|tuple DCYPoly : Poly1DType|numpy.ndarray|list|tuple kwargs\"\"\"\n <|body_0|>\n\n def __call__(self, t: Union[float, int, numpy.ndarray]):\n \"\"\"Evaluate the polynomial at points `t`. This passes `t` straight through to :func:`polyval` of `numpy.polynomial.polynomial` for each of `DCXPoly,DCYPoly` components. If any of `DCXPoly,DCYPoly` is not populated, then `None` is returned. Parameters ---------- t : float|int|numpy.ndarray The point(s) at which to evaluate. 
Returns ------- None|numpy.ndarray\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class EBType:\n    \"\"\"Electrical boresight (EB) steering directions for an electronically steered array.\"\"\"\n\n    def __init__(self, DCXPoly: Union[Poly1DType, numpy.ndarray, list, tuple]=None, DCYPoly: Union[Poly1DType, numpy.ndarray, list, tuple]=None, **kwargs):\n        \"\"\"Parameters ---------- DCXPoly : Poly1DType|numpy.ndarray|list|tuple DCYPoly : Poly1DType|numpy.ndarray|list|tuple kwargs\"\"\"\n        if '_xml_ns' in kwargs:\n            self._xml_ns = kwargs['_xml_ns']\n        if '_xml_ns_key' in kwargs:\n            self._xml_ns_key = kwargs['_xml_ns_key']\n        self.DCXPoly = DCXPoly\n        self.DCYPoly = DCYPoly\n        super(EBType, self).__init__(**kwargs)\n\n    def __call__(self, t: Union[float, int, numpy.ndarray]):\n        \"\"\"Evaluate the polynomial at points `t`. This passes `t` straight through to :func:`polyval` of `numpy.polynomial.polynomial` for each of `DCXPoly,DCYPoly` components. If any of `DCXPoly,DCYPoly` is not populated, then `None` is returned. Parameters ---------- t : float|int|numpy.ndarray The point(s) at which to evaluate. Returns ------- None|numpy.ndarray\"\"\"\n        if self.DCXPoly is None or self.DCYPoly is None:\n            return None\n        return numpy.array([self.DCXPoly(t), self.DCYPoly(t)])\n", "source": "the_stack_v2_python_sparse", "source_path": "sarpy/io/complex/sicd_elements/Antenna.py", "source_repo": "ngageoint/sarpy", "split": "val", "star_events_count": 192} {"blob_id": "8cd6b630fe3755d48fbd8218cc395b12b8528bff", "bodies": ["super().__init__()\nargs = parse_args()\nif args.model_name == 'faster_rcnn':\n    self.model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True).eval()\nelif args.model_name == 'yolov3':\n    yolov3_url = 'https://github.com/ultralytics/yolov3/releases/download/v9.6.0/yolov3.pt'\n    if not os.path.exists('yolov3.pt'):\n        wget.download(yolov3_url, out='./')\n    self.model = torch.load('yolov3.pt')\nelse:\n    raise Exception('net type [%s] invalid! \\n please specify correct model_name' % args.model_name)", "with torch.no_grad():\n    y_pred = self.model(x)\n    return y_pred"], "bodies_text": "<|body_start_0|>\n super().__init__()\n args = parse_args()\n if args.model_name == 'faster_rcnn':\n self.model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True).eval()\n elif args.model_name == 'yolov3':\n yolov3_url = 'https://github.com/ultralytics/yolov3/releases/download/v9.6.0/yolov3.pt'\n if not os.path.exists('yolov3.pt'):\n wget.download(yolov3_url, out='./')\n self.model = torch.load('yolov3.pt')\n else:\n raise Exception('net type [%s] invalid! 
\\n please specify correct model_name' % args.model_name)\n<|end_body_0|>\n\n<|body_start_1|>\n with torch.no_grad():\n y_pred = self.model(x)\n return y_pred\n<|end_body_1|>\n", "class_docstring": "python inference model", "class_name": "Predictor", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Predictor:\n \"\"\"python inference model\"\"\"\n\n def __init__(self):\n \"\"\"model name\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"model forward inference Args: x: input Returns: y_pred: output\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n args = parse_args()\n if args.model_name == 'faster_rcnn':\n self.model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True).eval()\n elif args.model_name == 'yolov3':\n yolov3_url = 'https://github.com/ultralytics/yolov3/releases/download/v9.6.0/yolov3.pt'\n if not os.path.exists('yolov3.pt'):\n wget.download(yolov3_url, out='./')\n self.model = torch.load('yolov3.pt')\n else:\n raise Exception('net type [%s] invalid! 
\\n please specify correct model_name' % args.model_name)\n<|end_body_0|>\n\n<|body_start_1|>\n with torch.no_grad():\n y_pred = self.model(x)\n return y_pred\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000053", "length_bytes": 6047, "license_type": "no_license", "methods": [{"docstring": "model name", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "model forward inference Args: x: input Returns: y_pred: output", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 2, "prompt": "Implement the Python class `Predictor` described below.\n\nClass description:\npython inference model\n\nMethod signatures and docstrings:\n- def __init__(self): model name\n- def forward(self, x): model forward inference Args: x: input Returns: y_pred: output", "prompted_full_text": "Implement the Python class `Predictor` described below.\n\nClass description:\npython inference model\n\nMethod signatures and docstrings:\n- def __init__(self): model name\n- def forward(self, x): model forward inference Args: x: input Returns: y_pred: output\n\n<|skeleton|>\nclass Predictor:\n \"\"\"python inference model\"\"\"\n\n def __init__(self):\n \"\"\"model name\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"model forward inference Args: x: input Returns: y_pred: output\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n args = parse_args()\n if args.model_name == 'faster_rcnn':\n self.model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True).eval()\n elif args.model_name == 'yolov3':\n yolov3_url = 'https://github.com/ultralytics/yolov3/releases/download/v9.6.0/yolov3.pt'\n if not os.path.exists('yolov3.pt'):\n wget.download(yolov3_url, out='./')\n self.model = torch.load('yolov3.pt')\n else:\n raise Exception('net type [%s] invalid! \\n please specify correct model_name' % args.model_name)\n<|end_body_0|>\n\n<|body_start_1|>\n with torch.no_grad():\n y_pred = self.model(x)\n return y_pred\n<|end_body_1|>\n", "revision_id": "bd3790ce72a2a26611b5eda3901651b5a809348f", "skeleton": "<|skeleton|>\nclass Predictor:\n \"\"\"python inference model\"\"\"\n\n def __init__(self):\n \"\"\"model name\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"model forward inference Args: x: input Returns: y_pred: output\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Predictor:\n \"\"\"python inference model\"\"\"\n\n def __init__(self):\n \"\"\"model name\"\"\"\n super().__init__()\n args = parse_args()\n if args.model_name == 'faster_rcnn':\n self.model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True).eval()\n elif args.model_name == 'yolov3':\n yolov3_url = 'https://github.com/ultralytics/yolov3/releases/download/v9.6.0/yolov3.pt'\n if not os.path.exists('yolov3.pt'):\n wget.download(yolov3_url, out='./')\n self.model = torch.load('yolov3.pt')\n else:\n raise Exception('net type [%s] invalid! \\n please specify correct model_name' % args.model_name)\n\n def forward(self, x):\n \"\"\"model forward inference Args: x: input Returns: y_pred: output\"\"\"\n with torch.no_grad():\n y_pred = self.model(x)\n return y_pred\n", "source": "the_stack_v2_python_sparse", "source_path": "inference/benchmark/python/torch/detection_benchmark.py", "source_repo": "PaddlePaddle/PaddleTest", "split": "val", "star_events_count": 42} {"blob_id": "a096c0ee3a624af25f5592acad3c1d77e8f2c2b9", "bodies": ["mes = ''\ntry:\n url = 'https://api.nytimes.com/svc/books/v3/lists/current/hardcover-fiction.json'\n res = requests.get(url, {'api-key': BS_BOOK_KEY})\n allList = json.loads(res.text)\n books = allList['results']['books']\n if len(books) == 0:\n return ''\n book = random.choice(list(books))\n mes = 'Not interested in research anymore? 
Maybe a book can help you refresh yourself.'\n mes += ' Here is a bestseller book recommendation by New York Times '\n mes += book['title'] + ' by ' + book['author'] + ' '\n if 'description' in book:\n mes += book['description']\nexcept:\n pass\nreturn mes", "if token is None or email is None:\n return False\nstmt = 'SELECT `e_mail` FROM ' + USER_TABLENAME\nstmt += ' WHERE `token` = \"' + token + '\"'\ncursor = connection.cursor()\ncursor.execute(stmt)\nrecords = cursor.fetchall()\nif len(records) != 1:\n return False\nreturn records[0][0] == email", "stmt = ''\ntry:\n if request.POST.get('token'):\n token = request.POST.get('token')\n else:\n return Response('No token found.', 403)\n if request.POST.get('email'):\n email = request.POST.get('email')\n else:\n return Response('No e-mail address found.', 403)\n if not DeleteUser.verifyTokenAndMail(token, email):\n return Response('No user found.', 403)\n else:\n stmt = 'DELETE FROM `' + USER_TABLENAME + '` WHERE token = \"' + token + '\"'\n cursor = connection.cursor()\n cursor.execute(stmt)\n return Response(DeleteUser.bestsellers(), 200)\nexcept:\n return Response('Cannot retrieve your token or your mail at the moment, sorry.', 403)"], "bodies_text": "<|body_start_0|>\n mes = ''\n try:\n url = 'https://api.nytimes.com/svc/books/v3/lists/current/hardcover-fiction.json'\n res = requests.get(url, {'api-key': BS_BOOK_KEY})\n allList = json.loads(res.text)\n books = allList['results']['books']\n if len(books) == 0:\n return ''\n book = random.choice(list(books))\n mes = 'Not interested in research anymore? Maybe a book can help you refresh yourself.'\n mes += ' Here is a bestseller book recommendation by New York Times '\n mes += book['title'] + ' by ' + book['author'] + ' '\n if 'description' in book:\n mes += book['description']\n except:\n pass\n return mes\n<|end_body_0|>\n\n<|body_start_1|>\n if token is None or email is None:\n return False\n stmt = 'SELECT `e_mail` FROM ' + USER_TABLENAME\n stmt += ' WHERE `token` = \"' + token + '\"'\n cursor = connection.cursor()\n cursor.execute(stmt)\n records = cursor.fetchall()\n if len(records) != 1:\n return False\n return records[0][0] == email\n<|end_body_1|>\n\n<|body_start_2|>\n stmt = ''\n try:\n if request.POST.get('token'):\n token = request.POST.get('token')\n else:\n return Response('No token found.', 403)\n if request.POST.get('email'):\n email = request.POST.get('email')\n else:\n return Response('No e-mail address found.', 403)\n if not DeleteUser.verifyTokenAndMail(token, email):\n return Response('No user found.', 403)\n else:\n stmt = 'DELETE FROM `' + USER_TABLENAME + '` WHERE token = \"' + token + '\"'\n cursor = connection.cursor()\n cursor.execute(stmt)\n return Response(DeleteUser.bestsellers(), 200)\n except:\n return Response('Cannot retrieve your token or your mail at the moment, sorry.', 403)\n<|end_body_2|>\n", "class_docstring": "In this class, delete user endpoint is implemented", "class_name": "DeleteUser", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DeleteUser:\n \"\"\"In this class, delete user endpoint is implemented\"\"\"\n\n def bestsellers():\n \"\"\"returns a message including a book recommendation in the format \"NAME OF THE BOOK \" by \"AUTHOR\". \"DESCRIPTION\". 
This function returns a message including a book recomendation taken by NYT bestseller list.\"\"\"\n <|body_0|>\n\n def verifyTokenAndMail(token=None, email=None):\n \"\"\"where 'token': string, 64 characther string that can be token where 'email': string returns TRUE if the token and the email belongs to a specific user in the database This function verifies if both the mail and token exist in the database.\"\"\"\n <|body_1|>\n\n def deleteUser2(request):\n \"\"\"where 'request': HTTP request that is from the view class returns nothing if everything goes right, returns an error message otherwise. This function deletes the logged in user.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mes = ''\n try:\n url = 'https://api.nytimes.com/svc/books/v3/lists/current/hardcover-fiction.json'\n res = requests.get(url, {'api-key': BS_BOOK_KEY})\n allList = json.loads(res.text)\n books = allList['results']['books']\n if len(books) == 0:\n return ''\n book = random.choice(list(books))\n mes = 'Not interested in research anymore? Maybe a book can help you refresh yourself.'\n mes += ' Here is a bestseller book recommendation by New York Times '\n mes += book['title'] + ' by ' + book['author'] + ' '\n if 'description' in book:\n mes += book['description']\n except:\n pass\n return mes\n<|end_body_0|>\n\n<|body_start_1|>\n if token is None or email is None:\n return False\n stmt = 'SELECT `e_mail` FROM ' + USER_TABLENAME\n stmt += ' WHERE `token` = \"' + token + '\"'\n cursor = connection.cursor()\n cursor.execute(stmt)\n records = cursor.fetchall()\n if len(records) != 1:\n return False\n return records[0][0] == email\n<|end_body_1|>\n\n<|body_start_2|>\n stmt = ''\n try:\n if request.POST.get('token'):\n token = request.POST.get('token')\n else:\n return Response('No token found.', 403)\n if request.POST.get('email'):\n email = request.POST.get('email')\n else:\n return Response('No e-mail address found.', 403)\n if not DeleteUser.verifyTokenAndMail(token, email):\n return Response('No user found.', 403)\n else:\n stmt = 'DELETE FROM `' + USER_TABLENAME + '` WHERE token = \"' + token + '\"'\n cursor = connection.cursor()\n cursor.execute(stmt)\n return Response(DeleteUser.bestsellers(), 200)\n except:\n return Response('Cannot retrieve your token or your mail at the moment, sorry.', 403)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000054", "length_bytes": 4470, "license_type": "no_license", "methods": [{"docstring": "returns a message including a book recommendation in the format \"NAME OF THE BOOK \" by \"AUTHOR\". \"DESCRIPTION\". This function returns a message including a book recomendation taken by NYT bestseller list.", "name": "bestsellers", "signature": "def bestsellers()"}, {"docstring": "where 'token': string, 64 characther string that can be token where 'email': string returns TRUE if the token and the email belongs to a specific user in the database This function verifies if both the mail and token exist in the database.", "name": "verifyTokenAndMail", "signature": "def verifyTokenAndMail(token=None, email=None)"}, {"docstring": "where 'request': HTTP request that is from the view class returns nothing if everything goes right, returns an error message otherwise. 
This function deletes the logged in user.", "name": "deleteUser2", "signature": "def deleteUser2(request)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003012", "prompt": "Implement the Python class `DeleteUser` described below.\n\nClass description:\nIn this class, delete user endpoint is implemented\n\nMethod signatures and docstrings:\n- def bestsellers(): returns a message including a book recommendation in the format \"NAME OF THE BOOK \" by \"AUTHOR\". \"DESCRIPTION\". This function returns a message including a book recomendation taken by NYT bestseller list.\n- def verifyTokenAndMail(token=None, email=None): where 'token': string, 64 characther string that can be token where 'email': string returns TRUE if the token and the email belongs to a specific user in the database This function verifies if both the mail and token exist in the database.\n- def deleteUser2(request): where 'request': HTTP request that is from the view class returns nothing if everything goes right, returns an error message otherwise. This function deletes the logged in user.", "prompted_full_text": "Implement the Python class `DeleteUser` described below.\n\nClass description:\nIn this class, delete user endpoint is implemented\n\nMethod signatures and docstrings:\n- def bestsellers(): returns a message including a book recommendation in the format \"NAME OF THE BOOK \" by \"AUTHOR\". \"DESCRIPTION\". This function returns a message including a book recomendation taken by NYT bestseller list.\n- def verifyTokenAndMail(token=None, email=None): where 'token': string, 64 characther string that can be token where 'email': string returns TRUE if the token and the email belongs to a specific user in the database This function verifies if both the mail and token exist in the database.\n- def deleteUser2(request): where 'request': HTTP request that is from the view class returns nothing if everything goes right, returns an error message otherwise. This function deletes the logged in user.\n\n<|skeleton|>\nclass DeleteUser:\n \"\"\"In this class, delete user endpoint is implemented\"\"\"\n\n def bestsellers():\n \"\"\"returns a message including a book recommendation in the format \"NAME OF THE BOOK \" by \"AUTHOR\". \"DESCRIPTION\". This function returns a message including a book recomendation taken by NYT bestseller list.\"\"\"\n <|body_0|>\n\n def verifyTokenAndMail(token=None, email=None):\n \"\"\"where 'token': string, 64 characther string that can be token where 'email': string returns TRUE if the token and the email belongs to a specific user in the database This function verifies if both the mail and token exist in the database.\"\"\"\n <|body_1|>\n\n def deleteUser2(request):\n \"\"\"where 'request': HTTP request that is from the view class returns nothing if everything goes right, returns an error message otherwise. This function deletes the logged in user.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mes = ''\n try:\n url = 'https://api.nytimes.com/svc/books/v3/lists/current/hardcover-fiction.json'\n res = requests.get(url, {'api-key': BS_BOOK_KEY})\n allList = json.loads(res.text)\n books = allList['results']['books']\n if len(books) == 0:\n return ''\n book = random.choice(list(books))\n mes = 'Not interested in research anymore? 
Maybe a book can help you refresh yourself.'\n mes += ' Here is a bestseller book recommendation by New York Times '\n mes += book['title'] + ' by ' + book['author'] + ' '\n if 'description' in book:\n mes += book['description']\n except:\n pass\n return mes\n<|end_body_0|>\n\n<|body_start_1|>\n if token is None or email is None:\n return False\n stmt = 'SELECT `e_mail` FROM ' + USER_TABLENAME\n stmt += ' WHERE `token` = \"' + token + '\"'\n cursor = connection.cursor()\n cursor.execute(stmt)\n records = cursor.fetchall()\n if len(records) != 1:\n return False\n return records[0][0] == email\n<|end_body_1|>\n\n<|body_start_2|>\n stmt = ''\n try:\n if request.POST.get('token'):\n token = request.POST.get('token')\n else:\n return Response('No token found.', 403)\n if request.POST.get('email'):\n email = request.POST.get('email')\n else:\n return Response('No e-mail address found.', 403)\n if not DeleteUser.verifyTokenAndMail(token, email):\n return Response('No user found.', 403)\n else:\n stmt = 'DELETE FROM `' + USER_TABLENAME + '` WHERE token = \"' + token + '\"'\n cursor = connection.cursor()\n cursor.execute(stmt)\n return Response(DeleteUser.bestsellers(), 200)\n except:\n return Response('Cannot retrieve your token or your mail at the moment, sorry.', 403)\n<|end_body_2|>\n", "revision_id": "f7aebee17a0a79e8d3c2927733bce8015b4a9da3", "skeleton": "<|skeleton|>\nclass DeleteUser:\n \"\"\"In this class, delete user endpoint is implemented\"\"\"\n\n def bestsellers():\n \"\"\"returns a message including a book recommendation in the format \"NAME OF THE BOOK \" by \"AUTHOR\". \"DESCRIPTION\". This function returns a message including a book recomendation taken by NYT bestseller list.\"\"\"\n <|body_0|>\n\n def verifyTokenAndMail(token=None, email=None):\n \"\"\"where 'token': string, 64 characther string that can be token where 'email': string returns TRUE if the token and the email belongs to a specific user in the database This function verifies if both the mail and token exist in the database.\"\"\"\n <|body_1|>\n\n def deleteUser2(request):\n \"\"\"where 'request': HTTP request that is from the view class returns nothing if everything goes right, returns an error message otherwise. This function deletes the logged in user.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DeleteUser:\n \"\"\"In this class, delete user endpoint is implemented\"\"\"\n\n def bestsellers():\n \"\"\"returns a message including a book recommendation in the format \"NAME OF THE BOOK \" by \"AUTHOR\". \"DESCRIPTION\". This function returns a message including a book recomendation taken by NYT bestseller list.\"\"\"\n mes = ''\n try:\n url = 'https://api.nytimes.com/svc/books/v3/lists/current/hardcover-fiction.json'\n res = requests.get(url, {'api-key': BS_BOOK_KEY})\n allList = json.loads(res.text)\n books = allList['results']['books']\n if len(books) == 0:\n return ''\n book = random.choice(list(books))\n mes = 'Not interested in research anymore? 
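`bestsellers` above hides every failure behind a bare `except: pass`; a sketch of the same NYT Books API call with a timeout and narrower error handling (same endpoint, with `api_key` passed in rather than read from the record's `BS_BOOK_KEY` global) might be:

```python
import random
import requests

NYT_URL = 'https://api.nytimes.com/svc/books/v3/lists/current/hardcover-fiction.json'

def pick_bestseller(api_key):
    try:
        # A timeout keeps a slow upstream from hanging the caller indefinitely.
        res = requests.get(NYT_URL, params={'api-key': api_key}, timeout=5)
        res.raise_for_status()
        books = res.json()['results']['books']
    except (requests.RequestException, KeyError, ValueError):
        return ''
    if not books:
        return ''
    book = random.choice(books)
    return book['title'] + ' by ' + book['author']
```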
Maybe a book can help you refresh yourself.'\n mes += ' Here is a bestseller book recommendation by New York Times '\n mes += book['title'] + ' by ' + book['author'] + ' '\n if 'description' in book:\n mes += book['description']\n except:\n pass\n return mes\n\n def verifyTokenAndMail(token=None, email=None):\n \"\"\"where 'token': string, 64 characther string that can be token where 'email': string returns TRUE if the token and the email belongs to a specific user in the database This function verifies if both the mail and token exist in the database.\"\"\"\n if token is None or email is None:\n return False\n stmt = 'SELECT `e_mail` FROM ' + USER_TABLENAME\n stmt += ' WHERE `token` = \"' + token + '\"'\n cursor = connection.cursor()\n cursor.execute(stmt)\n records = cursor.fetchall()\n if len(records) != 1:\n return False\n return records[0][0] == email\n\n def deleteUser2(request):\n \"\"\"where 'request': HTTP request that is from the view class returns nothing if everything goes right, returns an error message otherwise. This function deletes the logged in user.\"\"\"\n stmt = ''\n try:\n if request.POST.get('token'):\n token = request.POST.get('token')\n else:\n return Response('No token found.', 403)\n if request.POST.get('email'):\n email = request.POST.get('email')\n else:\n return Response('No e-mail address found.', 403)\n if not DeleteUser.verifyTokenAndMail(token, email):\n return Response('No user found.', 403)\n else:\n stmt = 'DELETE FROM `' + USER_TABLENAME + '` WHERE token = \"' + token + '\"'\n cursor = connection.cursor()\n cursor.execute(stmt)\n return Response(DeleteUser.bestsellers(), 200)\n except:\n return Response('Cannot retrieve your token or your mail at the moment, sorry.', 403)\n", "source": "the_stack_v2_python_sparse", "source_path": "practice-app/platon_api/rest_api/delete_user_t/delete_user_f.py", "source_repo": "bounswe/bounswe2020group7", "split": "val", "star_events_count": 18} {"blob_id": "635c5bee25dbb5c2f14c3e1cb6c16f805c9dc727", "bodies": ["Company = self.old_state.apps.get_model('company', 'company')\nsupplier = Company.objects.create(name='Supplier A', description='A great supplier!', is_supplier=True, is_customer=True)\nPurchaseOrder = self.old_state.apps.get_model('order', 'purchaseorder')\nSalesOrder = self.old_state.apps.get_model('order', 'salesorder')\nfor ii in range(10):\n order = PurchaseOrder.objects.create(supplier=supplier, reference=f'{ii}-abcde', description='Just a test order')\n with self.assertRaises(AttributeError):\n print(order.reference_int)\n sales_order = SalesOrder.objects.create(customer=supplier, reference=f'{ii}-xyz', description='A test sales order')\n with self.assertRaises(AttributeError):\n print(sales_order.reference_int)\nself.po_pk = PurchaseOrder.objects.create(supplier=supplier, reference='999999999999999999999999999999999', description='Big reference field').pk\nself.so_pk = SalesOrder.objects.create(customer=supplier, reference='999999999999999999999999999999999', description='Big reference field').pk", "PurchaseOrder = self.new_state.apps.get_model('order', 'purchaseorder')\nSalesOrder = self.new_state.apps.get_model('order', 'salesorder')\nfor ii in range(10):\n po = PurchaseOrder.objects.get(reference=f'{ii}-abcde')\n so = SalesOrder.objects.get(reference=f'{ii}-xyz')\n self.assertEqual(po.reference_int, ii)\n self.assertEqual(so.reference_int, ii)\npo = PurchaseOrder.objects.get(pk=self.po_pk)\nself.assertEqual(po.reference, '999999999999999999999999999999999')\nself.assertEqual(po.reference_int, 
2147483647)\nso = SalesOrder.objects.get(pk=self.so_pk)\nself.assertEqual(so.reference, '999999999999999999999999999999999')\nself.assertEqual(so.reference_int, 2147483647)"], "bodies_text": "<|body_start_0|>\n Company = self.old_state.apps.get_model('company', 'company')\n supplier = Company.objects.create(name='Supplier A', description='A great supplier!', is_supplier=True, is_customer=True)\n PurchaseOrder = self.old_state.apps.get_model('order', 'purchaseorder')\n SalesOrder = self.old_state.apps.get_model('order', 'salesorder')\n for ii in range(10):\n order = PurchaseOrder.objects.create(supplier=supplier, reference=f'{ii}-abcde', description='Just a test order')\n with self.assertRaises(AttributeError):\n print(order.reference_int)\n sales_order = SalesOrder.objects.create(customer=supplier, reference=f'{ii}-xyz', description='A test sales order')\n with self.assertRaises(AttributeError):\n print(sales_order.reference_int)\n self.po_pk = PurchaseOrder.objects.create(supplier=supplier, reference='999999999999999999999999999999999', description='Big reference field').pk\n self.so_pk = SalesOrder.objects.create(customer=supplier, reference='999999999999999999999999999999999', description='Big reference field').pk\n<|end_body_0|>\n\n<|body_start_1|>\n PurchaseOrder = self.new_state.apps.get_model('order', 'purchaseorder')\n SalesOrder = self.new_state.apps.get_model('order', 'salesorder')\n for ii in range(10):\n po = PurchaseOrder.objects.get(reference=f'{ii}-abcde')\n so = SalesOrder.objects.get(reference=f'{ii}-xyz')\n self.assertEqual(po.reference_int, ii)\n self.assertEqual(so.reference_int, ii)\n po = PurchaseOrder.objects.get(pk=self.po_pk)\n self.assertEqual(po.reference, '999999999999999999999999999999999')\n self.assertEqual(po.reference_int, 2147483647)\n so = SalesOrder.objects.get(pk=self.so_pk)\n self.assertEqual(so.reference, '999999999999999999999999999999999')\n self.assertEqual(so.reference_int, 2147483647)\n<|end_body_1|>\n", "class_docstring": "Test entire schema migration.", "class_name": "TestRefIntMigrations", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestRefIntMigrations:\n \"\"\"Test entire schema migration.\"\"\"\n\n def prepare(self):\n \"\"\"Create initial data set.\"\"\"\n <|body_0|>\n\n def test_ref_field(self):\n \"\"\"Test that the 'reference_int' field has been created and is filled out correctly.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Company = self.old_state.apps.get_model('company', 'company')\n supplier = Company.objects.create(name='Supplier A', description='A great supplier!', is_supplier=True, is_customer=True)\n PurchaseOrder = self.old_state.apps.get_model('order', 'purchaseorder')\n SalesOrder = self.old_state.apps.get_model('order', 'salesorder')\n for ii in range(10):\n order = PurchaseOrder.objects.create(supplier=supplier, reference=f'{ii}-abcde', description='Just a test order')\n with self.assertRaises(AttributeError):\n print(order.reference_int)\n sales_order = SalesOrder.objects.create(customer=supplier, reference=f'{ii}-xyz', description='A test sales order')\n with self.assertRaises(AttributeError):\n print(sales_order.reference_int)\n self.po_pk = PurchaseOrder.objects.create(supplier=supplier, reference='999999999999999999999999999999999', description='Big reference field').pk\n self.so_pk = SalesOrder.objects.create(customer=supplier, reference='999999999999999999999999999999999', 
description='Big reference field').pk\n<|end_body_0|>\n\n<|body_start_1|>\n PurchaseOrder = self.new_state.apps.get_model('order', 'purchaseorder')\n SalesOrder = self.new_state.apps.get_model('order', 'salesorder')\n for ii in range(10):\n po = PurchaseOrder.objects.get(reference=f'{ii}-abcde')\n so = SalesOrder.objects.get(reference=f'{ii}-xyz')\n self.assertEqual(po.reference_int, ii)\n self.assertEqual(so.reference_int, ii)\n po = PurchaseOrder.objects.get(pk=self.po_pk)\n self.assertEqual(po.reference, '999999999999999999999999999999999')\n self.assertEqual(po.reference_int, 2147483647)\n so = SalesOrder.objects.get(pk=self.so_pk)\n self.assertEqual(so.reference, '999999999999999999999999999999999')\n self.assertEqual(so.reference_int, 2147483647)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000055", "length_bytes": 7688, "license_type": "permissive", "methods": [{"docstring": "Create initial data set.", "name": "prepare", "signature": "def prepare(self)"}, {"docstring": "Test that the 'reference_int' field has been created and is filled out correctly.", "name": "test_ref_field", "signature": "def test_ref_field(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006167", "prompt": "Implement the Python class `TestRefIntMigrations` described below.\n\nClass description:\nTest entire schema migration.\n\nMethod signatures and docstrings:\n- def prepare(self): Create initial data set.\n- def test_ref_field(self): Test that the 'reference_int' field has been created and is filled out correctly.", "prompted_full_text": "Implement the Python class `TestRefIntMigrations` described below.\n\nClass description:\nTest entire schema migration.\n\nMethod signatures and docstrings:\n- def prepare(self): Create initial data set.\n- def test_ref_field(self): Test that the 'reference_int' field has been created and is filled out correctly.\n\n<|skeleton|>\nclass TestRefIntMigrations:\n \"\"\"Test entire schema migration.\"\"\"\n\n def prepare(self):\n \"\"\"Create initial data set.\"\"\"\n <|body_0|>\n\n def test_ref_field(self):\n \"\"\"Test that the 'reference_int' field has been created and is filled out correctly.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Company = self.old_state.apps.get_model('company', 'company')\n supplier = Company.objects.create(name='Supplier A', description='A great supplier!', is_supplier=True, is_customer=True)\n PurchaseOrder = self.old_state.apps.get_model('order', 'purchaseorder')\n SalesOrder = self.old_state.apps.get_model('order', 'salesorder')\n for ii in range(10):\n order = PurchaseOrder.objects.create(supplier=supplier, reference=f'{ii}-abcde', description='Just a test order')\n with self.assertRaises(AttributeError):\n print(order.reference_int)\n sales_order = SalesOrder.objects.create(customer=supplier, reference=f'{ii}-xyz', description='A test sales order')\n with self.assertRaises(AttributeError):\n print(sales_order.reference_int)\n self.po_pk = PurchaseOrder.objects.create(supplier=supplier, reference='999999999999999999999999999999999', description='Big reference field').pk\n self.so_pk = SalesOrder.objects.create(customer=supplier, reference='999999999999999999999999999999999', description='Big reference field').pk\n<|end_body_0|>\n\n<|body_start_1|>\n PurchaseOrder = self.new_state.apps.get_model('order', 'purchaseorder')\n SalesOrder = self.new_state.apps.get_model('order', 'salesorder')\n for ii in range(10):\n po = PurchaseOrder.objects.get(reference=f'{ii}-abcde')\n so = 
SalesOrder.objects.get(reference=f'{ii}-xyz')\n self.assertEqual(po.reference_int, ii)\n self.assertEqual(so.reference_int, ii)\n po = PurchaseOrder.objects.get(pk=self.po_pk)\n self.assertEqual(po.reference, '999999999999999999999999999999999')\n self.assertEqual(po.reference_int, 2147483647)\n so = SalesOrder.objects.get(pk=self.so_pk)\n self.assertEqual(so.reference, '999999999999999999999999999999999')\n self.assertEqual(so.reference_int, 2147483647)\n<|end_body_1|>\n", "revision_id": "e88a8e99a5f0b201c67a95cba097c729f090d5e2", "skeleton": "<|skeleton|>\nclass TestRefIntMigrations:\n \"\"\"Test entire schema migration.\"\"\"\n\n def prepare(self):\n \"\"\"Create initial data set.\"\"\"\n <|body_0|>\n\n def test_ref_field(self):\n \"\"\"Test that the 'reference_int' field has been created and is filled out correctly.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestRefIntMigrations:\n \"\"\"Test entire schema migration.\"\"\"\n\n def prepare(self):\n \"\"\"Create initial data set.\"\"\"\n Company = self.old_state.apps.get_model('company', 'company')\n supplier = Company.objects.create(name='Supplier A', description='A great supplier!', is_supplier=True, is_customer=True)\n PurchaseOrder = self.old_state.apps.get_model('order', 'purchaseorder')\n SalesOrder = self.old_state.apps.get_model('order', 'salesorder')\n for ii in range(10):\n order = PurchaseOrder.objects.create(supplier=supplier, reference=f'{ii}-abcde', description='Just a test order')\n with self.assertRaises(AttributeError):\n print(order.reference_int)\n sales_order = SalesOrder.objects.create(customer=supplier, reference=f'{ii}-xyz', description='A test sales order')\n with self.assertRaises(AttributeError):\n print(sales_order.reference_int)\n self.po_pk = PurchaseOrder.objects.create(supplier=supplier, reference='999999999999999999999999999999999', description='Big reference field').pk\n self.so_pk = SalesOrder.objects.create(customer=supplier, reference='999999999999999999999999999999999', description='Big reference field').pk\n\n def test_ref_field(self):\n \"\"\"Test that the 'reference_int' field has been created and is filled out correctly.\"\"\"\n PurchaseOrder = self.new_state.apps.get_model('order', 'purchaseorder')\n SalesOrder = self.new_state.apps.get_model('order', 'salesorder')\n for ii in range(10):\n po = PurchaseOrder.objects.get(reference=f'{ii}-abcde')\n so = SalesOrder.objects.get(reference=f'{ii}-xyz')\n self.assertEqual(po.reference_int, ii)\n self.assertEqual(so.reference_int, ii)\n po = PurchaseOrder.objects.get(pk=self.po_pk)\n self.assertEqual(po.reference, '999999999999999999999999999999999')\n self.assertEqual(po.reference_int, 2147483647)\n so = SalesOrder.objects.get(pk=self.so_pk)\n self.assertEqual(so.reference, '999999999999999999999999999999999')\n self.assertEqual(so.reference_int, 2147483647)\n", "source": "the_stack_v2_python_sparse", "source_path": "InvenTree/order/test_migrations.py", "source_repo": "inventree/InvenTree", "split": "val", "star_events_count": 3077} {"blob_id": "bde617e1b5e17160041b334e5b1c5d0319731dde", "bodies": ["res = []\n\ndef dfs(root):\n if not root:\n return\n res.append(root.val)\n for child in root.children:\n dfs(child)\ndfs(root)\nreturn res", "stack = [root]\nres = []\nif not root:\n return []\nwhile stack:\n root = stack.pop()\n res.append(root.val)\n stack.extend(root.children[::-1])\nreturn res"], "bodies_text": "<|body_start_0|>\n 
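The assertions in the migration test above imply that the migration parses the leading digits of each reference and clamps anything overflowing a signed 32-bit IntegerField to 2147483647; a hypothetical sketch of that extraction (not the actual InvenTree migration code) would be:

```python
import re

INT32_MAX = 2 ** 31 - 1  # 2147483647, the clamp value the test expects

def extract_reference_int(reference):
    # '7-abcde' -> 7; references with no leading digits map to 0.
    match = re.match(r'\d+', reference)
    value = int(match.group()) if match else 0
    # Clamp so the result always fits the new IntegerField column.
    return min(value, INT32_MAX)

assert extract_reference_int('7-abcde') == 7
assert extract_reference_int('999999999999999999999999999999999') == INT32_MAX
```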
res = []\n\n def dfs(root):\n if not root:\n return\n res.append(root.val)\n for child in root.children:\n dfs(child)\n dfs(root)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n stack = [root]\n res = []\n if not root:\n return []\n while stack:\n root = stack.pop()\n res.append(root.val)\n stack.extend(root.children[::-1])\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def preorder(self, root: 'Node') -> List[int]:\n \"\"\"简单的递归遍历 :param root: :return:\"\"\"\n <|body_0|>\n\n def preorder1(self, root: 'Node') -> List[int]:\n \"\"\"使用迭代的方式,自己维护一个栈 :param root: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = []\n\n def dfs(root):\n if not root:\n return\n res.append(root.val)\n for child in root.children:\n dfs(child)\n dfs(root)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n stack = [root]\n res = []\n if not root:\n return []\n while stack:\n root = stack.pop()\n res.append(root.val)\n stack.extend(root.children[::-1])\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000056", "length_bytes": 1129, "license_type": "no_license", "methods": [{"docstring": "简单的递归遍历 :param root: :return:", "name": "preorder", "signature": "def preorder(self, root: 'Node') -> List[int]"}, {"docstring": "使用迭代的方式,自己维护一个栈 :param root: :return:", "name": "preorder1", "signature": "def preorder1(self, root: 'Node') -> List[int]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004913", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def preorder(self, root: 'Node') -> List[int]: 简单的递归遍历 :param root: :return:\n- def preorder1(self, root: 'Node') -> List[int]: 使用迭代的方式,自己维护一个栈 :param root: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def preorder(self, root: 'Node') -> List[int]: 简单的递归遍历 :param root: :return:\n- def preorder1(self, root: 'Node') -> List[int]: 使用迭代的方式,自己维护一个栈 :param root: :return:\n\n<|skeleton|>\nclass Solution:\n\n def preorder(self, root: 'Node') -> List[int]:\n \"\"\"简单的递归遍历 :param root: :return:\"\"\"\n <|body_0|>\n\n def preorder1(self, root: 'Node') -> List[int]:\n \"\"\"使用迭代的方式,自己维护一个栈 :param root: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = []\n\n def dfs(root):\n if not root:\n return\n res.append(root.val)\n for child in root.children:\n dfs(child)\n dfs(root)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n stack = [root]\n res = []\n if not root:\n return []\n while stack:\n root = stack.pop()\n res.append(root.val)\n stack.extend(root.children[::-1])\n return res\n<|end_body_1|>\n", "revision_id": "578cacff5851c5c2522981693c34e3c318002d30", "skeleton": "<|skeleton|>\nclass Solution:\n\n def preorder(self, root: 'Node') -> List[int]:\n \"\"\"简单的递归遍历 :param root: :return:\"\"\"\n <|body_0|>\n\n def preorder1(self, root: 'Node') -> List[int]:\n \"\"\"使用迭代的方式,自己维护一个栈 :param root: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def preorder(self, root: 'Node') -> List[int]:\n \"\"\"简单的递归遍历 :param root: :return:\"\"\"\n res = []\n\n def 
dfs(root):\n if not root:\n return\n res.append(root.val)\n for child in root.children:\n dfs(child)\n dfs(root)\n return res\n\n def preorder1(self, root: 'Node') -> List[int]:\n \"\"\"使用迭代的方式,自己维护一个栈 :param root: :return:\"\"\"\n stack = [root]\n res = []\n if not root:\n return []\n while stack:\n root = stack.pop()\n res.append(root.val)\n stack.extend(root.children[::-1])\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "N叉树的前序遍历.py", "source_repo": "cjrzs/MyLeetCode", "split": "val", "star_events_count": 8} {"blob_id": "3ac608bfd94df299e5db69490b343ffb65b62e61", "bodies": ["dp = [float('inf')] * (n + 1)\ndp[0] = 0\nfor i in range(1, n + 1):\n for j in range(1, int(i ** 0.5) + 1):\n dp[i] = min(dp[i], dp[i - j * j] + 1)\nreturn dp[n]", "queue = [node(n)]\nvisited = set([node(n).value])\nwhile queue:\n vertex = queue.pop(0)\n residuals = [vertex.value - n * n for n in range(1, int(vertex.value ** 0.5) + 1)]\n for i in residuals:\n new_vertex = node(i, vertex.step + 1)\n if i == 0:\n return new_vertex.step\n elif i not in visited:\n queue.append(new_vertex)\n visited.add(i)\nreturn -1"], "bodies_text": "<|body_start_0|>\n dp = [float('inf')] * (n + 1)\n dp[0] = 0\n for i in range(1, n + 1):\n for j in range(1, int(i ** 0.5) + 1):\n dp[i] = min(dp[i], dp[i - j * j] + 1)\n return dp[n]\n<|end_body_0|>\n\n<|body_start_1|>\n queue = [node(n)]\n visited = set([node(n).value])\n while queue:\n vertex = queue.pop(0)\n residuals = [vertex.value - n * n for n in range(1, int(vertex.value ** 0.5) + 1)]\n for i in residuals:\n new_vertex = node(i, vertex.step + 1)\n if i == 0:\n return new_vertex.step\n elif i not in visited:\n queue.append(new_vertex)\n visited.add(i)\n return -1\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def numSquares_dp(self, n):\n \"\"\":type n: int :rtype: int dp[i]代表到数字i组成最少完全平方个数 动态转移方程:dp[i] = min(dp[i], dp[i - j * j] + 1), j <= sqrt{i} + 1\"\"\"\n <|body_0|>\n\n def numSquares_bfs(self, n):\n \"\"\":type n: int :rtype: int BFS 其实是很简单的基础算法,抓住如下几点即可轻松写出不易错的 baseline: BFS 算法组成的 3 元素:队列,入队出队的节点,已访问的集合。 队列:先入先出的容器; 节点:最好写成单独的类,比如本例写成 (value,step) 元组。也可写成 (value,visited),看自己喜好和题目; 已访问集合:为了避免队列中插入重复的值 BFS算法组成的套路: 初始化三元素: Node = node(n) queue = [Node] visited = set([Node.value]) 操作队列 —— 弹出队首节点: vertex = queue.pop(0) 操作弹出的节点 —— 根据业务生成子节点(一个或多个): [node(vertex.value - n*n, Node.step+1) for n in range(1,int(vertex.value**.5)+1)] 判断这些节点 —— 符合业务条件,则return,不符合业务条件,且不在已访问集合,则追加到队尾,并加入已访问集合: if i==0: return new_vertex.step elif i not in visited: queue.append(new_vertex) visited.add(i) 若以上遍历完成仍未return,下面操作返回未找到代码: return -1\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dp = [float('inf')] * (n + 1)\n dp[0] = 0\n for i in range(1, n + 1):\n for j in range(1, int(i ** 0.5) + 1):\n dp[i] = min(dp[i], dp[i - j * j] + 1)\n return dp[n]\n<|end_body_0|>\n\n<|body_start_1|>\n queue = [node(n)]\n visited = set([node(n).value])\n while queue:\n vertex = queue.pop(0)\n residuals = [vertex.value - n * n for n in range(1, int(vertex.value ** 0.5) + 1)]\n for i in residuals:\n new_vertex = node(i, vertex.step + 1)\n if i == 0:\n return new_vertex.step\n elif i not in visited:\n queue.append(new_vertex)\n visited.add(i)\n return -1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000057", "length_bytes": 2613, "license_type": "no_license", "methods": [{"docstring": ":type n: int :rtype: int 
dp[i]代表到数字i组成最少完全平方个数 动态转移方程:dp[i] = min(dp[i], dp[i - j * j] + 1), j <= sqrt{i} + 1", "name": "numSquares_dp", "signature": "def numSquares_dp(self, n)"}, {"docstring": ":type n: int :rtype: int BFS 其实是很简单的基础算法,抓住如下几点即可轻松写出不易错的 baseline: BFS 算法组成的 3 元素:队列,入队出队的节点,已访问的集合。 队列:先入先出的容器; 节点:最好写成单独的类,比如本例写成 (value,step) 元组。也可写成 (value,visited),看自己喜好和题目; 已访问集合:为了避免队列中插入重复的值 BFS算法组成的套路: 初始化三元素: Node = node(n) queue = [Node] visited = set([Node.value]) 操作队列 —— 弹出队首节点: vertex = queue.pop(0) 操作弹出的节点 —— 根据业务生成子节点(一个或多个): [node(vertex.value - n*n, Node.step+1) for n in range(1,int(vertex.value**.5)+1)] 判断这些节点 —— 符合业务条件,则return,不符合业务条件,且不在已访问集合,则追加到队尾,并加入已访问集合: if i==0: return new_vertex.step elif i not in visited: queue.append(new_vertex) visited.add(i) 若以上遍历完成仍未return,下面操作返回未找到代码: return -1", "name": "numSquares_bfs", "signature": "def numSquares_bfs(self, n)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numSquares_dp(self, n): :type n: int :rtype: int dp[i]代表到数字i组成最少完全平方个数 动态转移方程:dp[i] = min(dp[i], dp[i - j * j] + 1), j <= sqrt{i} + 1\n- def numSquares_bfs(self, n): :type n: int :rtype: int BFS 其实是很简单的基础算法,抓住如下几点即可轻松写出不易错的 baseline: BFS 算法组成的 3 元素:队列,入队出队的节点,已访问的集合。 队列:先入先出的容器; 节点:最好写成单独的类,比如本例写成 (value,step) 元组。也可写成 (value,visited),看自己喜好和题目; 已访问集合:为了避免队列中插入重复的值 BFS算法组成的套路: 初始化三元素: Node = node(n) queue = [Node] visited = set([Node.value]) 操作队列 —— 弹出队首节点: vertex = queue.pop(0) 操作弹出的节点 —— 根据业务生成子节点(一个或多个): [node(vertex.value - n*n, Node.step+1) for n in range(1,int(vertex.value**.5)+1)] 判断这些节点 —— 符合业务条件,则return,不符合业务条件,且不在已访问集合,则追加到队尾,并加入已访问集合: if i==0: return new_vertex.step elif i not in visited: queue.append(new_vertex) visited.add(i) 若以上遍历完成仍未return,下面操作返回未找到代码: return -1", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numSquares_dp(self, n): :type n: int :rtype: int dp[i]代表到数字i组成最少完全平方个数 动态转移方程:dp[i] = min(dp[i], dp[i - j * j] + 1), j <= sqrt{i} + 1\n- def numSquares_bfs(self, n): :type n: int :rtype: int BFS 其实是很简单的基础算法,抓住如下几点即可轻松写出不易错的 baseline: BFS 算法组成的 3 元素:队列,入队出队的节点,已访问的集合。 队列:先入先出的容器; 节点:最好写成单独的类,比如本例写成 (value,step) 元组。也可写成 (value,visited),看自己喜好和题目; 已访问集合:为了避免队列中插入重复的值 BFS算法组成的套路: 初始化三元素: Node = node(n) queue = [Node] visited = set([Node.value]) 操作队列 —— 弹出队首节点: vertex = queue.pop(0) 操作弹出的节点 —— 根据业务生成子节点(一个或多个): [node(vertex.value - n*n, Node.step+1) for n in range(1,int(vertex.value**.5)+1)] 判断这些节点 —— 符合业务条件,则return,不符合业务条件,且不在已访问集合,则追加到队尾,并加入已访问集合: if i==0: return new_vertex.step elif i not in visited: queue.append(new_vertex) visited.add(i) 若以上遍历完成仍未return,下面操作返回未找到代码: return -1\n\n<|skeleton|>\nclass Solution:\n\n def numSquares_dp(self, n):\n \"\"\":type n: int :rtype: int dp[i]代表到数字i组成最少完全平方个数 动态转移方程:dp[i] = min(dp[i], dp[i - j * j] + 1), j <= sqrt{i} + 1\"\"\"\n <|body_0|>\n\n def numSquares_bfs(self, n):\n \"\"\":type n: int :rtype: int BFS 其实是很简单的基础算法,抓住如下几点即可轻松写出不易错的 baseline: BFS 算法组成的 3 元素:队列,入队出队的节点,已访问的集合。 队列:先入先出的容器; 节点:最好写成单独的类,比如本例写成 (value,step) 元组。也可写成 (value,visited),看自己喜好和题目; 已访问集合:为了避免队列中插入重复的值 BFS算法组成的套路: 初始化三元素: Node = node(n) queue = [Node] visited = set([Node.value]) 操作队列 —— 弹出队首节点: vertex = queue.pop(0) 操作弹出的节点 —— 根据业务生成子节点(一个或多个): [node(vertex.value - n*n, Node.step+1) for n in range(1,int(vertex.value**.5)+1)] 判断这些节点 —— 符合业务条件,则return,不符合业务条件,且不在已访问集合,则追加到队尾,并加入已访问集合: if i==0: return new_vertex.step 
elif i not in visited: queue.append(new_vertex) visited.add(i) 若以上遍历完成仍未return,下面操作返回未找到代码: return -1\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dp = [float('inf')] * (n + 1)\n dp[0] = 0\n for i in range(1, n + 1):\n for j in range(1, int(i ** 0.5) + 1):\n dp[i] = min(dp[i], dp[i - j * j] + 1)\n return dp[n]\n<|end_body_0|>\n\n<|body_start_1|>\n queue = [node(n)]\n visited = set([node(n).value])\n while queue:\n vertex = queue.pop(0)\n residuals = [vertex.value - n * n for n in range(1, int(vertex.value ** 0.5) + 1)]\n for i in residuals:\n new_vertex = node(i, vertex.step + 1)\n if i == 0:\n return new_vertex.step\n elif i not in visited:\n queue.append(new_vertex)\n visited.add(i)\n return -1\n<|end_body_1|>\n", "revision_id": "3f4284330f9771037ca59e2e6a94122e51e58540", "skeleton": "<|skeleton|>\nclass Solution:\n\n def numSquares_dp(self, n):\n \"\"\":type n: int :rtype: int dp[i]代表到数字i组成最少完全平方个数 动态转移方程:dp[i] = min(dp[i], dp[i - j * j] + 1), j <= sqrt{i} + 1\"\"\"\n <|body_0|>\n\n def numSquares_bfs(self, n):\n \"\"\":type n: int :rtype: int BFS 其实是很简单的基础算法,抓住如下几点即可轻松写出不易错的 baseline: BFS 算法组成的 3 元素:队列,入队出队的节点,已访问的集合。 队列:先入先出的容器; 节点:最好写成单独的类,比如本例写成 (value,step) 元组。也可写成 (value,visited),看自己喜好和题目; 已访问集合:为了避免队列中插入重复的值 BFS算法组成的套路: 初始化三元素: Node = node(n) queue = [Node] visited = set([Node.value]) 操作队列 —— 弹出队首节点: vertex = queue.pop(0) 操作弹出的节点 —— 根据业务生成子节点(一个或多个): [node(vertex.value - n*n, Node.step+1) for n in range(1,int(vertex.value**.5)+1)] 判断这些节点 —— 符合业务条件,则return,不符合业务条件,且不在已访问集合,则追加到队尾,并加入已访问集合: if i==0: return new_vertex.step elif i not in visited: queue.append(new_vertex) visited.add(i) 若以上遍历完成仍未return,下面操作返回未找到代码: return -1\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def numSquares_dp(self, n):\n \"\"\":type n: int :rtype: int dp[i]代表到数字i组成最少完全平方个数 动态转移方程:dp[i] = min(dp[i], dp[i - j * j] + 1), j <= sqrt{i} + 1\"\"\"\n dp = [float('inf')] * (n + 1)\n dp[0] = 0\n for i in range(1, n + 1):\n for j in range(1, int(i ** 0.5) + 1):\n dp[i] = min(dp[i], dp[i - j * j] + 1)\n return dp[n]\n\n def numSquares_bfs(self, n):\n \"\"\":type n: int :rtype: int BFS 其实是很简单的基础算法,抓住如下几点即可轻松写出不易错的 baseline: BFS 算法组成的 3 元素:队列,入队出队的节点,已访问的集合。 队列:先入先出的容器; 节点:最好写成单独的类,比如本例写成 (value,step) 元组。也可写成 (value,visited),看自己喜好和题目; 已访问集合:为了避免队列中插入重复的值 BFS算法组成的套路: 初始化三元素: Node = node(n) queue = [Node] visited = set([Node.value]) 操作队列 —— 弹出队首节点: vertex = queue.pop(0) 操作弹出的节点 —— 根据业务生成子节点(一个或多个): [node(vertex.value - n*n, Node.step+1) for n in range(1,int(vertex.value**.5)+1)] 判断这些节点 —— 符合业务条件,则return,不符合业务条件,且不在已访问集合,则追加到队尾,并加入已访问集合: if i==0: return new_vertex.step elif i not in visited: queue.append(new_vertex) visited.add(i) 若以上遍历完成仍未return,下面操作返回未找到代码: return -1\"\"\"\n queue = [node(n)]\n visited = set([node(n).value])\n while queue:\n vertex = queue.pop(0)\n residuals = [vertex.value - n * n for n in range(1, int(vertex.value ** 0.5) + 1)]\n for i in residuals:\n new_vertex = node(i, vertex.step + 1)\n if i == 0:\n return new_vertex.step\n elif i not in visited:\n queue.append(new_vertex)\n visited.add(i)\n return -1\n", "source": "the_stack_v2_python_sparse", "source_path": "Leetcode/279.完全平方数.py", "source_repo": "myf-algorithm/Leetcode", "split": "val", "star_events_count": 1} {"blob_id": "24f201f8fefa926be75ce2c69d19785742966365", "bodies": ["classname = self.__class__.__name__\nobj_dir = settings.CLASSNAME_TO_DIR[classname]\nobj_path = os.path.join(obj_dir, 
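As a quick sanity check of the dp recurrence in the record above: 12 = 4 + 4 + 4 needs three squares while 13 = 4 + 9 needs two, so a standalone version of the same dp should return 3 and 2 respectively:

```python
def num_squares(n):
    # dp[i] holds the fewest perfect squares summing to i.
    dp = [0] + [float('inf')] * n
    for i in range(1, n + 1):
        j = 1
        while j * j <= i:
            dp[i] = min(dp[i], dp[i - j * j] + 1)
            j += 1
    return dp[n]

assert num_squares(12) == 3  # 4 + 4 + 4
assert num_squares(13) == 2  # 4 + 9
```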
self.id)\npickle.dump(self, open(obj_path, 'wb'))", "classname = cls.__name__\nobj_dir = settings.CLASSNAME_TO_DIR[classname]\nobj_list = []\nfor obj_name in os.listdir(obj_dir):\n obj_path = os.path.join(obj_dir, obj_name)\n obj = pickle.load(open(obj_path, 'rb'))\n obj_list.append(obj)\nreturn obj_list", "obj_list = self.get_all_obj()\nobj_username_list = []\nfor obj in obj_list:\n obj_username_list.append(obj.username)\nwhile True:\n md5 = hashlib.md5()\n username = input('请输入用户名:').strip()\n password = input('请输入密码:').strip()\n if len(username) == 0 or len(password) == 0:\n continue\n md5.update(password.encode())\n password = md5.hexdigest()\n if username in obj_username_list:\n print('\\x1b[31;1m用户名已存在\\x1b[0m')\n continue\n self.username = username\n self.password = password\n break\nself.save()\nlogger.info('%s注册了该系统!' % username)", "obj_list = cls.get_all_obj()\ncount = 0\nwhile count < 3:\n md5 = hashlib.md5()\n username = input('用户名:').strip()\n password = input('密码:').strip()\n if len(username) == 0 or len(password) == 0:\n continue\n md5.update(password.encode())\n password = md5.hexdigest()\n for obj in obj_list:\n if username == obj.username and password == obj.password:\n print('\\x1b[32;1m登陆成功!\\x1b[0m')\n logger.info('%s登陆了该系统!' % username)\n status = 'done'\n login_obj = obj\n return {'status': status, 'obj': login_obj}\n print('\\x1b[31;1m用户名或密码错误!\\x1b[0m')\n count += 1\nelse:\n print('\\x1b[31;1m您已尝试太多次了,请下次再试!\\x1b[0m')\n exit()"], "bodies_text": "<|body_start_0|>\n classname = self.__class__.__name__\n obj_dir = settings.CLASSNAME_TO_DIR[classname]\n obj_path = os.path.join(obj_dir, self.id)\n pickle.dump(self, open(obj_path, 'wb'))\n<|end_body_0|>\n\n<|body_start_1|>\n classname = cls.__name__\n obj_dir = settings.CLASSNAME_TO_DIR[classname]\n obj_list = []\n for obj_name in os.listdir(obj_dir):\n obj_path = os.path.join(obj_dir, obj_name)\n obj = pickle.load(open(obj_path, 'rb'))\n obj_list.append(obj)\n return obj_list\n<|end_body_1|>\n\n<|body_start_2|>\n obj_list = self.get_all_obj()\n obj_username_list = []\n for obj in obj_list:\n obj_username_list.append(obj.username)\n while True:\n md5 = hashlib.md5()\n username = input('请输入用户名:').strip()\n password = input('请输入密码:').strip()\n if len(username) == 0 or len(password) == 0:\n continue\n md5.update(password.encode())\n password = md5.hexdigest()\n if username in obj_username_list:\n print('\\x1b[31;1m用户名已存在\\x1b[0m')\n continue\n self.username = username\n self.password = password\n break\n self.save()\n logger.info('%s注册了该系统!' % username)\n<|end_body_2|>\n\n<|body_start_3|>\n obj_list = cls.get_all_obj()\n count = 0\n while count < 3:\n md5 = hashlib.md5()\n username = input('用户名:').strip()\n password = input('密码:').strip()\n if len(username) == 0 or len(password) == 0:\n continue\n md5.update(password.encode())\n password = md5.hexdigest()\n for obj in obj_list:\n if username == obj.username and password == obj.password:\n print('\\x1b[32;1m登陆成功!\\x1b[0m')\n logger.info('%s登陆了该系统!' 
% username)\n status = 'done'\n login_obj = obj\n return {'status': status, 'obj': login_obj}\n print('\\x1b[31;1m用户名或密码错误!\\x1b[0m')\n count += 1\n else:\n print('\\x1b[31;1m您已尝试太多次了,请下次再试!\\x1b[0m')\n exit()\n<|end_body_3|>\n", "class_docstring": "基础功能类", "class_name": "Base", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Base:\n \"\"\"基础功能类\"\"\"\n\n def save(self):\n \"\"\"存储对象功能\"\"\"\n <|body_0|>\n\n def get_all_obj(cls):\n \"\"\"获取所有对象功能\"\"\"\n <|body_1|>\n\n def enroll(self, logger):\n \"\"\"注册功能\"\"\"\n <|body_2|>\n\n def login(cls, logger):\n \"\"\"登陆功能\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n classname = self.__class__.__name__\n obj_dir = settings.CLASSNAME_TO_DIR[classname]\n obj_path = os.path.join(obj_dir, self.id)\n pickle.dump(self, open(obj_path, 'wb'))\n<|end_body_0|>\n\n<|body_start_1|>\n classname = cls.__name__\n obj_dir = settings.CLASSNAME_TO_DIR[classname]\n obj_list = []\n for obj_name in os.listdir(obj_dir):\n obj_path = os.path.join(obj_dir, obj_name)\n obj = pickle.load(open(obj_path, 'rb'))\n obj_list.append(obj)\n return obj_list\n<|end_body_1|>\n\n<|body_start_2|>\n obj_list = self.get_all_obj()\n obj_username_list = []\n for obj in obj_list:\n obj_username_list.append(obj.username)\n while True:\n md5 = hashlib.md5()\n username = input('请输入用户名:').strip()\n password = input('请输入密码:').strip()\n if len(username) == 0 or len(password) == 0:\n continue\n md5.update(password.encode())\n password = md5.hexdigest()\n if username in obj_username_list:\n print('\\x1b[31;1m用户名已存在\\x1b[0m')\n continue\n self.username = username\n self.password = password\n break\n self.save()\n logger.info('%s注册了该系统!' % username)\n<|end_body_2|>\n\n<|body_start_3|>\n obj_list = cls.get_all_obj()\n count = 0\n while count < 3:\n md5 = hashlib.md5()\n username = input('用户名:').strip()\n password = input('密码:').strip()\n if len(username) == 0 or len(password) == 0:\n continue\n md5.update(password.encode())\n password = md5.hexdigest()\n for obj in obj_list:\n if username == obj.username and password == obj.password:\n print('\\x1b[32;1m登陆成功!\\x1b[0m')\n logger.info('%s登陆了该系统!' 
% username)\n status = 'done'\n login_obj = obj\n return {'status': status, 'obj': login_obj}\n print('\\x1b[31;1m用户名或密码错误!\\x1b[0m')\n count += 1\n else:\n print('\\x1b[31;1m您已尝试太多次了,请下次再试!\\x1b[0m')\n exit()\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000058", "length_bytes": 2893, "license_type": "no_license", "methods": [{"docstring": "存储对象功能", "name": "save", "signature": "def save(self)"}, {"docstring": "获取所有对象功能", "name": "get_all_obj", "signature": "def get_all_obj(cls)"}, {"docstring": "注册功能", "name": "enroll", "signature": "def enroll(self, logger)"}, {"docstring": "登陆功能", "name": "login", "signature": "def login(cls, logger)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_006688", "prompt": "Implement the Python class `Base` described below.\n\nClass description:\n基础功能类\n\nMethod signatures and docstrings:\n- def save(self): 存储对象功能\n- def get_all_obj(cls): 获取所有对象功能\n- def enroll(self, logger): 注册功能\n- def login(cls, logger): 登陆功能", "prompted_full_text": "Implement the Python class `Base` described below.\n\nClass description:\n基础功能类\n\nMethod signatures and docstrings:\n- def save(self): 存储对象功能\n- def get_all_obj(cls): 获取所有对象功能\n- def enroll(self, logger): 注册功能\n- def login(cls, logger): 登陆功能\n\n<|skeleton|>\nclass Base:\n \"\"\"基础功能类\"\"\"\n\n def save(self):\n \"\"\"存储对象功能\"\"\"\n <|body_0|>\n\n def get_all_obj(cls):\n \"\"\"获取所有对象功能\"\"\"\n <|body_1|>\n\n def enroll(self, logger):\n \"\"\"注册功能\"\"\"\n <|body_2|>\n\n def login(cls, logger):\n \"\"\"登陆功能\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n classname = self.__class__.__name__\n obj_dir = settings.CLASSNAME_TO_DIR[classname]\n obj_path = os.path.join(obj_dir, self.id)\n pickle.dump(self, open(obj_path, 'wb'))\n<|end_body_0|>\n\n<|body_start_1|>\n classname = cls.__name__\n obj_dir = settings.CLASSNAME_TO_DIR[classname]\n obj_list = []\n for obj_name in os.listdir(obj_dir):\n obj_path = os.path.join(obj_dir, obj_name)\n obj = pickle.load(open(obj_path, 'rb'))\n obj_list.append(obj)\n return obj_list\n<|end_body_1|>\n\n<|body_start_2|>\n obj_list = self.get_all_obj()\n obj_username_list = []\n for obj in obj_list:\n obj_username_list.append(obj.username)\n while True:\n md5 = hashlib.md5()\n username = input('请输入用户名:').strip()\n password = input('请输入密码:').strip()\n if len(username) == 0 or len(password) == 0:\n continue\n md5.update(password.encode())\n password = md5.hexdigest()\n if username in obj_username_list:\n print('\\x1b[31;1m用户名已存在\\x1b[0m')\n continue\n self.username = username\n self.password = password\n break\n self.save()\n logger.info('%s注册了该系统!' % username)\n<|end_body_2|>\n\n<|body_start_3|>\n obj_list = cls.get_all_obj()\n count = 0\n while count < 3:\n md5 = hashlib.md5()\n username = input('用户名:').strip()\n password = input('密码:').strip()\n if len(username) == 0 or len(password) == 0:\n continue\n md5.update(password.encode())\n password = md5.hexdigest()\n for obj in obj_list:\n if username == obj.username and password == obj.password:\n print('\\x1b[32;1m登陆成功!\\x1b[0m')\n logger.info('%s登陆了该系统!' 
% username)\n status = 'done'\n login_obj = obj\n return {'status': status, 'obj': login_obj}\n print('\\x1b[31;1m用户名或密码错误!\\x1b[0m')\n count += 1\n else:\n print('\\x1b[31;1m您已尝试太多次了,请下次再试!\\x1b[0m')\n exit()\n<|end_body_3|>\n", "revision_id": "4d497a6261de17cc2fc058cea50e127e885e5095", "skeleton": "<|skeleton|>\nclass Base:\n \"\"\"基础功能类\"\"\"\n\n def save(self):\n \"\"\"存储对象功能\"\"\"\n <|body_0|>\n\n def get_all_obj(cls):\n \"\"\"获取所有对象功能\"\"\"\n <|body_1|>\n\n def enroll(self, logger):\n \"\"\"注册功能\"\"\"\n <|body_2|>\n\n def login(cls, logger):\n \"\"\"登陆功能\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Base:\n \"\"\"基础功能类\"\"\"\n\n def save(self):\n \"\"\"存储对象功能\"\"\"\n classname = self.__class__.__name__\n obj_dir = settings.CLASSNAME_TO_DIR[classname]\n obj_path = os.path.join(obj_dir, self.id)\n pickle.dump(self, open(obj_path, 'wb'))\n\n def get_all_obj(cls):\n \"\"\"获取所有对象功能\"\"\"\n classname = cls.__name__\n obj_dir = settings.CLASSNAME_TO_DIR[classname]\n obj_list = []\n for obj_name in os.listdir(obj_dir):\n obj_path = os.path.join(obj_dir, obj_name)\n obj = pickle.load(open(obj_path, 'rb'))\n obj_list.append(obj)\n return obj_list\n\n def enroll(self, logger):\n \"\"\"注册功能\"\"\"\n obj_list = self.get_all_obj()\n obj_username_list = []\n for obj in obj_list:\n obj_username_list.append(obj.username)\n while True:\n md5 = hashlib.md5()\n username = input('请输入用户名:').strip()\n password = input('请输入密码:').strip()\n if len(username) == 0 or len(password) == 0:\n continue\n md5.update(password.encode())\n password = md5.hexdigest()\n if username in obj_username_list:\n print('\\x1b[31;1m用户名已存在\\x1b[0m')\n continue\n self.username = username\n self.password = password\n break\n self.save()\n logger.info('%s注册了该系统!' % username)\n\n def login(cls, logger):\n \"\"\"登陆功能\"\"\"\n obj_list = cls.get_all_obj()\n count = 0\n while count < 3:\n md5 = hashlib.md5()\n username = input('用户名:').strip()\n password = input('密码:').strip()\n if len(username) == 0 or len(password) == 0:\n continue\n md5.update(password.encode())\n password = md5.hexdigest()\n for obj in obj_list:\n if username == obj.username and password == obj.password:\n print('\\x1b[32;1m登陆成功!\\x1b[0m')\n logger.info('%s登陆了该系统!' 
% username)\n status = 'done'\n login_obj = obj\n return {'status': status, 'obj': login_obj}\n print('\\x1b[31;1m用户名或密码错误!\\x1b[0m')\n count += 1\n else:\n print('\\x1b[31;1m您已尝试太多次了,请下次再试!\\x1b[0m')\n exit()\n", "source": "the_stack_v2_python_sparse", "source_path": "day09/LikeFabric/core/base.py", "source_repo": "phully/PythonHomeWork", "split": "val", "star_events_count": 0} {"blob_id": "7ffc2ef2ec955cff639e2b275b0d2ee44708282f", "bodies": ["if not root:\n return None\nelse:\n root.left, root.right = (self.invertTree1(root.right), self.invertTree1(root.left))\nreturn root", "if not root:\n return root\nstack = [root]\nwhile stack:\n node = stack.pop()\n if node:\n node.left, node.right = (node.right, node.left)\n stack += [node.left, node.right]\nreturn root"], "bodies_text": "<|body_start_0|>\n if not root:\n return None\n else:\n root.left, root.right = (self.invertTree1(root.right), self.invertTree1(root.left))\n return root\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return root\n stack = [root]\n while stack:\n node = stack.pop()\n if node:\n node.left, node.right = (node.right, node.left)\n stack += [node.left, node.right]\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def invertTree1(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def invertTree2(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return None\n else:\n root.left, root.right = (self.invertTree1(root.right), self.invertTree1(root.left))\n return root\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return root\n stack = [root]\n while stack:\n node = stack.pop()\n if node:\n node.left, node.right = (node.right, node.left)\n stack += [node.left, node.right]\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000059", "length_bytes": 916, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode :rtype: TreeNode", "name": "invertTree1", "signature": "def invertTree1(self, root)"}, {"docstring": ":type root: TreeNode :rtype: TreeNode", "name": "invertTree2", "signature": "def invertTree2(self, root)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006821", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def invertTree1(self, root): :type root: TreeNode :rtype: TreeNode\n- def invertTree2(self, root): :type root: TreeNode :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def invertTree1(self, root): :type root: TreeNode :rtype: TreeNode\n- def invertTree2(self, root): :type root: TreeNode :rtype: TreeNode\n\n<|skeleton|>\nclass Solution:\n\n def invertTree1(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def invertTree2(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return None\n else:\n root.left, root.right = (self.invertTree1(root.right), self.invertTree1(root.left))\n return root\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return root\n stack = [root]\n while stack:\n node = stack.pop()\n 
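The `Base` class above stores an unsalted MD5 digest, which is trivially reversible via lookup tables; a standard-library sketch of a salted, deliberately slow hash as an alternative (names here are hypothetical, not part of the record) is:

```python
import hashlib
import hmac
import os

def hash_password(password):
    # A random per-user salt defeats precomputed rainbow tables.
    salt = os.urandom(16)
    digest = hashlib.pbkdf2_hmac('sha256', password.encode(), salt, 100_000)
    return salt + digest

def check_password(password, stored):
    salt, expected = stored[:16], stored[16:]
    candidate = hashlib.pbkdf2_hmac('sha256', password.encode(), salt, 100_000)
    # Constant-time comparison avoids leaking match position via timing.
    return hmac.compare_digest(candidate, expected)
```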
if node:\n node.left, node.right = (node.right, node.left)\n stack += [node.left, node.right]\n return root\n<|end_body_1|>\n", "revision_id": "8fb6c1d947046dabd58ff8482b2c0b41f39aa988", "skeleton": "<|skeleton|>\nclass Solution:\n\n def invertTree1(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def invertTree2(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def invertTree1(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n if not root:\n return None\n else:\n root.left, root.right = (self.invertTree1(root.right), self.invertTree1(root.left))\n return root\n\n def invertTree2(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n if not root:\n return root\n stack = [root]\n while stack:\n node = stack.pop()\n if node:\n node.left, node.right = (node.right, node.left)\n stack += [node.left, node.right]\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "Python/LeetCode/226.py", "source_repo": "czx94/Algorithms-Collection", "split": "val", "star_events_count": 2} {"blob_id": "0cf74fe152a4cf94f4f22190dd45c879a96611e3", "bodies": ["if isinstance(start_date, str):\n start_date = parser.parse(start_date).date()\nif isinstance(end_date, str):\n end_date = parser.parse(end_date).date()\ncluster_id = get_cluster_id_from_provider(openshift_provider_uuid)\nwith OCPReportDBAccessor(self._schema) as accessor:\n report_period = accessor.report_periods_for_provider_uuid(openshift_provider_uuid, start_date)\n accessor.delete_infrastructure_raw_cost_from_daily_summary(openshift_provider_uuid, report_period.id, start_date, end_date)\naws_bills = aws_get_bills_from_provider(aws_provider_uuid, self._schema, start_date, end_date)\nwith schema_context(self._schema):\n self._handle_partitions(('reporting_ocpawscostlineitem_daily_summary', 'reporting_ocpawscostlineitem_project_daily_summary'), start_date, end_date)\n cluster_id = get_cluster_id_from_provider(openshift_provider_uuid)\n aws_bills = aws_get_bills_from_provider(aws_provider_uuid, self._schema, start_date, end_date)\n aws_bill_ids = [str(bill.id) for bill in aws_bills]\n current_aws_bill_id = aws_bills.first().id if aws_bills else None\n current_ocp_report_period_id = report_period.id\nwith CostModelDBAccessor(self._schema, aws_provider_uuid) as cost_model_accessor:\n markup = cost_model_accessor.markup\n markup_value = Decimal(markup.get('value', 0)) / 100\nwith AWSReportDBAccessor(self._schema) as accessor:\n for start, end in date_range_pair(start_date, end_date, step=settings.TRINO_DATE_STEP):\n LOG.info('Updating OpenShift on AWS summary table for \\n\\tSchema: %s \\n\\tProvider: %s \\n\\tDates: %s - %s\\n\\tCluster ID: %s, AWS Bill ID: %s', self._schema, self._provider.uuid, start, end, cluster_id, current_aws_bill_id)\n accessor.populate_ocp_on_aws_cost_daily_summary_presto(start, end, openshift_provider_uuid, aws_provider_uuid, current_ocp_report_period_id, current_aws_bill_id, markup_value)\n accessor.back_populate_ocp_on_aws_daily_summary(start_date, end_date, current_ocp_report_period_id)\n accessor.populate_ocp_on_aws_tags_summary_table(aws_bill_ids, start_date, end_date)", "if isinstance(start_date, str):\n start_date = parser.parse(start_date).date()\nif isinstance(end_date, str):\n end_date = parser.parse(end_date).date()\ncluster_id = 
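The tree-inversion record assumes LeetCode's `TreeNode` without defining it; a minimal definition plus a one-level check (assuming the record's `Solution` class is in scope) looks like:

```python
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

root = TreeNode(1, TreeNode(2), TreeNode(3))
inverted = Solution().invertTree1(root)
# Inversion swaps the children at every node.
assert inverted.left.val == 3 and inverted.right.val == 2
```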
get_cluster_id_from_provider(openshift_provider_uuid)\nwith OCPReportDBAccessor(self._schema) as accessor:\n report_period = accessor.report_periods_for_provider_uuid(openshift_provider_uuid, start_date)\n accessor.delete_infrastructure_raw_cost_from_daily_summary(openshift_provider_uuid, report_period.id, start_date, end_date)\nazure_bills = azure_get_bills_from_provider(azure_provider_uuid, self._schema, start_date, end_date)\nwith schema_context(self._schema):\n self._handle_partitions(('reporting_ocpazurecostlineitem_daily_summary', 'reporting_ocpazurecostlineitem_project_daily_summary'), start_date, end_date)\n cluster_id = get_cluster_id_from_provider(openshift_provider_uuid)\n azure_bills = azure_get_bills_from_provider(azure_provider_uuid, self._schema, start_date, end_date)\n azure_bill_ids = [str(bill.id) for bill in azure_bills]\n current_azure_bill_id = azure_bills.first().id if azure_bills else None\n current_ocp_report_period_id = report_period.id\nwith CostModelDBAccessor(self._schema, azure_provider_uuid) as cost_model_accessor:\n markup = cost_model_accessor.markup\n markup_value = Decimal(markup.get('value', 0)) / 100\nwith AzureReportDBAccessor(self._schema) as accessor:\n for start, end in date_range_pair(start_date, end_date, step=settings.TRINO_DATE_STEP):\n LOG.info('Updating OpenShift on Azure summary table for \\n\\tSchema: %s \\n\\tProvider: %s \\n\\tDates: %s - %s\\n\\tCluster ID: %s, Azure Bill ID: %s', self._schema, self._provider.uuid, start, end, cluster_id, current_azure_bill_id)\n accessor.populate_ocp_on_azure_cost_daily_summary_presto(start, end, openshift_provider_uuid, azure_provider_uuid, current_ocp_report_period_id, current_azure_bill_id, markup_value)\n accessor.back_populate_ocp_on_azure_daily_summary(start_date, end_date, current_ocp_report_period_id)\n accessor.populate_ocp_on_azure_tags_summary_table(azure_bill_ids, start_date, end_date)"], "bodies_text": "<|body_start_0|>\n if isinstance(start_date, str):\n start_date = parser.parse(start_date).date()\n if isinstance(end_date, str):\n end_date = parser.parse(end_date).date()\n cluster_id = get_cluster_id_from_provider(openshift_provider_uuid)\n with OCPReportDBAccessor(self._schema) as accessor:\n report_period = accessor.report_periods_for_provider_uuid(openshift_provider_uuid, start_date)\n accessor.delete_infrastructure_raw_cost_from_daily_summary(openshift_provider_uuid, report_period.id, start_date, end_date)\n aws_bills = aws_get_bills_from_provider(aws_provider_uuid, self._schema, start_date, end_date)\n with schema_context(self._schema):\n self._handle_partitions(('reporting_ocpawscostlineitem_daily_summary', 'reporting_ocpawscostlineitem_project_daily_summary'), start_date, end_date)\n cluster_id = get_cluster_id_from_provider(openshift_provider_uuid)\n aws_bills = aws_get_bills_from_provider(aws_provider_uuid, self._schema, start_date, end_date)\n aws_bill_ids = [str(bill.id) for bill in aws_bills]\n current_aws_bill_id = aws_bills.first().id if aws_bills else None\n current_ocp_report_period_id = report_period.id\n with CostModelDBAccessor(self._schema, aws_provider_uuid) as cost_model_accessor:\n markup = cost_model_accessor.markup\n markup_value = Decimal(markup.get('value', 0)) / 100\n with AWSReportDBAccessor(self._schema) as accessor:\n for start, end in date_range_pair(start_date, end_date, step=settings.TRINO_DATE_STEP):\n LOG.info('Updating OpenShift on AWS summary table for \\n\\tSchema: %s \\n\\tProvider: %s \\n\\tDates: %s - %s\\n\\tCluster ID: %s, AWS Bill ID: %s', 
self._schema, self._provider.uuid, start, end, cluster_id, current_aws_bill_id)\n accessor.populate_ocp_on_aws_cost_daily_summary_presto(start, end, openshift_provider_uuid, aws_provider_uuid, current_ocp_report_period_id, current_aws_bill_id, markup_value)\n accessor.back_populate_ocp_on_aws_daily_summary(start_date, end_date, current_ocp_report_period_id)\n accessor.populate_ocp_on_aws_tags_summary_table(aws_bill_ids, start_date, end_date)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(start_date, str):\n start_date = parser.parse(start_date).date()\n if isinstance(end_date, str):\n end_date = parser.parse(end_date).date()\n cluster_id = get_cluster_id_from_provider(openshift_provider_uuid)\n with OCPReportDBAccessor(self._schema) as accessor:\n report_period = accessor.report_periods_for_provider_uuid(openshift_provider_uuid, start_date)\n accessor.delete_infrastructure_raw_cost_from_daily_summary(openshift_provider_uuid, report_period.id, start_date, end_date)\n azure_bills = azure_get_bills_from_provider(azure_provider_uuid, self._schema, start_date, end_date)\n with schema_context(self._schema):\n self._handle_partitions(('reporting_ocpazurecostlineitem_daily_summary', 'reporting_ocpazurecostlineitem_project_daily_summary'), start_date, end_date)\n cluster_id = get_cluster_id_from_provider(openshift_provider_uuid)\n azure_bills = azure_get_bills_from_provider(azure_provider_uuid, self._schema, start_date, end_date)\n azure_bill_ids = [str(bill.id) for bill in azure_bills]\n current_azure_bill_id = azure_bills.first().id if azure_bills else None\n current_ocp_report_period_id = report_period.id\n with CostModelDBAccessor(self._schema, azure_provider_uuid) as cost_model_accessor:\n markup = cost_model_accessor.markup\n markup_value = Decimal(markup.get('value', 0)) / 100\n with AzureReportDBAccessor(self._schema) as accessor:\n for start, end in date_range_pair(start_date, end_date, step=settings.TRINO_DATE_STEP):\n LOG.info('Updating OpenShift on Azure summary table for \\n\\tSchema: %s \\n\\tProvider: %s \\n\\tDates: %s - %s\\n\\tCluster ID: %s, Azure Bill ID: %s', self._schema, self._provider.uuid, start, end, cluster_id, current_azure_bill_id)\n accessor.populate_ocp_on_azure_cost_daily_summary_presto(start, end, openshift_provider_uuid, azure_provider_uuid, current_ocp_report_period_id, current_azure_bill_id, markup_value)\n accessor.back_populate_ocp_on_azure_daily_summary(start_date, end_date, current_ocp_report_period_id)\n accessor.populate_ocp_on_azure_tags_summary_table(azure_bill_ids, start_date, end_date)\n<|end_body_1|>\n", "class_docstring": "Class to update OCP report summary data.", "class_name": "OCPCloudParquetReportSummaryUpdater", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OCPCloudParquetReportSummaryUpdater:\n \"\"\"Class to update OCP report summary data.\"\"\"\n\n def update_aws_summary_tables(self, openshift_provider_uuid, aws_provider_uuid, start_date, end_date):\n \"\"\"Update operations specifically for OpenShift on AWS.\"\"\"\n <|body_0|>\n\n def update_azure_summary_tables(self, openshift_provider_uuid, azure_provider_uuid, start_date, end_date):\n \"\"\"Update operations specifically for OpenShift on Azure.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(start_date, str):\n start_date = parser.parse(start_date).date()\n if isinstance(end_date, str):\n end_date = parser.parse(end_date).date()\n cluster_id = 
get_cluster_id_from_provider(openshift_provider_uuid)\n with OCPReportDBAccessor(self._schema) as accessor:\n report_period = accessor.report_periods_for_provider_uuid(openshift_provider_uuid, start_date)\n accessor.delete_infrastructure_raw_cost_from_daily_summary(openshift_provider_uuid, report_period.id, start_date, end_date)\n aws_bills = aws_get_bills_from_provider(aws_provider_uuid, self._schema, start_date, end_date)\n with schema_context(self._schema):\n self._handle_partitions(('reporting_ocpawscostlineitem_daily_summary', 'reporting_ocpawscostlineitem_project_daily_summary'), start_date, end_date)\n cluster_id = get_cluster_id_from_provider(openshift_provider_uuid)\n aws_bills = aws_get_bills_from_provider(aws_provider_uuid, self._schema, start_date, end_date)\n aws_bill_ids = [str(bill.id) for bill in aws_bills]\n current_aws_bill_id = aws_bills.first().id if aws_bills else None\n current_ocp_report_period_id = report_period.id\n with CostModelDBAccessor(self._schema, aws_provider_uuid) as cost_model_accessor:\n markup = cost_model_accessor.markup\n markup_value = Decimal(markup.get('value', 0)) / 100\n with AWSReportDBAccessor(self._schema) as accessor:\n for start, end in date_range_pair(start_date, end_date, step=settings.TRINO_DATE_STEP):\n LOG.info('Updating OpenShift on AWS summary table for \\n\\tSchema: %s \\n\\tProvider: %s \\n\\tDates: %s - %s\\n\\tCluster ID: %s, AWS Bill ID: %s', self._schema, self._provider.uuid, start, end, cluster_id, current_aws_bill_id)\n accessor.populate_ocp_on_aws_cost_daily_summary_presto(start, end, openshift_provider_uuid, aws_provider_uuid, current_ocp_report_period_id, current_aws_bill_id, markup_value)\n accessor.back_populate_ocp_on_aws_daily_summary(start_date, end_date, current_ocp_report_period_id)\n accessor.populate_ocp_on_aws_tags_summary_table(aws_bill_ids, start_date, end_date)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(start_date, str):\n start_date = parser.parse(start_date).date()\n if isinstance(end_date, str):\n end_date = parser.parse(end_date).date()\n cluster_id = get_cluster_id_from_provider(openshift_provider_uuid)\n with OCPReportDBAccessor(self._schema) as accessor:\n report_period = accessor.report_periods_for_provider_uuid(openshift_provider_uuid, start_date)\n accessor.delete_infrastructure_raw_cost_from_daily_summary(openshift_provider_uuid, report_period.id, start_date, end_date)\n azure_bills = azure_get_bills_from_provider(azure_provider_uuid, self._schema, start_date, end_date)\n with schema_context(self._schema):\n self._handle_partitions(('reporting_ocpazurecostlineitem_daily_summary', 'reporting_ocpazurecostlineitem_project_daily_summary'), start_date, end_date)\n cluster_id = get_cluster_id_from_provider(openshift_provider_uuid)\n azure_bills = azure_get_bills_from_provider(azure_provider_uuid, self._schema, start_date, end_date)\n azure_bill_ids = [str(bill.id) for bill in azure_bills]\n current_azure_bill_id = azure_bills.first().id if azure_bills else None\n current_ocp_report_period_id = report_period.id\n with CostModelDBAccessor(self._schema, azure_provider_uuid) as cost_model_accessor:\n markup = cost_model_accessor.markup\n markup_value = Decimal(markup.get('value', 0)) / 100\n with AzureReportDBAccessor(self._schema) as accessor:\n for start, end in date_range_pair(start_date, end_date, step=settings.TRINO_DATE_STEP):\n LOG.info('Updating OpenShift on Azure summary table for \\n\\tSchema: %s \\n\\tProvider: %s \\n\\tDates: %s - %s\\n\\tCluster ID: %s, Azure Bill ID: %s', self._schema, 
self._provider.uuid, start, end, cluster_id, current_azure_bill_id)\n accessor.populate_ocp_on_azure_cost_daily_summary_presto(start, end, openshift_provider_uuid, azure_provider_uuid, current_ocp_report_period_id, current_azure_bill_id, markup_value)\n accessor.back_populate_ocp_on_azure_daily_summary(start_date, end_date, current_ocp_report_period_id)\n accessor.populate_ocp_on_azure_tags_summary_table(azure_bill_ids, start_date, end_date)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000060", "length_bytes": 7056, "license_type": "permissive", "methods": [{"docstring": "Update operations specifically for OpenShift on AWS.", "name": "update_aws_summary_tables", "signature": "def update_aws_summary_tables(self, openshift_provider_uuid, aws_provider_uuid, start_date, end_date)"}, {"docstring": "Update operations specifically for OpenShift on Azure.", "name": "update_azure_summary_tables", "signature": "def update_azure_summary_tables(self, openshift_provider_uuid, azure_provider_uuid, start_date, end_date)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005222", "prompt": "Implement the Python class `OCPCloudParquetReportSummaryUpdater` described below.\n\nClass description:\nClass to update OCP report summary data.\n\nMethod signatures and docstrings:\n- def update_aws_summary_tables(self, openshift_provider_uuid, aws_provider_uuid, start_date, end_date): Update operations specifically for OpenShift on AWS.\n- def update_azure_summary_tables(self, openshift_provider_uuid, azure_provider_uuid, start_date, end_date): Update operations specifically for OpenShift on Azure.", "prompted_full_text": "Implement the Python class `OCPCloudParquetReportSummaryUpdater` described below.\n\nClass description:\nClass to update OCP report summary data.\n\nMethod signatures and docstrings:\n- def update_aws_summary_tables(self, openshift_provider_uuid, aws_provider_uuid, start_date, end_date): Update operations specifically for OpenShift on AWS.\n- def update_azure_summary_tables(self, openshift_provider_uuid, azure_provider_uuid, start_date, end_date): Update operations specifically for OpenShift on Azure.\n\n<|skeleton|>\nclass OCPCloudParquetReportSummaryUpdater:\n \"\"\"Class to update OCP report summary data.\"\"\"\n\n def update_aws_summary_tables(self, openshift_provider_uuid, aws_provider_uuid, start_date, end_date):\n \"\"\"Update operations specifically for OpenShift on AWS.\"\"\"\n <|body_0|>\n\n def update_azure_summary_tables(self, openshift_provider_uuid, azure_provider_uuid, start_date, end_date):\n \"\"\"Update operations specifically for OpenShift on Azure.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(start_date, str):\n start_date = parser.parse(start_date).date()\n if isinstance(end_date, str):\n end_date = parser.parse(end_date).date()\n cluster_id = get_cluster_id_from_provider(openshift_provider_uuid)\n with OCPReportDBAccessor(self._schema) as accessor:\n report_period = accessor.report_periods_for_provider_uuid(openshift_provider_uuid, start_date)\n accessor.delete_infrastructure_raw_cost_from_daily_summary(openshift_provider_uuid, report_period.id, start_date, end_date)\n aws_bills = aws_get_bills_from_provider(aws_provider_uuid, self._schema, start_date, end_date)\n with schema_context(self._schema):\n self._handle_partitions(('reporting_ocpawscostlineitem_daily_summary', 'reporting_ocpawscostlineitem_project_daily_summary'), start_date, end_date)\n cluster_id = 
get_cluster_id_from_provider(openshift_provider_uuid)\n aws_bills = aws_get_bills_from_provider(aws_provider_uuid, self._schema, start_date, end_date)\n aws_bill_ids = [str(bill.id) for bill in aws_bills]\n current_aws_bill_id = aws_bills.first().id if aws_bills else None\n current_ocp_report_period_id = report_period.id\n with CostModelDBAccessor(self._schema, aws_provider_uuid) as cost_model_accessor:\n markup = cost_model_accessor.markup\n markup_value = Decimal(markup.get('value', 0)) / 100\n with AWSReportDBAccessor(self._schema) as accessor:\n for start, end in date_range_pair(start_date, end_date, step=settings.TRINO_DATE_STEP):\n LOG.info('Updating OpenShift on AWS summary table for \\n\\tSchema: %s \\n\\tProvider: %s \\n\\tDates: %s - %s\\n\\tCluster ID: %s, AWS Bill ID: %s', self._schema, self._provider.uuid, start, end, cluster_id, current_aws_bill_id)\n accessor.populate_ocp_on_aws_cost_daily_summary_presto(start, end, openshift_provider_uuid, aws_provider_uuid, current_ocp_report_period_id, current_aws_bill_id, markup_value)\n accessor.back_populate_ocp_on_aws_daily_summary(start_date, end_date, current_ocp_report_period_id)\n accessor.populate_ocp_on_aws_tags_summary_table(aws_bill_ids, start_date, end_date)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(start_date, str):\n start_date = parser.parse(start_date).date()\n if isinstance(end_date, str):\n end_date = parser.parse(end_date).date()\n cluster_id = get_cluster_id_from_provider(openshift_provider_uuid)\n with OCPReportDBAccessor(self._schema) as accessor:\n report_period = accessor.report_periods_for_provider_uuid(openshift_provider_uuid, start_date)\n accessor.delete_infrastructure_raw_cost_from_daily_summary(openshift_provider_uuid, report_period.id, start_date, end_date)\n azure_bills = azure_get_bills_from_provider(azure_provider_uuid, self._schema, start_date, end_date)\n with schema_context(self._schema):\n self._handle_partitions(('reporting_ocpazurecostlineitem_daily_summary', 'reporting_ocpazurecostlineitem_project_daily_summary'), start_date, end_date)\n cluster_id = get_cluster_id_from_provider(openshift_provider_uuid)\n azure_bills = azure_get_bills_from_provider(azure_provider_uuid, self._schema, start_date, end_date)\n azure_bill_ids = [str(bill.id) for bill in azure_bills]\n current_azure_bill_id = azure_bills.first().id if azure_bills else None\n current_ocp_report_period_id = report_period.id\n with CostModelDBAccessor(self._schema, azure_provider_uuid) as cost_model_accessor:\n markup = cost_model_accessor.markup\n markup_value = Decimal(markup.get('value', 0)) / 100\n with AzureReportDBAccessor(self._schema) as accessor:\n for start, end in date_range_pair(start_date, end_date, step=settings.TRINO_DATE_STEP):\n LOG.info('Updating OpenShift on Azure summary table for \\n\\tSchema: %s \\n\\tProvider: %s \\n\\tDates: %s - %s\\n\\tCluster ID: %s, Azure Bill ID: %s', self._schema, self._provider.uuid, start, end, cluster_id, current_azure_bill_id)\n accessor.populate_ocp_on_azure_cost_daily_summary_presto(start, end, openshift_provider_uuid, azure_provider_uuid, current_ocp_report_period_id, current_azure_bill_id, markup_value)\n accessor.back_populate_ocp_on_azure_daily_summary(start_date, end_date, current_ocp_report_period_id)\n accessor.populate_ocp_on_azure_tags_summary_table(azure_bill_ids, start_date, end_date)\n<|end_body_1|>\n", "revision_id": "88e2d679148d0e4735c5018faada638f73d4dc5c", "skeleton": "<|skeleton|>\nclass OCPCloudParquetReportSummaryUpdater:\n \"\"\"Class to update OCP report 
summary data.\"\"\"\n\n def update_aws_summary_tables(self, openshift_provider_uuid, aws_provider_uuid, start_date, end_date):\n \"\"\"Update operations specifically for OpenShift on AWS.\"\"\"\n <|body_0|>\n\n def update_azure_summary_tables(self, openshift_provider_uuid, azure_provider_uuid, start_date, end_date):\n \"\"\"Update operations specifically for OpenShift on Azure.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class OCPCloudParquetReportSummaryUpdater:\n \"\"\"Class to update OCP report summary data.\"\"\"\n\n def update_aws_summary_tables(self, openshift_provider_uuid, aws_provider_uuid, start_date, end_date):\n \"\"\"Update operations specifically for OpenShift on AWS.\"\"\"\n if isinstance(start_date, str):\n start_date = parser.parse(start_date).date()\n if isinstance(end_date, str):\n end_date = parser.parse(end_date).date()\n cluster_id = get_cluster_id_from_provider(openshift_provider_uuid)\n with OCPReportDBAccessor(self._schema) as accessor:\n report_period = accessor.report_periods_for_provider_uuid(openshift_provider_uuid, start_date)\n accessor.delete_infrastructure_raw_cost_from_daily_summary(openshift_provider_uuid, report_period.id, start_date, end_date)\n aws_bills = aws_get_bills_from_provider(aws_provider_uuid, self._schema, start_date, end_date)\n with schema_context(self._schema):\n self._handle_partitions(('reporting_ocpawscostlineitem_daily_summary', 'reporting_ocpawscostlineitem_project_daily_summary'), start_date, end_date)\n cluster_id = get_cluster_id_from_provider(openshift_provider_uuid)\n aws_bills = aws_get_bills_from_provider(aws_provider_uuid, self._schema, start_date, end_date)\n aws_bill_ids = [str(bill.id) for bill in aws_bills]\n current_aws_bill_id = aws_bills.first().id if aws_bills else None\n current_ocp_report_period_id = report_period.id\n with CostModelDBAccessor(self._schema, aws_provider_uuid) as cost_model_accessor:\n markup = cost_model_accessor.markup\n markup_value = Decimal(markup.get('value', 0)) / 100\n with AWSReportDBAccessor(self._schema) as accessor:\n for start, end in date_range_pair(start_date, end_date, step=settings.TRINO_DATE_STEP):\n LOG.info('Updating OpenShift on AWS summary table for \\n\\tSchema: %s \\n\\tProvider: %s \\n\\tDates: %s - %s\\n\\tCluster ID: %s, AWS Bill ID: %s', self._schema, self._provider.uuid, start, end, cluster_id, current_aws_bill_id)\n accessor.populate_ocp_on_aws_cost_daily_summary_presto(start, end, openshift_provider_uuid, aws_provider_uuid, current_ocp_report_period_id, current_aws_bill_id, markup_value)\n accessor.back_populate_ocp_on_aws_daily_summary(start_date, end_date, current_ocp_report_period_id)\n accessor.populate_ocp_on_aws_tags_summary_table(aws_bill_ids, start_date, end_date)\n\n def update_azure_summary_tables(self, openshift_provider_uuid, azure_provider_uuid, start_date, end_date):\n \"\"\"Update operations specifically for OpenShift on Azure.\"\"\"\n if isinstance(start_date, str):\n start_date = parser.parse(start_date).date()\n if isinstance(end_date, str):\n end_date = parser.parse(end_date).date()\n cluster_id = get_cluster_id_from_provider(openshift_provider_uuid)\n with OCPReportDBAccessor(self._schema) as accessor:\n report_period = accessor.report_periods_for_provider_uuid(openshift_provider_uuid, start_date)\n accessor.delete_infrastructure_raw_cost_from_daily_summary(openshift_provider_uuid, report_period.id, start_date, end_date)\n azure_bills = 
azure_get_bills_from_provider(azure_provider_uuid, self._schema, start_date, end_date)\n with schema_context(self._schema):\n self._handle_partitions(('reporting_ocpazurecostlineitem_daily_summary', 'reporting_ocpazurecostlineitem_project_daily_summary'), start_date, end_date)\n cluster_id = get_cluster_id_from_provider(openshift_provider_uuid)\n azure_bills = azure_get_bills_from_provider(azure_provider_uuid, self._schema, start_date, end_date)\n azure_bill_ids = [str(bill.id) for bill in azure_bills]\n current_azure_bill_id = azure_bills.first().id if azure_bills else None\n current_ocp_report_period_id = report_period.id\n with CostModelDBAccessor(self._schema, azure_provider_uuid) as cost_model_accessor:\n markup = cost_model_accessor.markup\n markup_value = Decimal(markup.get('value', 0)) / 100\n with AzureReportDBAccessor(self._schema) as accessor:\n for start, end in date_range_pair(start_date, end_date, step=settings.TRINO_DATE_STEP):\n LOG.info('Updating OpenShift on Azure summary table for \\n\\tSchema: %s \\n\\tProvider: %s \\n\\tDates: %s - %s\\n\\tCluster ID: %s, Azure Bill ID: %s', self._schema, self._provider.uuid, start, end, cluster_id, current_azure_bill_id)\n accessor.populate_ocp_on_azure_cost_daily_summary_presto(start, end, openshift_provider_uuid, azure_provider_uuid, current_ocp_report_period_id, current_azure_bill_id, markup_value)\n accessor.back_populate_ocp_on_azure_daily_summary(start_date, end_date, current_ocp_report_period_id)\n accessor.populate_ocp_on_azure_tags_summary_table(azure_bill_ids, start_date, end_date)\n", "source": "the_stack_v2_python_sparse", "source_path": "koku/masu/processor/ocp/ocp_cloud_parquet_summary_updater.py", "source_repo": "pavanyadavalli/koku", "split": "val", "star_events_count": 2} {"blob_id": "507a01d9702a9803a8505889b8854a12345e8ea2", "bodies": ["self.children_count = children_count\nself.dc_list = dc_list\nself.replication_strategy = replication_strategy", "if dictionary is None:\n return None\nchildren_count = dictionary.get('childrenCount')\ndc_list = dictionary.get('dcList')\nreplication_strategy = dictionary.get('replicationStrategy')\nreturn cls(children_count, dc_list, replication_strategy)"], "bodies_text": "<|body_start_0|>\n self.children_count = children_count\n self.dc_list = dc_list\n self.replication_strategy = replication_strategy\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n children_count = dictionary.get('childrenCount')\n dc_list = dictionary.get('dcList')\n replication_strategy = dictionary.get('replicationStrategy')\n return cls(children_count, dc_list, replication_strategy)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'CassandraKeyspace' model. Specifies an Object containing information about a Cassandra Keyspace. Attributes: children_count (int): Number of documents in this bucket. dc_list (list of string): If the replication strategy is set as kNetwork, then dc_list will have a list of data centers to which the keyspace is being replicated to. replication_strategy (ReplicationStrategyEnum): Replication stragegy for the keyspace. Specifies the type of an Cassandra source entity.", "class_name": "CassandraKeyspace", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CassandraKeyspace:\n \"\"\"Implementation of the 'CassandraKeyspace' model. Specifies an Object containing information about a Cassandra Keyspace. Attributes: children_count (int): Number of documents in this bucket. 
dc_list (list of string): If the replication strategy is set as kNetwork, then dc_list will have a list of data centers to which the keyspace is being replicated to. replication_strategy (ReplicationStrategyEnum): Replication stragegy for the keyspace. Specifies the type of an Cassandra source entity.\"\"\"\n\n def __init__(self, children_count=None, dc_list=None, replication_strategy=None):\n \"\"\"Constructor for the CassandraKeyspace class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.children_count = children_count\n self.dc_list = dc_list\n self.replication_strategy = replication_strategy\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n children_count = dictionary.get('childrenCount')\n dc_list = dictionary.get('dcList')\n replication_strategy = dictionary.get('replicationStrategy')\n return cls(children_count, dc_list, replication_strategy)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000061", "length_bytes": 2167, "license_type": "permissive", "methods": [{"docstring": "Constructor for the CassandraKeyspace class", "name": "__init__", "signature": "def __init__(self, children_count=None, dc_list=None, replication_strategy=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `CassandraKeyspace` described below.\n\nClass description:\nImplementation of the 'CassandraKeyspace' model. Specifies an Object containing information about a Cassandra Keyspace. Attributes: children_count (int): Number of documents in this bucket. dc_list (list of string): If the replication strategy is set as kNetwork, then dc_list will have a list of data centers to which the keyspace is being replicated to. replication_strategy (ReplicationStrategyEnum): Replication stragegy for the keyspace. Specifies the type of an Cassandra source entity.\n\nMethod signatures and docstrings:\n- def __init__(self, children_count=None, dc_list=None, replication_strategy=None): Constructor for the CassandraKeyspace class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `CassandraKeyspace` described below.\n\nClass description:\nImplementation of the 'CassandraKeyspace' model. Specifies an Object containing information about a Cassandra Keyspace. Attributes: children_count (int): Number of documents in this bucket. 
dc_list (list of string): If the replication strategy is set as kNetwork, then dc_list will have a list of data centers to which the keyspace is being replicated to. replication_strategy (ReplicationStrategyEnum): Replication stragegy for the keyspace. Specifies the type of an Cassandra source entity.\n\nMethod signatures and docstrings:\n- def __init__(self, children_count=None, dc_list=None, replication_strategy=None): Constructor for the CassandraKeyspace class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass CassandraKeyspace:\n \"\"\"Implementation of the 'CassandraKeyspace' model. Specifies an Object containing information about a Cassandra Keyspace. Attributes: children_count (int): Number of documents in this bucket. dc_list (list of string): If the replication strategy is set as kNetwork, then dc_list will have a list of data centers to which the keyspace is being replicated to. replication_strategy (ReplicationStrategyEnum): Replication stragegy for the keyspace. Specifies the type of an Cassandra source entity.\"\"\"\n\n def __init__(self, children_count=None, dc_list=None, replication_strategy=None):\n \"\"\"Constructor for the CassandraKeyspace class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.children_count = children_count\n self.dc_list = dc_list\n self.replication_strategy = replication_strategy\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n children_count = dictionary.get('childrenCount')\n dc_list = dictionary.get('dcList')\n replication_strategy = dictionary.get('replicationStrategy')\n return cls(children_count, dc_list, replication_strategy)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass CassandraKeyspace:\n \"\"\"Implementation of the 'CassandraKeyspace' model. Specifies an Object containing information about a Cassandra Keyspace. Attributes: children_count (int): Number of documents in this bucket. dc_list (list of string): If the replication strategy is set as kNetwork, then dc_list will have a list of data centers to which the keyspace is being replicated to. replication_strategy (ReplicationStrategyEnum): Replication stragegy for the keyspace. Specifies the type of an Cassandra source entity.\"\"\"\n\n def __init__(self, children_count=None, dc_list=None, replication_strategy=None):\n \"\"\"Constructor for the CassandraKeyspace class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CassandraKeyspace:\n \"\"\"Implementation of the 'CassandraKeyspace' model. Specifies an Object containing information about a Cassandra Keyspace. Attributes: children_count (int): Number of documents in this bucket. dc_list (list of string): If the replication strategy is set as kNetwork, then dc_list will have a list of data centers to which the keyspace is being replicated to. replication_strategy (ReplicationStrategyEnum): Replication stragegy for the keyspace. Specifies the type of an Cassandra source entity.\"\"\"\n\n def __init__(self, children_count=None, dc_list=None, replication_strategy=None):\n \"\"\"Constructor for the CassandraKeyspace class\"\"\"\n self.children_count = children_count\n self.dc_list = dc_list\n self.replication_strategy = replication_strategy\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n children_count = dictionary.get('childrenCount')\n dc_list = dictionary.get('dcList')\n replication_strategy = dictionary.get('replicationStrategy')\n return cls(children_count, dc_list, replication_strategy)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/cassandra_keyspace.py", "source_repo": "cohesity/management-sdk-python", "split": "val", "star_events_count": 24} {"blob_id": "1161efc06d3392ed3d1fb57e535b125da2c55bcf", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n", "class_docstring": "This file describes an API for collecting and viewing traces and spans within a trace. A Trace is a collection of spans corresponding to a single operation or set of operations for an application. A span is an individual timed event which forms a node of the trace tree. 
Spans for a single trace may span multiple services.", "class_name": "TraceServiceServicer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TraceServiceServicer:\n \"\"\"This file describes an API for collecting and viewing traces and spans within a trace. A Trace is a collection of spans corresponding to a single operation or set of operations for an application. A span is an individual timed event which forms a node of the trace tree. Spans for a single trace may span multiple services.\"\"\"\n\n def ListTraces(self, request, context):\n \"\"\"Returns of a list of traces that match the specified filter conditions.\"\"\"\n <|body_0|>\n\n def GetTrace(self, request, context):\n \"\"\"Gets a single trace by its ID.\"\"\"\n <|body_1|>\n\n def PatchTraces(self, request, context):\n \"\"\"Sends new traces to Stackdriver Trace or updates existing traces. If the ID of a trace that you send matches that of an existing trace, any fields in the existing trace and its spans are overwritten by the provided values, and any new fields provided are merged with the existing trace data. If the ID does not match, a new trace is created.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000062", "length_bytes": 4755, "license_type": "permissive", "methods": [{"docstring": "Returns of a list of traces that match the specified filter conditions.", "name": "ListTraces", "signature": "def ListTraces(self, request, context)"}, {"docstring": "Gets a single trace by its ID.", "name": "GetTrace", "signature": "def GetTrace(self, request, context)"}, {"docstring": "Sends new traces to Stackdriver Trace or updates existing traces. If the ID of a trace that you send matches that of an existing trace, any fields in the existing trace and its spans are overwritten by the provided values, and any new fields provided are merged with the existing trace data. If the ID does not match, a new trace is created.", "name": "PatchTraces", "signature": "def PatchTraces(self, request, context)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006244", "prompt": "Implement the Python class `TraceServiceServicer` described below.\n\nClass description:\nThis file describes an API for collecting and viewing traces and spans within a trace. A Trace is a collection of spans corresponding to a single operation or set of operations for an application. A span is an individual timed event which forms a node of the trace tree. Spans for a single trace may span multiple services.\n\nMethod signatures and docstrings:\n- def ListTraces(self, request, context): Returns of a list of traces that match the specified filter conditions.\n- def GetTrace(self, request, context): Gets a single trace by its ID.\n- def PatchTraces(self, request, context): Sends new traces to Stackdriver Trace or updates existing traces. 
If the ID of a trace that you send matches that of an existing trace, any fields in the existing trace and its spans are overwritten by the provided values, and any new fields provided are merged with the existing trace data. If the ID does not match, a new trace is created.", "prompted_full_text": "Implement the Python class `TraceServiceServicer` described below.\n\nClass description:\nThis file describes an API for collecting and viewing traces and spans within a trace. A Trace is a collection of spans corresponding to a single operation or set of operations for an application. A span is an individual timed event which forms a node of the trace tree. Spans for a single trace may span multiple services.\n\nMethod signatures and docstrings:\n- def ListTraces(self, request, context): Returns of a list of traces that match the specified filter conditions.\n- def GetTrace(self, request, context): Gets a single trace by its ID.\n- def PatchTraces(self, request, context): Sends new traces to Stackdriver Trace or updates existing traces. If the ID of a trace that you send matches that of an existing trace, any fields in the existing trace and its spans are overwritten by the provided values, and any new fields provided are merged with the existing trace data. If the ID does not match, a new trace is created.\n\n<|skeleton|>\nclass TraceServiceServicer:\n \"\"\"This file describes an API for collecting and viewing traces and spans within a trace. A Trace is a collection of spans corresponding to a single operation or set of operations for an application. A span is an individual timed event which forms a node of the trace tree. Spans for a single trace may span multiple services.\"\"\"\n\n def ListTraces(self, request, context):\n \"\"\"Returns of a list of traces that match the specified filter conditions.\"\"\"\n <|body_0|>\n\n def GetTrace(self, request, context):\n \"\"\"Gets a single trace by its ID.\"\"\"\n <|body_1|>\n\n def PatchTraces(self, request, context):\n \"\"\"Sends new traces to Stackdriver Trace or updates existing traces. If the ID of a trace that you send matches that of an existing trace, any fields in the existing trace and its spans are overwritten by the provided values, and any new fields provided are merged with the existing trace data. If the ID does not match, a new trace is created.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n", "revision_id": "d897d56bce03d1fda98b79afb08264e51d46c421", "skeleton": "<|skeleton|>\nclass TraceServiceServicer:\n \"\"\"This file describes an API for collecting and viewing traces and spans within a trace. A Trace is a collection of spans corresponding to a single operation or set of operations for an application. A span is an individual timed event which forms a node of the trace tree. 
Spans for a single trace may span multiple services.\"\"\"\n\n def ListTraces(self, request, context):\n \"\"\"Returns of a list of traces that match the specified filter conditions.\"\"\"\n <|body_0|>\n\n def GetTrace(self, request, context):\n \"\"\"Gets a single trace by its ID.\"\"\"\n <|body_1|>\n\n def PatchTraces(self, request, context):\n \"\"\"Sends new traces to Stackdriver Trace or updates existing traces. If the ID of a trace that you send matches that of an existing trace, any fields in the existing trace and its spans are overwritten by the provided values, and any new fields provided are merged with the existing trace data. If the ID does not match, a new trace is created.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TraceServiceServicer:\n \"\"\"This file describes an API for collecting and viewing traces and spans within a trace. A Trace is a collection of spans corresponding to a single operation or set of operations for an application. A span is an individual timed event which forms a node of the trace tree. Spans for a single trace may span multiple services.\"\"\"\n\n def ListTraces(self, request, context):\n \"\"\"Returns of a list of traces that match the specified filter conditions.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetTrace(self, request, context):\n \"\"\"Gets a single trace by its ID.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def PatchTraces(self, request, context):\n \"\"\"Sends new traces to Stackdriver Trace or updates existing traces. If the ID of a trace that you send matches that of an existing trace, any fields in the existing trace and its spans are overwritten by the provided values, and any new fields provided are merged with the existing trace data. 
If the ID does not match, a new trace is created.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "trace/google/cloud/trace_v1/proto/trace_pb2_grpc.py", "source_repo": "tswast/google-cloud-python", "split": "val", "star_events_count": 1} {"blob_id": "7fab7d663a19374823121e2d99e1a4af22498918", "bodies": ["super(ExclusionFilter, self).__init__(order)\nself.info = 'Filters search results based on a exclusion list.'\nself.exclusion_list_string = ' '.join(filter(str.isalpha, terms.replace('+', ' ').lower().split()))\nself.filter_fields = ['title', 'summary']\nfor field in customFields:\n self.filter_fields.append(field)", "input_list = input_string.split()\nexclusion_list = self.exclusion_list_string.split()\nfor input in input_list:\n try:\n input_filtered = ''.join(filter(str.isalpha, list(input.lower())))\n except TypeError:\n tmp = input.encode('utf-8').lower()\n input_filtered = ''.join(filter(str.isalpha, list(tmp)))\n if input_filtered in exclusion_list:\n if input_filtered not in ' ':\n return True\nreturn False", "def fieldValid(result, field):\n \"\"\" This method checks the custom field exists and is also of format str/unicode \"\"\"\n if result.has_key(field):\n if type(result[field]) == str or type(result[field]) == unicode:\n return True\n else:\n print('Error: custom field {0} was of {1} instead of str/unicode'.format(field, type(result[field])))\n return False\n else:\n print('Error: custom field {0} was not found in the results'.format(field))\n return False\nfor result in results:\n valid = True\n for field in self.filter_fields:\n if fieldValid(result, field):\n valid = not self.matches_exclusion_list(result[field])\n if valid == False:\n break\n if valid == True:\n yield result"], "bodies_text": "<|body_start_0|>\n super(ExclusionFilter, self).__init__(order)\n self.info = 'Filters search results based on a exclusion list.'\n self.exclusion_list_string = ' '.join(filter(str.isalpha, terms.replace('+', ' ').lower().split()))\n self.filter_fields = ['title', 'summary']\n for field in customFields:\n self.filter_fields.append(field)\n<|end_body_0|>\n\n<|body_start_1|>\n input_list = input_string.split()\n exclusion_list = self.exclusion_list_string.split()\n for input in input_list:\n try:\n input_filtered = ''.join(filter(str.isalpha, list(input.lower())))\n except TypeError:\n tmp = input.encode('utf-8').lower()\n input_filtered = ''.join(filter(str.isalpha, list(tmp)))\n if input_filtered in exclusion_list:\n if input_filtered not in ' ':\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n def fieldValid(result, field):\n \"\"\" This method checks the custom field exists and is also of format str/unicode \"\"\"\n if result.has_key(field):\n if type(result[field]) == str or type(result[field]) == unicode:\n return True\n else:\n print('Error: custom field {0} was of {1} instead of str/unicode'.format(field, type(result[field])))\n return False\n else:\n print('Error: custom field {0} was not found in the results'.format(field))\n return False\n for result in results:\n valid = True\n for field in self.filter_fields:\n if fieldValid(result, field):\n valid = not self.matches_exclusion_list(result[field])\n if valid == False:\n break\n if valid == True:\n yield result\n<|end_body_2|>\n", "class_docstring": "Filters search results based on a list of words to exclude, if any of these are found the 
result in question is rejected. Options: * order (int): defines when, in the pipeline, this filter will be executed * terms (str): terms that, if appearing in the result, will cause it to be rejected - separated by \"+'s\" * customFields (list of str): extra fields in the results to filter with the exclusion list - depedendent upon their existence in the search service results", "class_name": "ExclusionFilter", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExclusionFilter:\n \"\"\"Filters search results based on a list of words to exclude, if any of these are found the result in question is rejected. Options: * order (int): defines when, in the pipeline, this filter will be executed * terms (str): terms that, if appearing in the result, will cause it to be rejected - separated by \"+'s\" * customFields (list of str): extra fields in the results to filter with the exclusion list - depedendent upon their existence in the search service results\"\"\"\n\n def __init__(self, order=0, terms='', customFields=[]):\n \"\"\"Constructor for ExclusionFilter.\"\"\"\n <|body_0|>\n\n def matches_exclusion_list(self, input_string):\n \"\"\"Removes results that includes words contained in exclusion list. Parameters: * exclusion_list_string: terms which, if found, will cause a result to be rejected * input_string: string with words separated by blank spaces i.e. the result field being checked for terms from the exclusion list Returns: * true: if any of the words of the input string is included in the exclusion list false: in other case\"\"\"\n <|body_1|>\n\n def filter(self, results):\n \"\"\"Filters the results according to exclusion list - rejecting results containing offending words. Parameters: * results (puppy.model.Opensearch.Response): results to be filtered Returns: * results_returned (puppy.model.Opensearch.Response): filtered results\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ExclusionFilter, self).__init__(order)\n self.info = 'Filters search results based on a exclusion list.'\n self.exclusion_list_string = ' '.join(filter(str.isalpha, terms.replace('+', ' ').lower().split()))\n self.filter_fields = ['title', 'summary']\n for field in customFields:\n self.filter_fields.append(field)\n<|end_body_0|>\n\n<|body_start_1|>\n input_list = input_string.split()\n exclusion_list = self.exclusion_list_string.split()\n for input in input_list:\n try:\n input_filtered = ''.join(filter(str.isalpha, list(input.lower())))\n except TypeError:\n tmp = input.encode('utf-8').lower()\n input_filtered = ''.join(filter(str.isalpha, list(tmp)))\n if input_filtered in exclusion_list:\n if input_filtered not in ' ':\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n def fieldValid(result, field):\n \"\"\" This method checks the custom field exists and is also of format str/unicode \"\"\"\n if result.has_key(field):\n if type(result[field]) == str or type(result[field]) == unicode:\n return True\n else:\n print('Error: custom field {0} was of {1} instead of str/unicode'.format(field, type(result[field])))\n return False\n else:\n print('Error: custom field {0} was not found in the results'.format(field))\n return False\n for result in results:\n valid = True\n for field in self.filter_fields:\n if fieldValid(result, field):\n valid = not self.matches_exclusion_list(result[field])\n if valid == False:\n break\n if valid == True:\n yield result\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000063", 
"length_bytes": 3745, "license_type": "permissive", "methods": [{"docstring": "Constructor for ExclusionFilter.", "name": "__init__", "signature": "def __init__(self, order=0, terms='', customFields=[])"}, {"docstring": "Removes results that includes words contained in exclusion list. Parameters: * exclusion_list_string: terms which, if found, will cause a result to be rejected * input_string: string with words separated by blank spaces i.e. the result field being checked for terms from the exclusion list Returns: * true: if any of the words of the input string is included in the exclusion list false: in other case", "name": "matches_exclusion_list", "signature": "def matches_exclusion_list(self, input_string)"}, {"docstring": "Filters the results according to exclusion list - rejecting results containing offending words. Parameters: * results (puppy.model.Opensearch.Response): results to be filtered Returns: * results_returned (puppy.model.Opensearch.Response): filtered results", "name": "filter", "signature": "def filter(self, results)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005642", "prompt": "Implement the Python class `ExclusionFilter` described below.\n\nClass description:\nFilters search results based on a list of words to exclude, if any of these are found the result in question is rejected. Options: * order (int): defines when, in the pipeline, this filter will be executed * terms (str): terms that, if appearing in the result, will cause it to be rejected - separated by \"+'s\" * customFields (list of str): extra fields in the results to filter with the exclusion list - depedendent upon their existence in the search service results\n\nMethod signatures and docstrings:\n- def __init__(self, order=0, terms='', customFields=[]): Constructor for ExclusionFilter.\n- def matches_exclusion_list(self, input_string): Removes results that includes words contained in exclusion list. Parameters: * exclusion_list_string: terms which, if found, will cause a result to be rejected * input_string: string with words separated by blank spaces i.e. the result field being checked for terms from the exclusion list Returns: * true: if any of the words of the input string is included in the exclusion list false: in other case\n- def filter(self, results): Filters the results according to exclusion list - rejecting results containing offending words. Parameters: * results (puppy.model.Opensearch.Response): results to be filtered Returns: * results_returned (puppy.model.Opensearch.Response): filtered results", "prompted_full_text": "Implement the Python class `ExclusionFilter` described below.\n\nClass description:\nFilters search results based on a list of words to exclude, if any of these are found the result in question is rejected. Options: * order (int): defines when, in the pipeline, this filter will be executed * terms (str): terms that, if appearing in the result, will cause it to be rejected - separated by \"+'s\" * customFields (list of str): extra fields in the results to filter with the exclusion list - depedendent upon their existence in the search service results\n\nMethod signatures and docstrings:\n- def __init__(self, order=0, terms='', customFields=[]): Constructor for ExclusionFilter.\n- def matches_exclusion_list(self, input_string): Removes results that includes words contained in exclusion list. Parameters: * exclusion_list_string: terms which, if found, will cause a result to be rejected * input_string: string with words separated by blank spaces i.e. 
the result field being checked for terms from the exclusion list Returns: * true: if any of the words of the input string is included in the exclusion list false: in other case\n- def filter(self, results): Filters the results according to exclusion list - rejecting results containing offending words. Parameters: * results (puppy.model.Opensearch.Response): results to be filtered Returns: * results_returned (puppy.model.Opensearch.Response): filtered results\n\n<|skeleton|>\nclass ExclusionFilter:\n \"\"\"Filters search results based on a list of words to exclude, if any of these are found the result in question is rejected. Options: * order (int): defines when, in the pipeline, this filter will be executed * terms (str): terms that, if appearing in the result, will cause it to be rejected - separated by \"+'s\" * customFields (list of str): extra fields in the results to filter with the exclusion list - depedendent upon their existence in the search service results\"\"\"\n\n def __init__(self, order=0, terms='', customFields=[]):\n \"\"\"Constructor for ExclusionFilter.\"\"\"\n <|body_0|>\n\n def matches_exclusion_list(self, input_string):\n \"\"\"Removes results that includes words contained in exclusion list. Parameters: * exclusion_list_string: terms which, if found, will cause a result to be rejected * input_string: string with words separated by blank spaces i.e. the result field being checked for terms from the exclusion list Returns: * true: if any of the words of the input string is included in the exclusion list false: in other case\"\"\"\n <|body_1|>\n\n def filter(self, results):\n \"\"\"Filters the results according to exclusion list - rejecting results containing offending words. Parameters: * results (puppy.model.Opensearch.Response): results to be filtered Returns: * results_returned (puppy.model.Opensearch.Response): filtered results\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ExclusionFilter, self).__init__(order)\n self.info = 'Filters search results based on a exclusion list.'\n self.exclusion_list_string = ' '.join(filter(str.isalpha, terms.replace('+', ' ').lower().split()))\n self.filter_fields = ['title', 'summary']\n for field in customFields:\n self.filter_fields.append(field)\n<|end_body_0|>\n\n<|body_start_1|>\n input_list = input_string.split()\n exclusion_list = self.exclusion_list_string.split()\n for input in input_list:\n try:\n input_filtered = ''.join(filter(str.isalpha, list(input.lower())))\n except TypeError:\n tmp = input.encode('utf-8').lower()\n input_filtered = ''.join(filter(str.isalpha, list(tmp)))\n if input_filtered in exclusion_list:\n if input_filtered not in ' ':\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n def fieldValid(result, field):\n \"\"\" This method checks the custom field exists and is also of format str/unicode \"\"\"\n if result.has_key(field):\n if type(result[field]) == str or type(result[field]) == unicode:\n return True\n else:\n print('Error: custom field {0} was of {1} instead of str/unicode'.format(field, type(result[field])))\n return False\n else:\n print('Error: custom field {0} was not found in the results'.format(field))\n return False\n for result in results:\n valid = True\n for field in self.filter_fields:\n if fieldValid(result, field):\n valid = not self.matches_exclusion_list(result[field])\n if valid == False:\n break\n if valid == True:\n yield result\n<|end_body_2|>\n", "revision_id": "ed72aee466649bd834d5b4459eb6e0173df6e2ec", "skeleton": "<|skeleton|>\nclass 
ExclusionFilter:\n \"\"\"Filters search results based on a list of words to exclude, if any of these are found the result in question is rejected. Options: * order (int): defines when, in the pipeline, this filter will be executed * terms (str): terms that, if appearing in the result, will cause it to be rejected - separated by \"+'s\" * customFields (list of str): extra fields in the results to filter with the exclusion list - depedendent upon their existence in the search service results\"\"\"\n\n def __init__(self, order=0, terms='', customFields=[]):\n \"\"\"Constructor for ExclusionFilter.\"\"\"\n <|body_0|>\n\n def matches_exclusion_list(self, input_string):\n \"\"\"Removes results that includes words contained in exclusion list. Parameters: * exclusion_list_string: terms which, if found, will cause a result to be rejected * input_string: string with words separated by blank spaces i.e. the result field being checked for terms from the exclusion list Returns: * true: if any of the words of the input string is included in the exclusion list false: in other case\"\"\"\n <|body_1|>\n\n def filter(self, results):\n \"\"\"Filters the results according to exclusion list - rejecting results containing offending words. Parameters: * results (puppy.model.Opensearch.Response): results to be filtered Returns: * results_returned (puppy.model.Opensearch.Response): filtered results\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ExclusionFilter:\n \"\"\"Filters search results based on a list of words to exclude, if any of these are found the result in question is rejected. Options: * order (int): defines when, in the pipeline, this filter will be executed * terms (str): terms that, if appearing in the result, will cause it to be rejected - separated by \"+'s\" * customFields (list of str): extra fields in the results to filter with the exclusion list - depedendent upon their existence in the search service results\"\"\"\n\n def __init__(self, order=0, terms='', customFields=[]):\n \"\"\"Constructor for ExclusionFilter.\"\"\"\n super(ExclusionFilter, self).__init__(order)\n self.info = 'Filters search results based on a exclusion list.'\n self.exclusion_list_string = ' '.join(filter(str.isalpha, terms.replace('+', ' ').lower().split()))\n self.filter_fields = ['title', 'summary']\n for field in customFields:\n self.filter_fields.append(field)\n\n def matches_exclusion_list(self, input_string):\n \"\"\"Removes results that includes words contained in exclusion list. Parameters: * exclusion_list_string: terms which, if found, will cause a result to be rejected * input_string: string with words separated by blank spaces i.e. the result field being checked for terms from the exclusion list Returns: * true: if any of the words of the input string is included in the exclusion list false: in other case\"\"\"\n input_list = input_string.split()\n exclusion_list = self.exclusion_list_string.split()\n for input in input_list:\n try:\n input_filtered = ''.join(filter(str.isalpha, list(input.lower())))\n except TypeError:\n tmp = input.encode('utf-8').lower()\n input_filtered = ''.join(filter(str.isalpha, list(tmp)))\n if input_filtered in exclusion_list:\n if input_filtered not in ' ':\n return True\n return False\n\n def filter(self, results):\n \"\"\"Filters the results according to exclusion list - rejecting results containing offending words. 
Parameters: * results (puppy.model.Opensearch.Response): results to be filtered Returns: * results_returned (puppy.model.Opensearch.Response): filtered results\"\"\"\n def fieldValid(result, field):\n \"\"\" This method checks the custom field exists and is also of format str/unicode \"\"\"\n if result.has_key(field):\n if type(result[field]) == str or type(result[field]) == unicode:\n return True\n else:\n print('Error: custom field {0} was of {1} instead of str/unicode'.format(field, type(result[field])))\n return False\n else:\n print('Error: custom field {0} was not found in the results'.format(field))\n return False\n for result in results:\n valid = True\n for field in self.filter_fields:\n if fieldValid(result, field):\n valid = not self.matches_exclusion_list(result[field])\n if valid == False:\n break\n if valid == True:\n yield result\n", "source": "the_stack_v2_python_sparse", "source_path": "reference-code/puppy/result/filter/exclusionfilter.py", "source_repo": "Granvanoeli/ifind", "split": "val", "star_events_count": 0} {"blob_id": "162248bb0e324e8d7467fba03e58935f797dc7de", "bodies": ["if not builder:\n raise ValueError('Builder is not specified')\nself.__builder = builder", "if not containerOsh:\n raise ValueError('Container is not specified')\nosh = self.__builder.buildFile(file_)\nosh.setContainer(containerOsh)\nreturn osh"], "bodies_text": "<|body_start_0|>\n if not builder:\n raise ValueError('Builder is not specified')\n self.__builder = builder\n<|end_body_0|>\n\n<|body_start_1|>\n if not containerOsh:\n raise ValueError('Container is not specified')\n osh = self.__builder.buildFile(file_)\n osh.setContainer(containerOsh)\n return osh\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Reporter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Reporter:\n\n def __init__(self, builder):\n \"\"\"@types: file_topology.Builder @raise ValueError: Builder is not specified\"\"\"\n <|body_0|>\n\n def report(self, file_, containerOsh):\n \"\"\"@types: File, ObjectStateHolder -> ObjectStateHolder @raise ValueError: Container is not specified\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not builder:\n raise ValueError('Builder is not specified')\n self.__builder = builder\n<|end_body_0|>\n\n<|body_start_1|>\n if not containerOsh:\n raise ValueError('Container is not specified')\n osh = self.__builder.buildFile(file_)\n osh.setContainer(containerOsh)\n return osh\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000064", "length_bytes": 12587, "license_type": "no_license", "methods": [{"docstring": "@types: file_topology.Builder @raise ValueError: Builder is not specified", "name": "__init__", "signature": "def __init__(self, builder)"}, {"docstring": "@types: File, ObjectStateHolder -> ObjectStateHolder @raise ValueError: Container is not specified", "name": "report", "signature": "def report(self, file_, containerOsh)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000768", "prompt": "Implement the Python class `Reporter` described below.\n\nClass description:\nImplement the Reporter class.\n\nMethod signatures and docstrings:\n- def __init__(self, builder): @types: file_topology.Builder @raise ValueError: Builder is not specified\n- def report(self, file_, containerOsh): @types: File, ObjectStateHolder -> ObjectStateHolder @raise ValueError: Container is not specified", "prompted_full_text": "Implement the Python class `Reporter` described below.\n\nClass 
description:\nImplement the Reporter class.\n\nMethod signatures and docstrings:\n- def __init__(self, builder): @types: file_topology.Builder @raise ValueError: Builder is not specified\n- def report(self, file_, containerOsh): @types: File, ObjectStateHolder -> ObjectStateHolder @raise ValueError: Container is not specified\n\n<|skeleton|>\nclass Reporter:\n\n def __init__(self, builder):\n \"\"\"@types: file_topology.Builder @raise ValueError: Builder is not specified\"\"\"\n <|body_0|>\n\n def report(self, file_, containerOsh):\n \"\"\"@types: File, ObjectStateHolder -> ObjectStateHolder @raise ValueError: Container is not specified\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not builder:\n raise ValueError('Builder is not specified')\n self.__builder = builder\n<|end_body_0|>\n\n<|body_start_1|>\n if not containerOsh:\n raise ValueError('Container is not specified')\n osh = self.__builder.buildFile(file_)\n osh.setContainer(containerOsh)\n return osh\n<|end_body_1|>\n", "revision_id": "c431e809e8d0f82e1bca7e3429dd0245560b5680", "skeleton": "<|skeleton|>\nclass Reporter:\n\n def __init__(self, builder):\n \"\"\"@types: file_topology.Builder @raise ValueError: Builder is not specified\"\"\"\n <|body_0|>\n\n def report(self, file_, containerOsh):\n \"\"\"@types: File, ObjectStateHolder -> ObjectStateHolder @raise ValueError: Container is not specified\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Reporter:\n def __init__(self, builder):\n \"\"\"@types: file_topology.Builder @raise ValueError: Builder is not specified\"\"\"\n if not builder:\n raise ValueError('Builder is not specified')\n self.__builder = builder\n\n def report(self, file_, containerOsh):\n \"\"\"@types: File, ObjectStateHolder -> ObjectStateHolder @raise ValueError: Container is not specified\"\"\"\n if not containerOsh:\n raise ValueError('Container is not specified')\n osh = self.__builder.buildFile(file_)\n osh.setContainer(containerOsh)\n return osh\n", "source": "the_stack_v2_python_sparse", "source_path": "reference/ucmdb/discovery/file_topology.py", "source_repo": "madmonkyang/cda-record", "split": "val", "star_events_count": 0} {"blob_id": "381789bae6d2d87363a5e0c651b765565a2019c1", "bodies": ["self.folder_base = folder\nsuper().__init__(folder, image_extension='JPG')\nif not self.explicit_extrinsics_paths:\n self.explicit_extrinsics_paths = self.__generate_extrinsics_from_reconstruction()", "reconstruction_path = os.path.join(self.folder_base, 'reconstruction', 'data.mat')\nextrinsics_path_template = os.path.join(self.folder_base, 'extrinsics', '{}.npy')\nloaded_data = scipy_io.loadmat(reconstruction_path)\nimage_names = loaded_data['imnames']\nposes = loaded_data['P'][0]\nnum_images = image_names.shape[0]\nfilenames = []\nfor idx in range(num_images):\n image_name = image_names[idx][0][0][0]\n image_name = os.path.splitext(image_name)[0]\n extrinsics = poses[idx]\n filename = extrinsics_path_template.format(image_name)\n np.save(filename, extrinsics)\n filenames.append(filename)\nreturn filenames"], "bodies_text": "<|body_start_0|>\n self.folder_base = folder\n super().__init__(folder, image_extension='JPG')\n if not self.explicit_extrinsics_paths:\n self.explicit_extrinsics_paths = self.__generate_extrinsics_from_reconstruction()\n<|end_body_0|>\n\n<|body_start_1|>\n reconstruction_path = os.path.join(self.folder_base, 'reconstruction', 'data.mat')\n 
extrinsics_path_template = os.path.join(self.folder_base, 'extrinsics', '{}.npy')\n loaded_data = scipy_io.loadmat(reconstruction_path)\n image_names = loaded_data['imnames']\n poses = loaded_data['P'][0]\n num_images = image_names.shape[0]\n filenames = []\n for idx in range(num_images):\n image_name = image_names[idx][0][0][0]\n image_name = os.path.splitext(image_name)[0]\n extrinsics = poses[idx]\n filename = extrinsics_path_template.format(image_name)\n np.save(filename, extrinsics)\n filenames.append(filename)\n return filenames\n<|end_body_1|>\n", "class_docstring": "Simple loader class that reads from a folder on disk. Folder layout structure: - RGB Images: images/ - Extrinsics data (optional): extrinsics/ - numpy array with the same name as images If explicit intrinsics are not provided, the exif data will be used.", "class_name": "LundDatasetLoader", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LundDatasetLoader:\n \"\"\"Simple loader class that reads from a folder on disk. Folder layout structure: - RGB Images: images/ - Extrinsics data (optional): extrinsics/ - numpy array with the same name as images If explicit intrinsics are not provided, the exif data will be used.\"\"\"\n\n def __init__(self, folder: str) -> None:\n \"\"\"Initialize object to load image data from a specified folder on disk Args: folder: the base folder for a given scene.\"\"\"\n <|body_0|>\n\n def __generate_extrinsics_from_reconstruction(self) -> List[str]:\n \"\"\"Extract extrinsics from mat file and stores them as numpy arrays. The reconstruction used for extrinsics is provided by Carl Olsson as part of the Lund dataset. Returns: file names of generated extrinsics for each pose.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.folder_base = folder\n super().__init__(folder, image_extension='JPG')\n if not self.explicit_extrinsics_paths:\n self.explicit_extrinsics_paths = self.__generate_extrinsics_from_reconstruction()\n<|end_body_0|>\n\n<|body_start_1|>\n reconstruction_path = os.path.join(self.folder_base, 'reconstruction', 'data.mat')\n extrinsics_path_template = os.path.join(self.folder_base, 'extrinsics', '{}.npy')\n loaded_data = scipy_io.loadmat(reconstruction_path)\n image_names = loaded_data['imnames']\n poses = loaded_data['P'][0]\n num_images = image_names.shape[0]\n filenames = []\n for idx in range(num_images):\n image_name = image_names[idx][0][0][0]\n image_name = os.path.splitext(image_name)[0]\n extrinsics = poses[idx]\n filename = extrinsics_path_template.format(image_name)\n np.save(filename, extrinsics)\n filenames.append(filename)\n return filenames\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000065", "length_bytes": 2397, "license_type": "permissive", "methods": [{"docstring": "Initialize object to load image data from a specified folder on disk Args: folder: the base folder for a given scene.", "name": "__init__", "signature": "def __init__(self, folder: str) -> None"}, {"docstring": "Extract extrinsics from mat file and stores them as numpy arrays. The reconstruction used for extrinsics is provided by Carl Olsson as part of the Lund dataset. 
Returns: file names of generated extrinsics for each pose.", "name": "__generate_extrinsics_from_reconstruction", "signature": "def __generate_extrinsics_from_reconstruction(self) -> List[str]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000336", "prompt": "Implement the Python class `LundDatasetLoader` described below.\n\nClass description:\nSimple loader class that reads from a folder on disk. Folder layout structure: - RGB Images: images/ - Extrinsics data (optional): extrinsics/ - numpy array with the same name as images If explicit intrinsics are not provided, the exif data will be used.\n\nMethod signatures and docstrings:\n- def __init__(self, folder: str) -> None: Initialize object to load image data from a specified folder on disk Args: folder: the base folder for a given scene.\n- def __generate_extrinsics_from_reconstruction(self) -> List[str]: Extract extrinsics from mat file and stores them as numpy arrays. The reconstruction used for extrinsics is provided by Carl Olsson as part of the Lund dataset. Returns: file names of generated extrinsics for each pose.", "prompted_full_text": "Implement the Python class `LundDatasetLoader` described below.\n\nClass description:\nSimple loader class that reads from a folder on disk. Folder layout structure: - RGB Images: images/ - Extrinsics data (optional): extrinsics/ - numpy array with the same name as images If explicit intrinsics are not provided, the exif data will be used.\n\nMethod signatures and docstrings:\n- def __init__(self, folder: str) -> None: Initialize object to load image data from a specified folder on disk Args: folder: the base folder for a given scene.\n- def __generate_extrinsics_from_reconstruction(self) -> List[str]: Extract extrinsics from mat file and stores them as numpy arrays. The reconstruction used for extrinsics is provided by Carl Olsson as part of the Lund dataset. Returns: file names of generated extrinsics for each pose.\n\n<|skeleton|>\nclass LundDatasetLoader:\n \"\"\"Simple loader class that reads from a folder on disk. Folder layout structure: - RGB Images: images/ - Extrinsics data (optional): extrinsics/ - numpy array with the same name as images If explicit intrinsics are not provided, the exif data will be used.\"\"\"\n\n def __init__(self, folder: str) -> None:\n \"\"\"Initialize object to load image data from a specified folder on disk Args: folder: the base folder for a given scene.\"\"\"\n <|body_0|>\n\n def __generate_extrinsics_from_reconstruction(self) -> List[str]:\n \"\"\"Extract extrinsics from mat file and stores them as numpy arrays. The reconstruction used for extrinsics is provided by Carl Olsson as part of the Lund dataset. 
Returns: file names of generated extrinsics for each pose.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.folder_base = folder\n super().__init__(folder, image_extension='JPG')\n if not self.explicit_extrinsics_paths:\n self.explicit_extrinsics_paths = self.__generate_extrinsics_from_reconstruction()\n<|end_body_0|>\n\n<|body_start_1|>\n reconstruction_path = os.path.join(self.folder_base, 'reconstruction', 'data.mat')\n extrinsics_path_template = os.path.join(self.folder_base, 'extrinsics', '{}.npy')\n loaded_data = scipy_io.loadmat(reconstruction_path)\n image_names = loaded_data['imnames']\n poses = loaded_data['P'][0]\n num_images = image_names.shape[0]\n filenames = []\n for idx in range(num_images):\n image_name = image_names[idx][0][0][0]\n image_name = os.path.splitext(image_name)[0]\n extrinsics = poses[idx]\n filename = extrinsics_path_template.format(image_name)\n np.save(filename, extrinsics)\n filenames.append(filename)\n return filenames\n<|end_body_1|>\n", "revision_id": "245fb4d90bf6d63d45af8f77a4debfe46ea52ff0", "skeleton": "<|skeleton|>\nclass LundDatasetLoader:\n \"\"\"Simple loader class that reads from a folder on disk. Folder layout structure: - RGB Images: images/ - Extrinsics data (optional): extrinsics/ - numpy array with the same name as images If explicit intrinsics are not provided, the exif data will be used.\"\"\"\n\n def __init__(self, folder: str) -> None:\n \"\"\"Initialize object to load image data from a specified folder on disk Args: folder: the base folder for a given scene.\"\"\"\n <|body_0|>\n\n def __generate_extrinsics_from_reconstruction(self) -> List[str]:\n \"\"\"Extract extrinsics from mat file and stores them as numpy arrays. The reconstruction used for extrinsics is provided by Carl Olsson as part of the Lund dataset. Returns: file names of generated extrinsics for each pose.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LundDatasetLoader:\n \"\"\"Simple loader class that reads from a folder on disk. Folder layout structure: - RGB Images: images/ - Extrinsics data (optional): extrinsics/ - numpy array with the same name as images If explicit intrinsics are not provided, the exif data will be used.\"\"\"\n\n def __init__(self, folder: str) -> None:\n \"\"\"Initialize object to load image data from a specified folder on disk Args: folder: the base folder for a given scene.\"\"\"\n self.folder_base = folder\n super().__init__(folder, image_extension='JPG')\n if not self.explicit_extrinsics_paths:\n self.explicit_extrinsics_paths = self.__generate_extrinsics_from_reconstruction()\n\n def __generate_extrinsics_from_reconstruction(self) -> List[str]:\n \"\"\"Extract extrinsics from mat file and stores them as numpy arrays. The reconstruction used for extrinsics is provided by Carl Olsson as part of the Lund dataset. 
Returns: file names of generated extrinsics for each pose.\"\"\"\n reconstruction_path = os.path.join(self.folder_base, 'reconstruction', 'data.mat')\n extrinsics_path_template = os.path.join(self.folder_base, 'extrinsics', '{}.npy')\n loaded_data = scipy_io.loadmat(reconstruction_path)\n image_names = loaded_data['imnames']\n poses = loaded_data['P'][0]\n num_images = image_names.shape[0]\n filenames = []\n for idx in range(num_images):\n image_name = image_names[idx][0][0][0]\n image_name = os.path.splitext(image_name)[0]\n extrinsics = poses[idx]\n filename = extrinsics_path_template.format(image_name)\n np.save(filename, extrinsics)\n filenames.append(filename)\n return filenames\n", "source": "the_stack_v2_python_sparse", "source_path": "gtsfm/loader/lund_dataset_loader.py", "source_repo": "asa/gtsfm", "split": "val", "star_events_count": 0} {"blob_id": "e3109d9c413e5bac266b67ac773a6b4a42774e26", "bodies": ["ser_path = get_project_path() + '/nltk_libs/english.all.3class.distsim.crf.ser'\njar_path = get_project_path() + '/nltk_libs/stanford-ner-3.8.0.jar'\nself.st = StanfordNERTagger(ser_path, jar_path)", "cleaned_text = CleanComments.filter_special_characters(comment=text)\nwords = cleaned_text.strip().split()\ntags = []\ntry:\n tags = self.st.tag(words)\nexcept Exception as e:\n logger.warning(msg='Error identifying entities using Stanford: %s ' % str(e))\nreturn [tag[0].lower() for tag in tags if tag[1] == 'PERSON']"], "bodies_text": "<|body_start_0|>\n ser_path = get_project_path() + '/nltk_libs/english.all.3class.distsim.crf.ser'\n jar_path = get_project_path() + '/nltk_libs/stanford-ner-3.8.0.jar'\n self.st = StanfordNERTagger(ser_path, jar_path)\n<|end_body_0|>\n\n<|body_start_1|>\n cleaned_text = CleanComments.filter_special_characters(comment=text)\n words = cleaned_text.strip().split()\n tags = []\n try:\n tags = self.st.tag(words)\n except Exception as e:\n logger.warning(msg='Error identifying entities using Stanford: %s ' % str(e))\n return [tag[0].lower() for tag in tags if tag[1] == 'PERSON']\n<|end_body_1|>\n", "class_docstring": "", "class_name": "StfNERTagger", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass StfNERTagger:\n\n def __init__(self):\n \"\"\"Open client for Stanford NERTagger :return: protocol open\"\"\"\n <|body_0|>\n\n def identify_person_types(self, text: str) -> list:\n \"\"\"Uses Stanford NERTagger to identify person types. It cleans-up some unwanted chars to have better accuracy :param text: text to identify types :return: list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ser_path = get_project_path() + '/nltk_libs/english.all.3class.distsim.crf.ser'\n jar_path = get_project_path() + '/nltk_libs/stanford-ner-3.8.0.jar'\n self.st = StanfordNERTagger(ser_path, jar_path)\n<|end_body_0|>\n\n<|body_start_1|>\n cleaned_text = CleanComments.filter_special_characters(comment=text)\n words = cleaned_text.strip().split()\n tags = []\n try:\n tags = self.st.tag(words)\n except Exception as e:\n logger.warning(msg='Error identifying entities using Stanford: %s ' % str(e))\n return [tag[0].lower() for tag in tags if tag[1] == 'PERSON']\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000066", "length_bytes": 1218, "license_type": "permissive", "methods": [{"docstring": "Open client for Stanford NERTagger :return: protocol open", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Uses Stanford NERTagger to identify person types. 
It cleans-up some unwanted chars to have better accuracy :param text: text to identify types :return: list", "name": "identify_person_types", "signature": "def identify_person_types(self, text: str) -> list"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006999", "prompt": "Implement the Python class `StfNERTagger` described below.\n\nClass description:\nImplement the StfNERTagger class.\n\nMethod signatures and docstrings:\n- def __init__(self): Open client for Stanford NERTagger :return: protocol open\n- def identify_person_types(self, text: str) -> list: Uses Stanford NERTagger to identify person types. It cleans-up some unwanted chars to have better accuracy :param text: text to identify types :return: list", "prompted_full_text": "Implement the Python class `StfNERTagger` described below.\n\nClass description:\nImplement the StfNERTagger class.\n\nMethod signatures and docstrings:\n- def __init__(self): Open client for Stanford NERTagger :return: protocol open\n- def identify_person_types(self, text: str) -> list: Uses Stanford NERTagger to identify person types. It cleans-up some unwanted chars to have better accuracy :param text: text to identify types :return: list\n\n<|skeleton|>\nclass StfNERTagger:\n\n def __init__(self):\n \"\"\"Open client for Stanford NERTagger :return: protocol open\"\"\"\n <|body_0|>\n\n def identify_person_types(self, text: str) -> list:\n \"\"\"Uses Stanford NERTagger to identify person types. It cleans-up some unwanted chars to have better accuracy :param text: text to identify types :return: list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ser_path = get_project_path() + '/nltk_libs/english.all.3class.distsim.crf.ser'\n jar_path = get_project_path() + '/nltk_libs/stanford-ner-3.8.0.jar'\n self.st = StanfordNERTagger(ser_path, jar_path)\n<|end_body_0|>\n\n<|body_start_1|>\n cleaned_text = CleanComments.filter_special_characters(comment=text)\n words = cleaned_text.strip().split()\n tags = []\n try:\n tags = self.st.tag(words)\n except Exception as e:\n logger.warning(msg='Error identifying entities using Stanford: %s ' % str(e))\n return [tag[0].lower() for tag in tags if tag[1] == 'PERSON']\n<|end_body_1|>\n", "revision_id": "c98eb8c483a05af938a2f6f49d8ea803f5711572", "skeleton": "<|skeleton|>\nclass StfNERTagger:\n\n def __init__(self):\n \"\"\"Open client for Stanford NERTagger :return: protocol open\"\"\"\n <|body_0|>\n\n def identify_person_types(self, text: str) -> list:\n \"\"\"Uses Stanford NERTagger to identify person types. It cleans-up some unwanted chars to have better accuracy :param text: text to identify types :return: list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class StfNERTagger:\n def __init__(self):\n \"\"\"Open client for Stanford NERTagger :return: protocol open\"\"\"\n ser_path = get_project_path() + '/nltk_libs/english.all.3class.distsim.crf.ser'\n jar_path = get_project_path() + '/nltk_libs/stanford-ner-3.8.0.jar'\n self.st = StanfordNERTagger(ser_path, jar_path)\n\n def identify_person_types(self, text: str) -> list:\n \"\"\"Uses Stanford NERTagger to identify person types. 
It cleans-up some unwanted chars to have better accuracy :param text: text to identify types :return: list\"\"\"\n cleaned_text = CleanComments.filter_special_characters(comment=text)\n words = cleaned_text.strip().split()\n tags = []\n try:\n tags = self.st.tag(words)\n except Exception as e:\n logger.warning(msg='Error identifying entities using Stanford: %s ' % str(e))\n return [tag[0].lower() for tag in tags if tag[1] == 'PERSON']\n", "source": "the_stack_v2_python_sparse", "source_path": "engage-analytics/utils/stanford/ner_tagger.py", "source_repo": "oliveriopt/mood-analytics", "split": "val", "star_events_count": 0} {"blob_id": "2efab58566853984d83853eb9ecc00bcf1771983", "bodies": ["self.dict = {}\nfor i in range(len(wordsDict)):\n if wordsDict[i] in self.dict:\n self.dict[wordsDict[i]].append(i)\n else:\n self.dict[wordsDict[i]] = [i]\nfor k in self.dict:\n self.dict[k].sort()\nself.mindist = {}", "if word1 + '_' + word2 in self.mindist:\n return self.mindist[word1 + '_' + word2]\nif word2 + '_' + word1 in self.mindist:\n return self.mindist[word2 + '_' + word1]\nq = 0\np = 0\nmindist = float('inf')\nwhile q < len(self.dict[word2]):\n v2 = self.dict[word2][q]\n while p <= len(self.dict[word1]):\n v1 = self.dict[word1][p] if p < len(self.dict[word1]) else float('inf')\n prev1 = self.dict[word1][p - 1] if p > 0 else -float('inf')\n if v2 <= v1 and v2 >= prev1:\n mindist = min(mindist, abs(v2 - v1), abs(v2 - prev1))\n break\n p += 1\n q += 1\nself.mindist[word2 + '_' + word1] = mindist\nself.mindist[word1 + '_' + word2] = mindist\nreturn mindist"], "bodies_text": "<|body_start_0|>\n self.dict = {}\n for i in range(len(wordsDict)):\n if wordsDict[i] in self.dict:\n self.dict[wordsDict[i]].append(i)\n else:\n self.dict[wordsDict[i]] = [i]\n for k in self.dict:\n self.dict[k].sort()\n self.mindist = {}\n<|end_body_0|>\n\n<|body_start_1|>\n if word1 + '_' + word2 in self.mindist:\n return self.mindist[word1 + '_' + word2]\n if word2 + '_' + word1 in self.mindist:\n return self.mindist[word2 + '_' + word1]\n q = 0\n p = 0\n mindist = float('inf')\n while q < len(self.dict[word2]):\n v2 = 
self.dict[word2][q]\n while p <= len(self.dict[word1]):\n v1 = self.dict[word1][p] if p < len(self.dict[word1]) else float('inf')\n prev1 = self.dict[word1][p - 1] if p > 0 else -float('inf')\n if v2 <= v1 and v2 >= prev1:\n mindist = min(mindist, abs(v2 - v1), abs(v2 - prev1))\n break\n p += 1\n q += 1\n self.mindist[word2 + '_' + word1] = mindist\n self.mindist[word1 + '_' + word2] = mindist\n return mindist\n<|end_body_1|>\n", "class_docstring": "", "class_name": "WordDistance", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WordDistance:\n\n def __init__(self, wordsDict):\n \"\"\":type wordsDict: List[str]\"\"\"\n <|body_0|>\n\n def shortest(self, word1, word2):\n \"\"\":type word1: str :type word2: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dict = {}\n for i in range(len(wordsDict)):\n if wordsDict[i] in self.dict:\n self.dict[wordsDict[i]].append(i)\n else:\n self.dict[wordsDict[i]] = [i]\n for k in self.dict:\n self.dict[k].sort()\n self.mindist = {}\n<|end_body_0|>\n\n<|body_start_1|>\n if word1 + '_' + word2 in self.mindist:\n return self.mindist[word1 + '_' + word2]\n if word2 + '_' + word1 in self.mindist:\n return self.mindist[word2 + '_' + word1]\n q = 0\n p = 0\n mindist = float('inf')\n while q < len(self.dict[word2]):\n v2 = self.dict[word2][q]\n while p <= len(self.dict[word1]):\n v1 = self.dict[word1][p] if p < len(self.dict[word1]) else float('inf')\n prev1 = self.dict[word1][p - 1] if p > 0 else -float('inf')\n if v2 <= v1 and v2 >= prev1:\n mindist = min(mindist, abs(v2 - v1), abs(v2 - prev1))\n break\n p += 1\n q += 1\n self.mindist[word2 + '_' + word1] = mindist\n self.mindist[word1 + '_' + word2] = mindist\n return mindist\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000067", "length_bytes": 1522, "license_type": "no_license", "methods": [{"docstring": ":type wordsDict: List[str]", "name": "__init__", "signature": "def __init__(self, wordsDict)"}, {"docstring": ":type word1: str :type word2: str :rtype: int", "name": "shortest", "signature": "def shortest(self, word1, word2)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002869", "prompt": "Implement the Python class `WordDistance` described below.\n\nClass description:\nImplement the WordDistance class.\n\nMethod signatures and docstrings:\n- def __init__(self, wordsDict): :type wordsDict: List[str]\n- def shortest(self, word1, word2): :type word1: str :type word2: str :rtype: int", "prompted_full_text": "Implement the Python class `WordDistance` described below.\n\nClass description:\nImplement the WordDistance class.\n\nMethod signatures and docstrings:\n- def __init__(self, wordsDict): :type wordsDict: List[str]\n- def shortest(self, word1, word2): :type word1: str :type word2: str :rtype: int\n\n<|skeleton|>\nclass WordDistance:\n\n def __init__(self, wordsDict):\n \"\"\":type wordsDict: List[str]\"\"\"\n <|body_0|>\n\n def shortest(self, word1, word2):\n \"\"\":type word1: str :type word2: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dict = {}\n for i in range(len(wordsDict)):\n if wordsDict[i] in self.dict:\n self.dict[wordsDict[i]].append(i)\n else:\n self.dict[wordsDict[i]] = [i]\n for k in self.dict:\n self.dict[k].sort()\n self.mindist = {}\n<|end_body_0|>\n\n<|body_start_1|>\n if word1 + '_' + word2 in self.mindist:\n return self.mindist[word1 + '_' + word2]\n if word2 + '_' + word1 in self.mindist:\n return self.mindist[word2 + '_' + word1]\n q = 0\n p = 0\n mindist = float('inf')\n while q < len(self.dict[word2]):\n v2 = self.dict[word2][q]\n while p <= len(self.dict[word1]):\n v1 = self.dict[word1][p] if p < len(self.dict[word1]) else float('inf')\n prev1 = self.dict[word1][p - 1] if p > 0 else -float('inf')\n if v2 <= v1 and v2 >= prev1:\n mindist = min(mindist, abs(v2 - v1), abs(v2 - prev1))\n break\n p += 1\n q += 1\n self.mindist[word2 + '_' + word1] = mindist\n self.mindist[word1 + '_' + word2] = mindist\n return mindist\n<|end_body_1|>\n", "revision_id": "48b43999fb7e2ed82d922e1f64ac76f8fabe4baa", "skeleton": "<|skeleton|>\nclass WordDistance:\n\n def __init__(self, wordsDict):\n \"\"\":type wordsDict: List[str]\"\"\"\n <|body_0|>\n\n def shortest(self, word1, word2):\n \"\"\":type word1: str :type word2: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class WordDistance:\n def __init__(self, wordsDict):\n \"\"\":type wordsDict: List[str]\"\"\"\n self.dict = {}\n for i in range(len(wordsDict)):\n if wordsDict[i] in self.dict:\n self.dict[wordsDict[i]].append(i)\n else:\n self.dict[wordsDict[i]] = [i]\n for k in self.dict:\n self.dict[k].sort()\n self.mindist = {}\n\n def shortest(self, word1, word2):\n 
\"\"\":type word1: str :type word2: str :rtype: int\"\"\"\n if word1 + '_' + word2 in self.mindist:\n return self.mindist[word1 + '_' + word2]\n if word2 + '_' + word1 in self.mindist:\n return self.mindist[word2 + '_' + word1]\n q = 0\n p = 0\n mindist = float('inf')\n while q < len(self.dict[word2]):\n v2 = self.dict[word2][q]\n while p <= len(self.dict[word1]):\n v1 = self.dict[word1][p] if p < len(self.dict[word1]) else float('inf')\n prev1 = self.dict[word1][p - 1] if p > 0 else -float('inf')\n if v2 <= v1 and v2 >= prev1:\n mindist = min(mindist, abs(v2 - v1), abs(v2 - prev1))\n break\n p += 1\n q += 1\n self.mindist[word2 + '_' + word1] = mindist\n self.mindist[word1 + '_' + word2] = mindist\n return mindist\n", "source": "the_stack_v2_python_sparse", "source_path": "244.py", "source_repo": "saleed/LeetCode", "split": "val", "star_events_count": 2} {"blob_id": "036165f39d29e81fa40e4bc19ae866ddb7b23259", "bodies": ["snap = super(FlowItem, self).snapshot()\nsnap['preferred_size'] = self.preferred_size\nsnap['align'] = self.align\nsnap['stretch'] = self.stretch\nsnap['ortho_stretch'] = self.ortho_stretch\nreturn snap", "super(FlowItem, self).bind()\nattrs = ('preferred_size', 'align', 'stretch', 'ortho_stretch')\nself.publish_attributes(*attrs)", "widget = None\nfor child in self.children:\n if isinstance(child, Container):\n widget = child\nreturn widget"], "bodies_text": "<|body_start_0|>\n snap = super(FlowItem, self).snapshot()\n snap['preferred_size'] = self.preferred_size\n snap['align'] = self.align\n snap['stretch'] = self.stretch\n snap['ortho_stretch'] = self.ortho_stretch\n return snap\n<|end_body_0|>\n\n<|body_start_1|>\n super(FlowItem, self).bind()\n attrs = ('preferred_size', 'align', 'stretch', 'ortho_stretch')\n self.publish_attributes(*attrs)\n<|end_body_1|>\n\n<|body_start_2|>\n widget = None\n for child in self.children:\n if isinstance(child, Container):\n widget = child\n return widget\n<|end_body_2|>\n", "class_docstring": "A widget which can be used as an item in a FlowArea. A FlowItem is a widget which can be used as a child of a FlowArea widget. It can have at most a single child widget which is an instance of Container.", "class_name": "FlowItem", "detected_licenses": ["BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FlowItem:\n \"\"\"A widget which can be used as an item in a FlowArea. A FlowItem is a widget which can be used as a child of a FlowArea widget. It can have at most a single child widget which is an instance of Container.\"\"\"\n\n def snapshot(self):\n \"\"\"Returns the snapshot dict for the FlowItem.\"\"\"\n <|body_0|>\n\n def bind(self):\n \"\"\"Bind the change handler for the FlowItem.\"\"\"\n <|body_1|>\n\n def _get_flow_widget(self):\n \"\"\"The getter for the 'flow_widget' property. 
Returns ------- result : Container or None The flow widget for the FlowItem, or None if not provided.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n snap = super(FlowItem, self).snapshot()\n snap['preferred_size'] = self.preferred_size\n snap['align'] = self.align\n snap['stretch'] = self.stretch\n snap['ortho_stretch'] = self.ortho_stretch\n return snap\n<|end_body_0|>\n\n<|body_start_1|>\n super(FlowItem, self).bind()\n attrs = ('preferred_size', 'align', 'stretch', 'ortho_stretch')\n self.publish_attributes(*attrs)\n<|end_body_1|>\n\n<|body_start_2|>\n widget = None\n for child in self.children:\n if isinstance(child, Container):\n widget = child\n return widget\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000068", "length_bytes": 3170, "license_type": "permissive", "methods": [{"docstring": "Returns the snapshot dict for the FlowItem.", "name": "snapshot", "signature": "def snapshot(self)"}, {"docstring": "Bind the change handler for the FlowItem.", "name": "bind", "signature": "def bind(self)"}, {"docstring": "The getter for the 'flow_widget' property. Returns ------- result : Container or None The flow widget for the FlowItem, or None if not provided.", "name": "_get_flow_widget", "signature": "def _get_flow_widget(self)"}], "n_methods": 3, "prompt": "Implement the Python class `FlowItem` described below.\n\nClass description:\nA widget which can be used as an item in a FlowArea. A FlowItem is a widget which can be used as a child of a FlowArea widget. It can have at most a single child widget which is an instance of Container.\n\nMethod signatures and docstrings:\n- def snapshot(self): Returns the snapshot dict for the FlowItem.\n- def bind(self): Bind the change handler for the FlowItem.\n- def _get_flow_widget(self): The getter for the 'flow_widget' property. Returns ------- result : Container or None The flow widget for the FlowItem, or None if not provided.", "prompted_full_text": "Implement the Python class `FlowItem` described below.\n\nClass description:\nA widget which can be used as an item in a FlowArea. A FlowItem is a widget which can be used as a child of a FlowArea widget. It can have at most a single child widget which is an instance of Container.\n\nMethod signatures and docstrings:\n- def snapshot(self): Returns the snapshot dict for the FlowItem.\n- def bind(self): Bind the change handler for the FlowItem.\n- def _get_flow_widget(self): The getter for the 'flow_widget' property. Returns ------- result : Container or None The flow widget for the FlowItem, or None if not provided.\n\n<|skeleton|>\nclass FlowItem:\n \"\"\"A widget which can be used as an item in a FlowArea. A FlowItem is a widget which can be used as a child of a FlowArea widget. It can have at most a single child widget which is an instance of Container.\"\"\"\n\n def snapshot(self):\n \"\"\"Returns the snapshot dict for the FlowItem.\"\"\"\n <|body_0|>\n\n def bind(self):\n \"\"\"Bind the change handler for the FlowItem.\"\"\"\n <|body_1|>\n\n def _get_flow_widget(self):\n \"\"\"The getter for the 'flow_widget' property. 
Returns ------- result : Container or None The flow widget for the FlowItem, or None if not provided.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n snap = super(FlowItem, self).snapshot()\n snap['preferred_size'] = self.preferred_size\n snap['align'] = self.align\n snap['stretch'] = self.stretch\n snap['ortho_stretch'] = self.ortho_stretch\n return snap\n<|end_body_0|>\n\n<|body_start_1|>\n super(FlowItem, self).bind()\n attrs = ('preferred_size', 'align', 'stretch', 'ortho_stretch')\n self.publish_attributes(*attrs)\n<|end_body_1|>\n\n<|body_start_2|>\n widget = None\n for child in self.children:\n if isinstance(child, Container):\n widget = child\n return widget\n<|end_body_2|>\n", "revision_id": "424bba29219de58fe9e47196de6763de8b2009f2", "skeleton": "<|skeleton|>\nclass FlowItem:\n \"\"\"A widget which can be used as an item in a FlowArea. A FlowItem is a widget which can be used as a child of a FlowArea widget. It can have at most a single child widget which is an instance of Container.\"\"\"\n\n def snapshot(self):\n \"\"\"Returns the snapshot dict for the FlowItem.\"\"\"\n <|body_0|>\n\n def bind(self):\n \"\"\"Bind the change handler for the FlowItem.\"\"\"\n <|body_1|>\n\n def _get_flow_widget(self):\n \"\"\"The getter for the 'flow_widget' property. Returns ------- result : Container or None The flow widget for the FlowItem, or None if not provided.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FlowItem:\n \"\"\"A widget which can be used as an item in a FlowArea. A FlowItem is a widget which can be used as a child of a FlowArea widget. It can have at most a single child widget which is an instance of Container.\"\"\"\n\n def snapshot(self):\n \"\"\"Returns the snapshot dict for the FlowItem.\"\"\"\n snap = super(FlowItem, self).snapshot()\n snap['preferred_size'] = self.preferred_size\n snap['align'] = self.align\n snap['stretch'] = self.stretch\n snap['ortho_stretch'] = self.ortho_stretch\n return snap\n\n def bind(self):\n \"\"\"Bind the change handler for the FlowItem.\"\"\"\n super(FlowItem, self).bind()\n attrs = ('preferred_size', 'align', 'stretch', 'ortho_stretch')\n self.publish_attributes(*attrs)\n\n def _get_flow_widget(self):\n \"\"\"The getter for the 'flow_widget' property. 
Returns ------- result : Container or None The flow widget for the FlowItem, or None if not provided.\"\"\"\n widget = None\n for child in self.children:\n if isinstance(child, Container):\n widget = child\n return widget\n", "source": "the_stack_v2_python_sparse", "source_path": "enaml/widgets/flow_item.py", "source_repo": "enthought/enaml", "split": "val", "star_events_count": 17} {"blob_id": "abd5c1a29f7f6d7625c832f7e1b9434b41a5d1dd", "bodies": ["self.aliyunrequest.set_action_name('DescribeInstances')\nif not isinstance(config, list):\n return self.MResponse(code=20001, msg='config配置不正确', status=False)\nself.Mconfig(config)\nresponse = self.aliyunapiclient.do_action_with_exception(self.aliyunrequest)\nreturn response", "self.aliyunrequest.set_action_name('DescribeInstanceAttribute')\nif not isinstance(config, list):\n return self.MResponse(code=20001, msg='config配置不正确', status=False)\nself.Mconfig(config)\nresponse = self.aliyunapiclient.do_action_with_exception(self.aliyunrequest)\nreturn response"], "bodies_text": "<|body_start_0|>\n self.aliyunrequest.set_action_name('DescribeInstances')\n if not isinstance(config, list):\n return self.MResponse(code=20001, msg='config配置不正确', status=False)\n self.Mconfig(config)\n response = self.aliyunapiclient.do_action_with_exception(self.aliyunrequest)\n return response\n<|end_body_0|>\n\n<|body_start_1|>\n self.aliyunrequest.set_action_name('DescribeInstanceAttribute')\n if not isinstance(config, list):\n return self.MResponse(code=20001, msg='config配置不正确', status=False)\n self.Mconfig(config)\n response = self.aliyunapiclient.do_action_with_exception(self.aliyunrequest)\n return response\n<|end_body_1|>\n", "class_docstring": "查询阿里云redis信息", "class_name": "ALiYunApiRedis", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ALiYunApiRedis:\n \"\"\"查询阿里云redis信息\"\"\"\n\n def DescribeInstances(self, config):\n \"\"\"调用该API可以查询账户下的某一个或多个实例信息。。 :param config: :return:\"\"\"\n <|body_0|>\n\n def DescribeInstanceAttribute(self, config):\n \"\"\"调用该API可以查询账户下的某一个或多个实例信息。。 :param config: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.aliyunrequest.set_action_name('DescribeInstances')\n if not isinstance(config, list):\n return self.MResponse(code=20001, msg='config配置不正确', status=False)\n self.Mconfig(config)\n response = self.aliyunapiclient.do_action_with_exception(self.aliyunrequest)\n return response\n<|end_body_0|>\n\n<|body_start_1|>\n self.aliyunrequest.set_action_name('DescribeInstanceAttribute')\n if not isinstance(config, list):\n return self.MResponse(code=20001, msg='config配置不正确', status=False)\n self.Mconfig(config)\n response = self.aliyunapiclient.do_action_with_exception(self.aliyunrequest)\n return response\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000069", "length_bytes": 7651, "license_type": "no_license", "methods": [{"docstring": "调用该API可以查询账户下的某一个或多个实例信息。。 :param config: :return:", "name": "DescribeInstances", "signature": "def DescribeInstances(self, config)"}, {"docstring": "调用该API可以查询账户下的某一个或多个实例信息。。 :param config: :return:", "name": "DescribeInstanceAttribute", "signature": "def DescribeInstanceAttribute(self, config)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004508", "prompt": "Implement the Python class `ALiYunApiRedis` described below.\n\nClass description:\n查询阿里云redis信息\n\nMethod signatures and docstrings:\n- def DescribeInstances(self, config): 调用该API可以查询账户下的某一个或多个实例信息。。 :param config: 
:return:\n- def DescribeInstanceAttribute(self, config): 调用该API可以查询账户下的某一个或多个实例信息。。 :param config: :return:", "prompted_full_text": "Implement the Python class `ALiYunApiRedis` described below.\n\nClass description:\n查询阿里云redis信息\n\nMethod signatures and docstrings:\n- def DescribeInstances(self, config): 调用该API可以查询账户下的某一个或多个实例信息。。 :param config: :return:\n- def DescribeInstanceAttribute(self, config): 调用该API可以查询账户下的某一个或多个实例信息。。 :param config: :return:\n\n<|skeleton|>\nclass ALiYunApiRedis:\n \"\"\"查询阿里云redis信息\"\"\"\n\n def DescribeInstances(self, config):\n \"\"\"调用该API可以查询账户下的某一个或多个实例信息。。 :param config: :return:\"\"\"\n <|body_0|>\n\n def DescribeInstanceAttribute(self, config):\n \"\"\"调用该API可以查询账户下的某一个或多个实例信息。。 :param config: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.aliyunrequest.set_action_name('DescribeInstances')\n if not isinstance(config, list):\n return self.MResponse(code=20001, msg='config配置不正确', status=False)\n self.Mconfig(config)\n response = self.aliyunapiclient.do_action_with_exception(self.aliyunrequest)\n return response\n<|end_body_0|>\n\n<|body_start_1|>\n self.aliyunrequest.set_action_name('DescribeInstanceAttribute')\n if not isinstance(config, list):\n return self.MResponse(code=20001, msg='config配置不正确', status=False)\n self.Mconfig(config)\n response = self.aliyunapiclient.do_action_with_exception(self.aliyunrequest)\n return response\n<|end_body_1|>\n", "revision_id": "401ad869298d55a6cb2f78442385f67f40b9db52", "skeleton": "<|skeleton|>\nclass ALiYunApiRedis:\n \"\"\"查询阿里云redis信息\"\"\"\n\n def DescribeInstances(self, config):\n \"\"\"调用该API可以查询账户下的某一个或多个实例信息。。 :param config: :return:\"\"\"\n <|body_0|>\n\n def DescribeInstanceAttribute(self, config):\n \"\"\"调用该API可以查询账户下的某一个或多个实例信息。。 :param config: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ALiYunApiRedis:\n \"\"\"查询阿里云redis信息\"\"\"\n\n def DescribeInstances(self, config):\n \"\"\"调用该API可以查询账户下的某一个或多个实例信息。。 :param config: :return:\"\"\"\n self.aliyunrequest.set_action_name('DescribeInstances')\n if not isinstance(config, list):\n return self.MResponse(code=20001, msg='config配置不正确', status=False)\n self.Mconfig(config)\n response = self.aliyunapiclient.do_action_with_exception(self.aliyunrequest)\n return response\n\n def DescribeInstanceAttribute(self, config):\n \"\"\"调用该API可以查询账户下的某一个或多个实例信息。。 :param config: :return:\"\"\"\n self.aliyunrequest.set_action_name('DescribeInstanceAttribute')\n if not isinstance(config, list):\n return self.MResponse(code=20001, msg='config配置不正确', status=False)\n self.Mconfig(config)\n response = self.aliyunapiclient.do_action_with_exception(self.aliyunrequest)\n return response\n", "source": "the_stack_v2_python_sparse", "source_path": "utils/maliyun/aliyunapi.py", "source_repo": "Alotofwater/cookcmdb", "split": "val", "star_events_count": 8} {"blob_id": "360698c19016666b2e0abc2cdeb6ce13a12732fd", "bodies": ["url: str = ''\nif api == 'Detections':\n url = 'https://detections.icebrg.io/v1/'\nelif api == 'Sensors':\n url = 'https://sensor.icebrg.io/v1/'\nelif api == 'Entity':\n url = 'https://entity.icebrg.io/v1/entity/'\nreturn url", "headers = {'Authorization': 'IBToken ' + api_key, 'User-Agent': 'Cortex_Insight.v3', 'Content-Type': 'application/json'}\nmatch api:\n case 'Entity':\n return EntityClient(base_url=Client.getUrl(api), headers=headers)\n case 'Sensors':\n return SensorClient(base_url=Client.getUrl(api), 
headers=headers)\n case 'Detections':\n return DetectionClient(base_url=Client.getUrl(api), headers=headers)"], "bodies_text": "<|body_start_0|>\n url: str = ''\n if api == 'Detections':\n url = 'https://detections.icebrg.io/v1/'\n elif api == 'Sensors':\n url = 'https://sensor.icebrg.io/v1/'\n elif api == 'Entity':\n url = 'https://entity.icebrg.io/v1/entity/'\n return url\n<|end_body_0|>\n\n<|body_start_1|>\n headers = {'Authorization': 'IBToken ' + api_key, 'User-Agent': 'Cortex_Insight.v3', 'Content-Type': 'application/json'}\n match api:\n case 'Entity':\n return EntityClient(base_url=Client.getUrl(api), headers=headers)\n case 'Sensors':\n return SensorClient(base_url=Client.getUrl(api), headers=headers)\n case 'Detections':\n return DetectionClient(base_url=Client.getUrl(api), headers=headers)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Client", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Client:\n\n def getUrl(api) -> str:\n \"\"\"Provide the base url to access the specific API. :param str api: The specific API for which we need the base url. return: The requested base url rtype str\"\"\"\n <|body_0|>\n\n def getClient(api, api_key):\n \"\"\"Provide the required Client instance to interact with the specific API. :param str api: The specific API we need to interact with. :param str api_key: The API key to authenticate the request being made. return: The requested Client instance. rtype str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n url: str = ''\n if api == 'Detections':\n url = 'https://detections.icebrg.io/v1/'\n elif api == 'Sensors':\n url = 'https://sensor.icebrg.io/v1/'\n elif api == 'Entity':\n url = 'https://entity.icebrg.io/v1/entity/'\n return url\n<|end_body_0|>\n\n<|body_start_1|>\n headers = {'Authorization': 'IBToken ' + api_key, 'User-Agent': 'Cortex_Insight.v3', 'Content-Type': 'application/json'}\n match api:\n case 'Entity':\n return EntityClient(base_url=Client.getUrl(api), headers=headers)\n case 'Sensors':\n return SensorClient(base_url=Client.getUrl(api), headers=headers)\n case 'Detections':\n return DetectionClient(base_url=Client.getUrl(api), headers=headers)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000070", "length_bytes": 36459, "license_type": "permissive", "methods": [{"docstring": "Provide the base url to access the specific API. :param str api: The specific API for which we need the base url. return: The requested base url rtype str", "name": "getUrl", "signature": "def getUrl(api) -> str"}, {"docstring": "Provide the required Client instance to interact with the specific API. :param str api: The specific API we need to interact with. :param str api_key: The API key to authenticate the request being made. return: The requested Client instance. rtype str", "name": "getClient", "signature": "def getClient(api, api_key)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004923", "prompt": "Implement the Python class `Client` described below.\n\nClass description:\nImplement the Client class.\n\nMethod signatures and docstrings:\n- def getUrl(api) -> str: Provide the base url to access the specific API. :param str api: The specific API for which we need the base url. return: The requested base url rtype str\n- def getClient(api, api_key): Provide the required Client instance to interact with the specific API. :param str api: The specific API we need to interact with. 
:param str api_key: The API key to authenticate the request being made. return: The requested Client instance. rtype str", "prompted_full_text": "Implement the Python class `Client` described below.\n\nClass description:\nImplement the Client class.\n\nMethod signatures and docstrings:\n- def getUrl(api) -> str: Provide the base url to access the specific API. :param str api: The specific API for which we need the base url. return: The requested base url rtype str\n- def getClient(api, api_key): Provide the required Client instance to interact with the specific API. :param str api: The specific API we need to interact with. :param str api_key: The API key to authenticate the request being made. return: The requested Client instance. rtype str\n\n<|skeleton|>\nclass Client:\n\n def getUrl(api) -> str:\n \"\"\"Provide the base url to access the specific API. :param str api: The specific API for which we need the base url. return: The requested base url rtype str\"\"\"\n <|body_0|>\n\n def getClient(api, api_key):\n \"\"\"Provide the required Client instance to interact with the specific API. :param str api: The specific API we need to interact with. :param str api_key: The API key to authenticate the request being made. return: The requested Client instance. rtype str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n url: str = ''\n if api == 'Detections':\n url = 'https://detections.icebrg.io/v1/'\n elif api == 'Sensors':\n url = 'https://sensor.icebrg.io/v1/'\n elif api == 'Entity':\n url = 'https://entity.icebrg.io/v1/entity/'\n return url\n<|end_body_0|>\n\n<|body_start_1|>\n headers = {'Authorization': 'IBToken ' + api_key, 'User-Agent': 'Cortex_Insight.v3', 'Content-Type': 'application/json'}\n match api:\n case 'Entity':\n return EntityClient(base_url=Client.getUrl(api), headers=headers)\n case 'Sensors':\n return SensorClient(base_url=Client.getUrl(api), headers=headers)\n case 'Detections':\n return DetectionClient(base_url=Client.getUrl(api), headers=headers)\n<|end_body_1|>\n", "revision_id": "890def5a0e0ae8d6eaa538148249ddbc851dbb6b", "skeleton": "<|skeleton|>\nclass Client:\n\n def getUrl(api) -> str:\n \"\"\"Provide the base url to access the specific API. :param str api: The specific API for which we need the base url. return: The requested base url rtype str\"\"\"\n <|body_0|>\n\n def getClient(api, api_key):\n \"\"\"Provide the required Client instance to interact with the specific API. :param str api: The specific API we need to interact with. :param str api_key: The API key to authenticate the request being made. return: The requested Client instance. rtype str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Client:\n def getUrl(api) -> str:\n \"\"\"Provide the base url to access the specific API. :param str api: The specific API for which we need the base url. return: The requested base url rtype str\"\"\"\n url: str = ''\n if api == 'Detections':\n url = 'https://detections.icebrg.io/v1/'\n elif api == 'Sensors':\n url = 'https://sensor.icebrg.io/v1/'\n elif api == 'Entity':\n url = 'https://entity.icebrg.io/v1/entity/'\n return url\n\n def getClient(api, api_key):\n \"\"\"Provide the required Client instance to interact with the specific API. :param str api: The specific API we need to interact with. :param str api_key: The API key to authenticate the request being made. return: The requested Client instance. 
rtype str\"\"\"\n headers = {'Authorization': 'IBToken ' + api_key, 'User-Agent': 'Cortex_Insight.v3', 'Content-Type': 'application/json'}\n match api:\n case 'Entity':\n return EntityClient(base_url=Client.getUrl(api), headers=headers)\n case 'Sensors':\n return SensorClient(base_url=Client.getUrl(api), headers=headers)\n case 'Detections':\n return DetectionClient(base_url=Client.getUrl(api), headers=headers)\n", "source": "the_stack_v2_python_sparse", "source_path": "Packs/GigamonThreatINSIGHT/Integrations/GigamonThreatINSIGHT/GigamonThreatINSIGHT.py", "source_repo": "demisto/content", "split": "val", "star_events_count": 1023} {"blob_id": "f4c85698963db33c68f79c69790bde9aa2d706c2", "bodies": ["self.v = [v1, v2]\nself.n = [len(v1), len(v2)]\nself.row = 1\nself.col = -1", "if self.row == 1:\n self.row = 0 if self.col + 1 < self.n[0] else 1\n self.col += 1\nelif self.col < self.n[1]:\n self.row = 1\nelse:\n self.col += 1\nreturn self.v[self.row][self.col]", "if self.row == 1:\n if self.col + 1 < self.n[0]:\n row_next = 0\n col_next = self.col + 1\n else:\n row_next = 1\n col_next = self.col + 1\nelif self.col < self.n[1]:\n row_next = 1\n col_next = self.col\nelse:\n row_next = 0\n col_next = self.col + 1\nreturn col_next < self.n[row_next]"], "bodies_text": "<|body_start_0|>\n self.v = [v1, v2]\n self.n = [len(v1), len(v2)]\n self.row = 1\n self.col = -1\n<|end_body_0|>\n\n<|body_start_1|>\n if self.row == 1:\n self.row = 0 if self.col + 1 < self.n[0] else 1\n self.col += 1\n elif self.col < self.n[1]:\n self.row = 1\n else:\n self.col += 1\n return self.v[self.row][self.col]\n<|end_body_1|>\n\n<|body_start_2|>\n if self.row == 1:\n if self.col + 1 < self.n[0]:\n row_next = 0\n col_next = self.col + 1\n else:\n row_next = 1\n col_next = self.col + 1\n elif self.col < self.n[1]:\n row_next = 1\n col_next = self.col\n else:\n row_next = 0\n col_next = self.col + 1\n return col_next < self.n[row_next]\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ZigzagIterator", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ZigzagIterator:\n\n def __init__(self, v1, v2):\n \"\"\"Initialize your data structure here. :type v1: List[int] :type v2: List[int]\"\"\"\n <|body_0|>\n\n def next(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n def hasNext(self):\n \"\"\":rtype: bool\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.v = [v1, v2]\n self.n = [len(v1), len(v2)]\n self.row = 1\n self.col = -1\n<|end_body_0|>\n\n<|body_start_1|>\n if self.row == 1:\n self.row = 0 if self.col + 1 < self.n[0] else 1\n self.col += 1\n elif self.col < self.n[1]:\n self.row = 1\n else:\n self.col += 1\n return self.v[self.row][self.col]\n<|end_body_1|>\n\n<|body_start_2|>\n if self.row == 1:\n if self.col + 1 < self.n[0]:\n row_next = 0\n col_next = self.col + 1\n else:\n row_next = 1\n col_next = self.col + 1\n elif self.col < self.n[1]:\n row_next = 1\n col_next = self.col\n else:\n row_next = 0\n col_next = self.col + 1\n return col_next < self.n[row_next]\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000071", "length_bytes": 1338, "license_type": "no_license", "methods": [{"docstring": "Initialize your data structure here. 
:type v1: List[int] :type v2: List[int]", "name": "__init__", "signature": "def __init__(self, v1, v2)"}, {"docstring": ":rtype: int", "name": "next", "signature": "def next(self)"}, {"docstring": ":rtype: bool", "name": "hasNext", "signature": "def hasNext(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006775", "prompt": "Implement the Python class `ZigzagIterator` described below.\n\nClass description:\nImplement the ZigzagIterator class.\n\nMethod signatures and docstrings:\n- def __init__(self, v1, v2): Initialize your data structure here. :type v1: List[int] :type v2: List[int]\n- def next(self): :rtype: int\n- def hasNext(self): :rtype: bool", "prompted_full_text": "Implement the Python class `ZigzagIterator` described below.\n\nClass description:\nImplement the ZigzagIterator class.\n\nMethod signatures and docstrings:\n- def __init__(self, v1, v2): Initialize your data structure here. :type v1: List[int] :type v2: List[int]\n- def next(self): :rtype: int\n- def hasNext(self): :rtype: bool\n\n<|skeleton|>\nclass ZigzagIterator:\n\n def __init__(self, v1, v2):\n \"\"\"Initialize your data structure here. :type v1: List[int] :type v2: List[int]\"\"\"\n <|body_0|>\n\n def next(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n def hasNext(self):\n \"\"\":rtype: bool\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.v = [v1, v2]\n self.n = [len(v1), len(v2)]\n self.row = 1\n self.col = -1\n<|end_body_0|>\n\n<|body_start_1|>\n if self.row == 1:\n self.row = 0 if self.col + 1 < self.n[0] else 1\n self.col += 1\n elif self.col < self.n[1]:\n self.row = 1\n else:\n self.col += 1\n return self.v[self.row][self.col]\n<|end_body_1|>\n\n<|body_start_2|>\n if self.row == 1:\n if self.col + 1 < self.n[0]:\n row_next = 0\n col_next = self.col + 1\n else:\n row_next = 1\n col_next = self.col + 1\n elif self.col < self.n[1]:\n row_next = 1\n col_next = self.col\n else:\n row_next = 0\n col_next = self.col + 1\n return col_next < self.n[row_next]\n<|end_body_2|>\n", "revision_id": "fad32c510108d21f78540f8c4ed0295341c0c2dc", "skeleton": "<|skeleton|>\nclass ZigzagIterator:\n\n def __init__(self, v1, v2):\n \"\"\"Initialize your data structure here. :type v1: List[int] :type v2: List[int]\"\"\"\n <|body_0|>\n\n def next(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n def hasNext(self):\n \"\"\":rtype: bool\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ZigzagIterator:\n def __init__(self, v1, v2):\n \"\"\"Initialize your data structure here. 
:type v1: List[int] :type v2: List[int]\"\"\"\n self.v = [v1, v2]\n self.n = [len(v1), len(v2)]\n self.row = 1\n self.col = -1\n\n def next(self):\n \"\"\":rtype: int\"\"\"\n if self.row == 1:\n self.row = 0 if self.col + 1 < self.n[0] else 1\n self.col += 1\n elif self.col < self.n[1]:\n self.row = 1\n else:\n self.col += 1\n return self.v[self.row][self.col]\n\n def hasNext(self):\n \"\"\":rtype: bool\"\"\"\n if self.row == 1:\n if self.col + 1 < self.n[0]:\n row_next = 0\n col_next = self.col + 1\n else:\n row_next = 1\n col_next = self.col + 1\n elif self.col < self.n[1]:\n row_next = 1\n col_next = self.col\n else:\n row_next = 0\n col_next = self.col + 1\n return col_next < self.n[row_next]\n", "source": "the_stack_v2_python_sparse", "source_path": "281 - Zigzag Iterator/zi.py", "source_repo": "huragok/LeetCode-Practce", "split": "val", "star_events_count": 0} {"blob_id": "ba93d254815acb9421e5145c08b634354a27bf77", "bodies": ["hierarchy_mapping_name = os.path.join('..', VG_VisualModule_PICKLES_PATH, hierarchy_mapping_name)\nmodel_path = os.path.join('..', weights_name_dir, WEIGHTS_NAME)\nif not os.path.exists(model_path) or not os.path.exists(hierarchy_mapping_name):\n print('Error: No Weights have been found or No Hierarchy Mapping has been found in {0} or {1}'.format(model_path, hierarchy_mapping_name))\n raise Exception\nself.hierarchy_mapping_objects = cPickle.load(open(hierarchy_mapping_name))\nself.nof_classes = len(self.hierarchy_mapping_objects)\nself.config = Config(gpu_num)\nself.model = self.get_model(self.nof_classes, weight_path=model_path)", "if K.image_dim_ordering() == 'th':\n input_shape_img = (3, None, None)\nelse:\n input_shape_img = (self.config.crop_height, self.config.crop_width, 3)\nimg_input = Input(shape=input_shape_img, name='image_input')\nnet = ModelZoo()\nmodel_resnet50 = net.resnet50_base(img_input, trainable=True)\nmodel_resnet50 = GlobalAveragePooling2D(name='global_avg_pool')(model_resnet50)\noutput_resnet50 = Dense(number_of_classes, kernel_initializer='he_normal', activation='softmax', name='fc')(model_resnet50)\nmodel = Model(inputs=img_input, outputs=output_resnet50, name='resnet50')\nmodel.summary()\ntry:\n print('Start loading Weights')\n model.load_weights(weight_path, by_name=True)\n print('Finished successfully loading weights from {}'.format(weight_path))\nexcept Exception as e:\n print('Could not load pretrained model weights. 
Weights can be found at {} and {}'.format('https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels_notop.h5', 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'))\n raise Exception(e)\nprint('Finished successfully loading Model')\nreturn model", "layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == layer_name][0]\nfilters = np.arange(get_num_filters(self.model.layers[layer_idx]))\nvis_images = []\nfor idx in filters:\n img = visualize_activation(self.model, layer_idx, filter_indices=idx)\n img = utils.draw_text(img, str(idx))\n vis_images.append(img)\nstitched = utils.stitch_images(vis_images, cols=8)\nplt.axis('off')\nplt.imshow(stitched)\nplt.title(layer_name)\nplt.savefig(savefig_path)\nprint('debug')", "layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == last_layer][0]\nvis_images = []\nfor idx in [class_number] * nof_times:\n img = visualize_activation(self.model, layer_idx, filter_indices=idx, max_iter=500)\n img = utils.draw_text(img, str(idx))\n vis_images.append(img)\nstitched = utils.stitch_images(vis_images)\nplt.axis('off')\nplt.imshow(stitched)\nplt.title(last_layer)\nplt.savefig(savefig_path)\nprint('debug')", "layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == last_layer][0]\nheatmaps = []\nfor path in image_paths:\n seed_img = utils.load_img(path, target_size=(self.config.crop_height, self.config.crop_width))\n x = np.expand_dims(img_to_array(seed_img), axis=0)\n x = preprocess_input(x)\n pred_class = np.argmax(self.model.predict(x))\n heatmap = visualize_saliency(self.model, layer_idx, [pred_class], seed_img)\n heatmaps.append(heatmap)\nplt.axis('off')\nplt.imshow(utils.stitch_images(heatmaps))\nplt.title('Saliency map')\nplt.savefig(savefig_path)\nprint('debug')"], "bodies_text": "<|body_start_0|>\n hierarchy_mapping_name = os.path.join('..', VG_VisualModule_PICKLES_PATH, hierarchy_mapping_name)\n model_path = os.path.join('..', weights_name_dir, WEIGHTS_NAME)\n if not os.path.exists(model_path) or not os.path.exists(hierarchy_mapping_name):\n print('Error: No Weights have been found or No Hierarchy Mapping has been found in {0} or {1}'.format(model_path, hierarchy_mapping_name))\n raise Exception\n self.hierarchy_mapping_objects = cPickle.load(open(hierarchy_mapping_name))\n self.nof_classes = len(self.hierarchy_mapping_objects)\n self.config = Config(gpu_num)\n self.model = self.get_model(self.nof_classes, weight_path=model_path)\n<|end_body_0|>\n\n<|body_start_1|>\n if K.image_dim_ordering() == 'th':\n input_shape_img = (3, None, None)\n else:\n input_shape_img = (self.config.crop_height, self.config.crop_width, 3)\n img_input = Input(shape=input_shape_img, name='image_input')\n net = ModelZoo()\n model_resnet50 = net.resnet50_base(img_input, trainable=True)\n model_resnet50 = GlobalAveragePooling2D(name='global_avg_pool')(model_resnet50)\n output_resnet50 = Dense(number_of_classes, kernel_initializer='he_normal', activation='softmax', name='fc')(model_resnet50)\n model = Model(inputs=img_input, outputs=output_resnet50, name='resnet50')\n model.summary()\n try:\n print('Start loading Weights')\n model.load_weights(weight_path, by_name=True)\n print('Finished successfully loading weights from {}'.format(weight_path))\n except Exception as e:\n print('Could not load pretrained model weights. 
Weights can be found at {} and {}'.format('https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels_notop.h5', 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'))\n raise Exception(e)\n print('Finished successfully loading Model')\n return model\n<|end_body_1|>\n\n<|body_start_2|>\n layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == layer_name][0]\n filters = np.arange(get_num_filters(self.model.layers[layer_idx]))\n vis_images = []\n for idx in filters:\n img = visualize_activation(self.model, layer_idx, filter_indices=idx)\n img = utils.draw_text(img, str(idx))\n vis_images.append(img)\n stitched = utils.stitch_images(vis_images, cols=8)\n plt.axis('off')\n plt.imshow(stitched)\n plt.title(layer_name)\n plt.savefig(savefig_path)\n print('debug')\n<|end_body_2|>\n\n<|body_start_3|>\n layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == last_layer][0]\n vis_images = []\n for idx in [class_number] * nof_times:\n img = visualize_activation(self.model, layer_idx, filter_indices=idx, max_iter=500)\n img = utils.draw_text(img, str(idx))\n vis_images.append(img)\n stitched = utils.stitch_images(vis_images)\n plt.axis('off')\n plt.imshow(stitched)\n plt.title(last_layer)\n plt.savefig(savefig_path)\n print('debug')\n<|end_body_3|>\n\n<|body_start_4|>\n layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == last_layer][0]\n heatmaps = []\n for path in image_paths:\n seed_img = utils.load_img(path, target_size=(self.config.crop_height, self.config.crop_width))\n x = np.expand_dims(img_to_array(seed_img), axis=0)\n x = preprocess_input(x)\n pred_class = np.argmax(self.model.predict(x))\n heatmap = visualize_saliency(self.model, layer_idx, [pred_class], seed_img)\n heatmaps.append(heatmap)\n plt.axis('off')\n plt.imshow(utils.stitch_images(heatmaps))\n plt.title('Saliency map')\n plt.savefig(savefig_path)\n print('debug')\n<|end_body_4|>\n", "class_docstring": "This class is a network visualizer", "class_name": "NetworkVisualizer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NetworkVisualizer:\n \"\"\"This class is a network visualizer\"\"\"\n\n def __init__(self, weights_name_dir, hierarchy_mapping_name, gpu_num=0):\n \"\"\"This function initializes network visualizer :param weights_name_dir: the path for the weights :param hierarchy_mapping_name: the path for the hierarchy mapping :param gpu_num: 0 (default)\"\"\"\n <|body_0|>\n\n def get_model(self, number_of_classes, weight_path):\n \"\"\"This function loads the model :param weight_path: model weights path :type number_of_classes: number of classes :return: model\"\"\"\n <|body_1|>\n\n def visualize_conv_layers(self, layer_name='conv1', savefig_path=''):\n \"\"\"Each conv layer has several learned 'template matching' filters that maximize their output when a similar template pattern is found in the input image\"\"\"\n <|body_2|>\n\n def visualize_dense_layers(self, last_layer='fc', savefig_path='', nof_times=3, class_number=20):\n \"\"\"Generate an input image that maximizes the final Dense layer output corresponding to some class class. 
:param last_layer: last layer name :param savefig_path: where to save the figure :param class_number: which class number do we want to check (for example class number 30 is some predicate) :param nof_times: how many times do you want to check the class :return:\"\"\"\n <|body_3|>\n\n def visualize_heat_maps(self, image_paths, last_layer='fc', savefig_path=''):\n \"\"\"compute the gradient of output category with respect to input image. This should tell us how output category value changes with respect to a small change in input image pixels. :param image_paths: a list of images which we want check. for example: image_paths = [\"https://www.kshs.org/cool2/graphics/dumbbell1lg.jpg\", \"http://tampaspeedboatadventures.com/wp-content/uploads/2010/10/DSC07011.jpg\", \"http://ichef-1.bbci.co.uk/news/660/cpsprodpb/1C24/production/_85540270_85540265.jpg\"] :param last_layer: last layer name :param savefig_path: where to save the figure :return:\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n hierarchy_mapping_name = os.path.join('..', VG_VisualModule_PICKLES_PATH, hierarchy_mapping_name)\n model_path = os.path.join('..', weights_name_dir, WEIGHTS_NAME)\n if not os.path.exists(model_path) or not os.path.exists(hierarchy_mapping_name):\n print('Error: No Weights have been found or No Hierarchy Mapping has been found in {0} or {1}'.format(model_path, hierarchy_mapping_name))\n raise Exception\n self.hierarchy_mapping_objects = cPickle.load(open(hierarchy_mapping_name))\n self.nof_classes = len(self.hierarchy_mapping_objects)\n self.config = Config(gpu_num)\n self.model = self.get_model(self.nof_classes, weight_path=model_path)\n<|end_body_0|>\n\n<|body_start_1|>\n if K.image_dim_ordering() == 'th':\n input_shape_img = (3, None, None)\n else:\n input_shape_img = (self.config.crop_height, self.config.crop_width, 3)\n img_input = Input(shape=input_shape_img, name='image_input')\n net = ModelZoo()\n model_resnet50 = net.resnet50_base(img_input, trainable=True)\n model_resnet50 = GlobalAveragePooling2D(name='global_avg_pool')(model_resnet50)\n output_resnet50 = Dense(number_of_classes, kernel_initializer='he_normal', activation='softmax', name='fc')(model_resnet50)\n model = Model(inputs=img_input, outputs=output_resnet50, name='resnet50')\n model.summary()\n try:\n print('Start loading Weights')\n model.load_weights(weight_path, by_name=True)\n print('Finished successfully loading weights from {}'.format(weight_path))\n except Exception as e:\n print('Could not load pretrained model weights. 
Weights can be found at {} and {}'.format('https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels_notop.h5', 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'))\n raise Exception(e)\n print('Finished successfully loading Model')\n return model\n<|end_body_1|>\n\n<|body_start_2|>\n layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == layer_name][0]\n filters = np.arange(get_num_filters(self.model.layers[layer_idx]))\n vis_images = []\n for idx in filters:\n img = visualize_activation(self.model, layer_idx, filter_indices=idx)\n img = utils.draw_text(img, str(idx))\n vis_images.append(img)\n stitched = utils.stitch_images(vis_images, cols=8)\n plt.axis('off')\n plt.imshow(stitched)\n plt.title(layer_name)\n plt.savefig(savefig_path)\n print('debug')\n<|end_body_2|>\n\n<|body_start_3|>\n layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == last_layer][0]\n vis_images = []\n for idx in [class_number] * nof_times:\n img = visualize_activation(self.model, layer_idx, filter_indices=idx, max_iter=500)\n img = utils.draw_text(img, str(idx))\n vis_images.append(img)\n stitched = utils.stitch_images(vis_images)\n plt.axis('off')\n plt.imshow(stitched)\n plt.title(last_layer)\n plt.savefig(savefig_path)\n print('debug')\n<|end_body_3|>\n\n<|body_start_4|>\n layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == last_layer][0]\n heatmaps = []\n for path in image_paths:\n seed_img = utils.load_img(path, target_size=(self.config.crop_height, self.config.crop_width))\n x = np.expand_dims(img_to_array(seed_img), axis=0)\n x = preprocess_input(x)\n pred_class = np.argmax(self.model.predict(x))\n heatmap = visualize_saliency(self.model, layer_idx, [pred_class], seed_img)\n heatmaps.append(heatmap)\n plt.axis('off')\n plt.imshow(utils.stitch_images(heatmaps))\n plt.title('Saliency map')\n plt.savefig(savefig_path)\n print('debug')\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000072", "length_bytes": 8365, "license_type": "no_license", "methods": [{"docstring": "This function initializes network visualizer :param weights_name_dir: the path for the weights :param hierarchy_mapping_name: the path for the hierarchy mapping :param gpu_num: 0 (default)", "name": "__init__", "signature": "def __init__(self, weights_name_dir, hierarchy_mapping_name, gpu_num=0)"}, {"docstring": "This function loads the model :param weight_path: model weights path :type number_of_classes: number of classes :return: model", "name": "get_model", "signature": "def get_model(self, number_of_classes, weight_path)"}, {"docstring": "Each conv layer has several learned 'template matching' filters that maximize their output when a similar template pattern is found in the input image", "name": "visualize_conv_layers", "signature": "def visualize_conv_layers(self, layer_name='conv1', savefig_path='')"}, {"docstring": "Generate an input image that maximizes the final Dense layer output corresponding to some class class. 
:param last_layer: last layer name :param savefig_path: where to save the figure :param class_number: which class number do we want to check (for example class number 30 is some predicate) :param nof_times: how many times do you want to check the class :return:", "name": "visualize_dense_layers", "signature": "def visualize_dense_layers(self, last_layer='fc', savefig_path='', nof_times=3, class_number=20)"}, {"docstring": "compute the gradient of output category with respect to input image. This should tell us how output category value changes with respect to a small change in input image pixels. :param image_paths: a list of images which we want check. for example: image_paths = [\"https://www.kshs.org/cool2/graphics/dumbbell1lg.jpg\", \"http://tampaspeedboatadventures.com/wp-content/uploads/2010/10/DSC07011.jpg\", \"http://ichef-1.bbci.co.uk/news/660/cpsprodpb/1C24/production/_85540270_85540265.jpg\"] :param last_layer: last layer name :param savefig_path: where to save the figure :return:", "name": "visualize_heat_maps", "signature": "def visualize_heat_maps(self, image_paths, last_layer='fc', savefig_path='')"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_003621", "prompt": "Implement the Python class `NetworkVisualizer` described below.\n\nClass description:\nThis class is a network visualizer\n\nMethod signatures and docstrings:\n- def __init__(self, weights_name_dir, hierarchy_mapping_name, gpu_num=0): This function initializes network visualizer :param weights_name_dir: the path for the weights :param hierarchy_mapping_name: the path for the hierarchy mapping :param gpu_num: 0 (default)\n- def get_model(self, number_of_classes, weight_path): This function loads the model :param weight_path: model weights path :type number_of_classes: number of classes :return: model\n- def visualize_conv_layers(self, layer_name='conv1', savefig_path=''): Each conv layer has several learned 'template matching' filters that maximize their output when a similar template pattern is found in the input image\n- def visualize_dense_layers(self, last_layer='fc', savefig_path='', nof_times=3, class_number=20): Generate an input image that maximizes the final Dense layer output corresponding to some class class. :param last_layer: last layer name :param savefig_path: where to save the figure :param class_number: which class number do we want to check (for example class number 30 is some predicate) :param nof_times: how many times do you want to check the class :return:\n- def visualize_heat_maps(self, image_paths, last_layer='fc', savefig_path=''): compute the gradient of output category with respect to input image. This should tell us how output category value changes with respect to a small change in input image pixels. :param image_paths: a list of images which we want check. 
for example: image_paths = [\"https://www.kshs.org/cool2/graphics/dumbbell1lg.jpg\", \"http://tampaspeedboatadventures.com/wp-content/uploads/2010/10/DSC07011.jpg\", \"http://ichef-1.bbci.co.uk/news/660/cpsprodpb/1C24/production/_85540270_85540265.jpg\"] :param last_layer: last layer name :param savefig_path: where to save the figure :return:", "prompted_full_text": "Implement the Python class `NetworkVisualizer` described below.\n\nClass description:\nThis class is a network visualizer\n\nMethod signatures and docstrings:\n- def __init__(self, weights_name_dir, hierarchy_mapping_name, gpu_num=0): This function initializes network visualizer :param weights_name_dir: the path for the weights :param hierarchy_mapping_name: the path for the hierarchy mapping :param gpu_num: 0 (default)\n- def get_model(self, number_of_classes, weight_path): This function loads the model :param weight_path: model weights path :type number_of_classes: number of classes :return: model\n- def visualize_conv_layers(self, layer_name='conv1', savefig_path=''): Each conv layer has several learned 'template matching' filters that maximize their output when a similar template pattern is found in the input image\n- def visualize_dense_layers(self, last_layer='fc', savefig_path='', nof_times=3, class_number=20): Generate an input image that maximizes the final Dense layer output corresponding to some class class. :param last_layer: last layer name :param savefig_path: where to save the figure :param class_number: which class number do we want to check (for example class number 30 is some predicate) :param nof_times: how many times do you want to check the class :return:\n- def visualize_heat_maps(self, image_paths, last_layer='fc', savefig_path=''): compute the gradient of output category with respect to input image. This should tell us how output category value changes with respect to a small change in input image pixels. :param image_paths: a list of images which we want check. for example: image_paths = [\"https://www.kshs.org/cool2/graphics/dumbbell1lg.jpg\", \"http://tampaspeedboatadventures.com/wp-content/uploads/2010/10/DSC07011.jpg\", \"http://ichef-1.bbci.co.uk/news/660/cpsprodpb/1C24/production/_85540270_85540265.jpg\"] :param last_layer: last layer name :param savefig_path: where to save the figure :return:\n\n<|skeleton|>\nclass NetworkVisualizer:\n \"\"\"This class is a network visualizer\"\"\"\n\n def __init__(self, weights_name_dir, hierarchy_mapping_name, gpu_num=0):\n \"\"\"This function initializes network visualizer :param weights_name_dir: the path for the weights :param hierarchy_mapping_name: the path for the hierarchy mapping :param gpu_num: 0 (default)\"\"\"\n <|body_0|>\n\n def get_model(self, number_of_classes, weight_path):\n \"\"\"This function loads the model :param weight_path: model weights path :type number_of_classes: number of classes :return: model\"\"\"\n <|body_1|>\n\n def visualize_conv_layers(self, layer_name='conv1', savefig_path=''):\n \"\"\"Each conv layer has several learned 'template matching' filters that maximize their output when a similar template pattern is found in the input image\"\"\"\n <|body_2|>\n\n def visualize_dense_layers(self, last_layer='fc', savefig_path='', nof_times=3, class_number=20):\n \"\"\"Generate an input image that maximizes the final Dense layer output corresponding to some class class. 
:param last_layer: last layer name :param savefig_path: where to save the figure :param class_number: which class number do we want to check (for example class number 30 is some predicate) :param nof_times: how many times do you want to check the class :return:\"\"\"\n <|body_3|>\n\n def visualize_heat_maps(self, image_paths, last_layer='fc', savefig_path=''):\n \"\"\"compute the gradient of output category with respect to input image. This should tell us how output category value changes with respect to a small change in input image pixels. :param image_paths: a list of images which we want check. for example: image_paths = [\"https://www.kshs.org/cool2/graphics/dumbbell1lg.jpg\", \"http://tampaspeedboatadventures.com/wp-content/uploads/2010/10/DSC07011.jpg\", \"http://ichef-1.bbci.co.uk/news/660/cpsprodpb/1C24/production/_85540270_85540265.jpg\"] :param last_layer: last layer name :param savefig_path: where to save the figure :return:\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n hierarchy_mapping_name = os.path.join('..', VG_VisualModule_PICKLES_PATH, hierarchy_mapping_name)\n model_path = os.path.join('..', weights_name_dir, WEIGHTS_NAME)\n if not os.path.exists(model_path) or not os.path.exists(hierarchy_mapping_name):\n print('Error: No Weights have been found or No Hierarchy Mapping has been found in {0} or {1}'.format(model_path, hierarchy_mapping_name))\n raise Exception\n self.hierarchy_mapping_objects = cPickle.load(open(hierarchy_mapping_name))\n self.nof_classes = len(self.hierarchy_mapping_objects)\n self.config = Config(gpu_num)\n self.model = self.get_model(self.nof_classes, weight_path=model_path)\n<|end_body_0|>\n\n<|body_start_1|>\n if K.image_dim_ordering() == 'th':\n input_shape_img = (3, None, None)\n else:\n input_shape_img = (self.config.crop_height, self.config.crop_width, 3)\n img_input = Input(shape=input_shape_img, name='image_input')\n net = ModelZoo()\n model_resnet50 = net.resnet50_base(img_input, trainable=True)\n model_resnet50 = GlobalAveragePooling2D(name='global_avg_pool')(model_resnet50)\n output_resnet50 = Dense(number_of_classes, kernel_initializer='he_normal', activation='softmax', name='fc')(model_resnet50)\n model = Model(inputs=img_input, outputs=output_resnet50, name='resnet50')\n model.summary()\n try:\n print('Start loading Weights')\n model.load_weights(weight_path, by_name=True)\n print('Finished successfully loading weights from {}'.format(weight_path))\n except Exception as e:\n print('Could not load pretrained model weights. 
Weights can be found at {} and {}'.format('https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels_notop.h5', 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'))\n raise Exception(e)\n print('Finished successfully loading Model')\n return model\n<|end_body_1|>\n\n<|body_start_2|>\n layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == layer_name][0]\n filters = np.arange(get_num_filters(self.model.layers[layer_idx]))\n vis_images = []\n for idx in filters:\n img = visualize_activation(self.model, layer_idx, filter_indices=idx)\n img = utils.draw_text(img, str(idx))\n vis_images.append(img)\n stitched = utils.stitch_images(vis_images, cols=8)\n plt.axis('off')\n plt.imshow(stitched)\n plt.title(layer_name)\n plt.savefig(savefig_path)\n print('debug')\n<|end_body_2|>\n\n<|body_start_3|>\n layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == last_layer][0]\n vis_images = []\n for idx in [class_number] * nof_times:\n img = visualize_activation(self.model, layer_idx, filter_indices=idx, max_iter=500)\n img = utils.draw_text(img, str(idx))\n vis_images.append(img)\n stitched = utils.stitch_images(vis_images)\n plt.axis('off')\n plt.imshow(stitched)\n plt.title(last_layer)\n plt.savefig(savefig_path)\n print('debug')\n<|end_body_3|>\n\n<|body_start_4|>\n layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == last_layer][0]\n heatmaps = []\n for path in image_paths:\n seed_img = utils.load_img(path, target_size=(self.config.crop_height, self.config.crop_width))\n x = np.expand_dims(img_to_array(seed_img), axis=0)\n x = preprocess_input(x)\n pred_class = np.argmax(self.model.predict(x))\n heatmap = visualize_saliency(self.model, layer_idx, [pred_class], seed_img)\n heatmaps.append(heatmap)\n plt.axis('off')\n plt.imshow(utils.stitch_images(heatmaps))\n plt.title('Saliency map')\n plt.savefig(savefig_path)\n print('debug')\n<|end_body_4|>\n", "revision_id": "1b65b21474f923b799ff486eea9dea5137be6506", "skeleton": "<|skeleton|>\nclass NetworkVisualizer:\n \"\"\"This class is a network visualizer\"\"\"\n\n def __init__(self, weights_name_dir, hierarchy_mapping_name, gpu_num=0):\n \"\"\"This function initializes network visualizer :param weights_name_dir: the path for the weights :param hierarchy_mapping_name: the path for the hierarchy mapping :param gpu_num: 0 (default)\"\"\"\n <|body_0|>\n\n def get_model(self, number_of_classes, weight_path):\n \"\"\"This function loads the model :param weight_path: model weights path :type number_of_classes: number of classes :return: model\"\"\"\n <|body_1|>\n\n def visualize_conv_layers(self, layer_name='conv1', savefig_path=''):\n \"\"\"Each conv layer has several learned 'template matching' filters that maximize their output when a similar template pattern is found in the input image\"\"\"\n <|body_2|>\n\n def visualize_dense_layers(self, last_layer='fc', savefig_path='', nof_times=3, class_number=20):\n \"\"\"Generate an input image that maximizes the final Dense layer output corresponding to some class class. 
:param last_layer: last layer name :param savefig_path: where to save the figure :param class_number: which class number do we want to check (for example class number 30 is some predicate) :param nof_times: how many times do you want to check the class :return:\"\"\"\n <|body_3|>\n\n def visualize_heat_maps(self, image_paths, last_layer='fc', savefig_path=''):\n \"\"\"compute the gradient of output category with respect to input image. This should tell us how output category value changes with respect to a small change in input image pixels. :param image_paths: a list of images which we want check. for example: image_paths = [\"https://www.kshs.org/cool2/graphics/dumbbell1lg.jpg\", \"http://tampaspeedboatadventures.com/wp-content/uploads/2010/10/DSC07011.jpg\", \"http://ichef-1.bbci.co.uk/news/660/cpsprodpb/1C24/production/_85540270_85540265.jpg\"] :param last_layer: last layer name :param savefig_path: where to save the figure :return:\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NetworkVisualizer:\n \"\"\"This class is a network visualizer\"\"\"\n\n def __init__(self, weights_name_dir, hierarchy_mapping_name, gpu_num=0):\n \"\"\"This function initializes network visualizer :param weights_name_dir: the path for the weights :param hierarchy_mapping_name: the path for the hierarchy mapping :param gpu_num: 0 (default)\"\"\"\n hierarchy_mapping_name = os.path.join('..', VG_VisualModule_PICKLES_PATH, hierarchy_mapping_name)\n model_path = os.path.join('..', weights_name_dir, WEIGHTS_NAME)\n if not os.path.exists(model_path) or not os.path.exists(hierarchy_mapping_name):\n print('Error: No Weights have been found or No Hierarchy Mapping has been found in {0} or {1}'.format(model_path, hierarchy_mapping_name))\n raise Exception\n self.hierarchy_mapping_objects = cPickle.load(open(hierarchy_mapping_name))\n self.nof_classes = len(self.hierarchy_mapping_objects)\n self.config = Config(gpu_num)\n self.model = self.get_model(self.nof_classes, weight_path=model_path)\n\n def get_model(self, number_of_classes, weight_path):\n \"\"\"This function loads the model :param weight_path: model weights path :type number_of_classes: number of classes :return: model\"\"\"\n if K.image_dim_ordering() == 'th':\n input_shape_img = (3, None, None)\n else:\n input_shape_img = (self.config.crop_height, self.config.crop_width, 3)\n img_input = Input(shape=input_shape_img, name='image_input')\n net = ModelZoo()\n model_resnet50 = net.resnet50_base(img_input, trainable=True)\n model_resnet50 = GlobalAveragePooling2D(name='global_avg_pool')(model_resnet50)\n output_resnet50 = Dense(number_of_classes, kernel_initializer='he_normal', activation='softmax', name='fc')(model_resnet50)\n model = Model(inputs=img_input, outputs=output_resnet50, name='resnet50')\n model.summary()\n try:\n print('Start loading Weights')\n model.load_weights(weight_path, by_name=True)\n print('Finished successfully loading weights from {}'.format(weight_path))\n except Exception as e:\n print('Could not load pretrained model weights. 
Weights can be found at {} and {}'.format('https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels_notop.h5', 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'))\n raise Exception(e)\n print('Finished successfully loading Model')\n return model\n\n def visualize_conv_layers(self, layer_name='conv1', savefig_path=''):\n \"\"\"Each conv layer has several learned 'template matching' filters that maximize their output when a similar template pattern is found in the input image\"\"\"\n layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == layer_name][0]\n filters = np.arange(get_num_filters(self.model.layers[layer_idx]))\n vis_images = []\n for idx in filters:\n img = visualize_activation(self.model, layer_idx, filter_indices=idx)\n img = utils.draw_text(img, str(idx))\n vis_images.append(img)\n stitched = utils.stitch_images(vis_images, cols=8)\n plt.axis('off')\n plt.imshow(stitched)\n plt.title(layer_name)\n plt.savefig(savefig_path)\n print('debug')\n\n def visualize_dense_layers(self, last_layer='fc', savefig_path='', nof_times=3, class_number=20):\n \"\"\"Generate an input image that maximizes the final Dense layer output corresponding to some class class. :param last_layer: last layer name :param savefig_path: where to save the figure :param class_number: which class number do we want to check (for example class number 30 is some predicate) :param nof_times: how many times do you want to check the class :return:\"\"\"\n layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == last_layer][0]\n vis_images = []\n for idx in [class_number] * nof_times:\n img = visualize_activation(self.model, layer_idx, filter_indices=idx, max_iter=500)\n img = utils.draw_text(img, str(idx))\n vis_images.append(img)\n stitched = utils.stitch_images(vis_images)\n plt.axis('off')\n plt.imshow(stitched)\n plt.title(last_layer)\n plt.savefig(savefig_path)\n print('debug')\n\n def visualize_heat_maps(self, image_paths, last_layer='fc', savefig_path=''):\n \"\"\"compute the gradient of output category with respect to input image. This should tell us how output category value changes with respect to a small change in input image pixels. :param image_paths: a list of images which we want check. 
for example: image_paths = [\"https://www.kshs.org/cool2/graphics/dumbbell1lg.jpg\", \"http://tampaspeedboatadventures.com/wp-content/uploads/2010/10/DSC07011.jpg\", \"http://ichef-1.bbci.co.uk/news/660/cpsprodpb/1C24/production/_85540270_85540265.jpg\"] :param last_layer: last layer name :param savefig_path: where to save the figure :return:\"\"\"\n layer_idx = [idx for idx, layer in enumerate(self.model.layers) if layer.name == last_layer][0]\n heatmaps = []\n for path in image_paths:\n seed_img = utils.load_img(path, target_size=(self.config.crop_height, self.config.crop_width))\n x = np.expand_dims(img_to_array(seed_img), axis=0)\n x = preprocess_input(x)\n pred_class = np.argmax(self.model.predict(x))\n heatmap = visualize_saliency(self.model, layer_idx, [pred_class], seed_img)\n heatmaps.append(heatmap)\n plt.axis('off')\n plt.imshow(utils.stitch_images(heatmaps))\n plt.title('Saliency map')\n plt.savefig(savefig_path)\n print('debug')\n", "source": "the_stack_v2_python_sparse", "source_path": "Utils/NetworkVisualizer.py", "source_repo": "roeiherz/SceneGrapher", "split": "val", "star_events_count": 0} {"blob_id": "f28c2e1a08415fe1d2528d82fa7b2fa6c3bdf5d6", "bodies": ["self.entities = entities\nself.sources = sources\nself.created_before = created_before\nself.created_after = created_after\nself.modified_before = modified_before\nself.modified_after = modified_after\nself.properties = properties", "filter_request = {}\nif self.sources:\n filter_request['Types'] = list(map(lambda x: x.value if isinstance(x, LineageSourceEnum) else x, self.sources))\nif self.entities:\n filter_request['LineageTypes'] = list(map(lambda x: x.value if isinstance(x, LineageEntityEnum) else x, self.entities))\nif self.created_before:\n filter_request['CreatedBefore'] = self.created_before\nif self.created_after:\n filter_request['CreatedAfter'] = self.created_after\nif self.modified_before:\n filter_request['ModifiedBefore'] = self.modified_before\nif self.modified_after:\n filter_request['ModifiedAfter'] = self.modified_after\nif self.properties:\n filter_request['Properties'] = self.properties\nreturn filter_request"], "bodies_text": "<|body_start_0|>\n self.entities = entities\n self.sources = sources\n self.created_before = created_before\n self.created_after = created_after\n self.modified_before = modified_before\n self.modified_after = modified_after\n self.properties = properties\n<|end_body_0|>\n\n<|body_start_1|>\n filter_request = {}\n if self.sources:\n filter_request['Types'] = list(map(lambda x: x.value if isinstance(x, LineageSourceEnum) else x, self.sources))\n if self.entities:\n filter_request['LineageTypes'] = list(map(lambda x: x.value if isinstance(x, LineageEntityEnum) else x, self.entities))\n if self.created_before:\n filter_request['CreatedBefore'] = self.created_before\n if self.created_after:\n filter_request['CreatedAfter'] = self.created_after\n if self.modified_before:\n filter_request['ModifiedBefore'] = self.modified_before\n if self.modified_after:\n filter_request['ModifiedAfter'] = self.modified_after\n if self.properties:\n filter_request['Properties'] = self.properties\n return filter_request\n<|end_body_1|>\n", "class_docstring": "A filter used in a lineage query.", "class_name": "LineageFilter", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LineageFilter:\n \"\"\"A filter used in a lineage query.\"\"\"\n\n def __init__(self, entities: Optional[List[Union[LineageEntityEnum, str]]]=None, 
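
The NetworkVisualizer record that closes on this line looks up layers by name with the same list comprehension over model.layers in each visualize_* method, because the keras-vis helpers it calls take a layer index rather than a layer object. A small sketch of that lookup on a toy model, assuming a current tf.keras install (the record itself targets the older standalone Keras plus keras-vis, so the imports differ there):

    import tensorflow as tf

    # Toy model; the layer names mirror those used in the record.
    inp = tf.keras.Input(shape=(8,), name='image_input')
    out = tf.keras.layers.Dense(4, activation='softmax', name='fc')(inp)
    model = tf.keras.Model(inp, out)

    layer_name = 'fc'
    # Pattern from the record (raises IndexError if the name is absent):
    layer_idx = [idx for idx, layer in enumerate(model.layers) if layer.name == layer_name][0]
    # Equivalent lookup with a clearer failure mode (ValueError naming the bad layer):
    layer_idx_alt = model.layers.index(model.get_layer(layer_name))
    assert layer_idx == layer_idx_alt

Either spelling yields the index that visualize_activation and visualize_saliency expect; the comprehension form works on any Keras generation, which is likely why the record uses it.
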
sources: Optional[List[Union[LineageSourceEnum, str]]]=None, created_before: Optional[datetime]=None, created_after: Optional[datetime]=None, modified_before: Optional[datetime]=None, modified_after: Optional[datetime]=None, properties: Optional[Dict[str, str]]=None):\n \"\"\"Initialize ``LineageFilter`` instance.\"\"\"\n <|body_0|>\n\n def _to_request_dict(self):\n \"\"\"Convert the lineage filter to its API representation.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.entities = entities\n self.sources = sources\n self.created_before = created_before\n self.created_after = created_after\n self.modified_before = modified_before\n self.modified_after = modified_after\n self.properties = properties\n<|end_body_0|>\n\n<|body_start_1|>\n filter_request = {}\n if self.sources:\n filter_request['Types'] = list(map(lambda x: x.value if isinstance(x, LineageSourceEnum) else x, self.sources))\n if self.entities:\n filter_request['LineageTypes'] = list(map(lambda x: x.value if isinstance(x, LineageEntityEnum) else x, self.entities))\n if self.created_before:\n filter_request['CreatedBefore'] = self.created_before\n if self.created_after:\n filter_request['CreatedAfter'] = self.created_after\n if self.modified_before:\n filter_request['ModifiedBefore'] = self.modified_before\n if self.modified_after:\n filter_request['ModifiedAfter'] = self.modified_after\n if self.properties:\n filter_request['Properties'] = self.properties\n return filter_request\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000073", "length_bytes": 27038, "license_type": "permissive", "methods": [{"docstring": "Initialize ``LineageFilter`` instance.", "name": "__init__", "signature": "def __init__(self, entities: Optional[List[Union[LineageEntityEnum, str]]]=None, sources: Optional[List[Union[LineageSourceEnum, str]]]=None, created_before: Optional[datetime]=None, created_after: Optional[datetime]=None, modified_before: Optional[datetime]=None, modified_after: Optional[datetime]=None, properties: Optional[Dict[str, str]]=None)"}, {"docstring": "Convert the lineage filter to its API representation.", "name": "_to_request_dict", "signature": "def _to_request_dict(self)"}], "n_methods": 2, "prompt": "Implement the Python class `LineageFilter` described below.\n\nClass description:\nA filter used in a lineage query.\n\nMethod signatures and docstrings:\n- def __init__(self, entities: Optional[List[Union[LineageEntityEnum, str]]]=None, sources: Optional[List[Union[LineageSourceEnum, str]]]=None, created_before: Optional[datetime]=None, created_after: Optional[datetime]=None, modified_before: Optional[datetime]=None, modified_after: Optional[datetime]=None, properties: Optional[Dict[str, str]]=None): Initialize ``LineageFilter`` instance.\n- def _to_request_dict(self): Convert the lineage filter to its API representation.", "prompted_full_text": "Implement the Python class `LineageFilter` described below.\n\nClass description:\nA filter used in a lineage query.\n\nMethod signatures and docstrings:\n- def __init__(self, entities: Optional[List[Union[LineageEntityEnum, str]]]=None, sources: Optional[List[Union[LineageSourceEnum, str]]]=None, created_before: Optional[datetime]=None, created_after: Optional[datetime]=None, modified_before: Optional[datetime]=None, modified_after: Optional[datetime]=None, properties: Optional[Dict[str, str]]=None): Initialize ``LineageFilter`` instance.\n- def _to_request_dict(self): Convert the lineage filter to its API representation.\n\n<|skeleton|>\nclass 
LineageFilter:\n \"\"\"A filter used in a lineage query.\"\"\"\n\n def __init__(self, entities: Optional[List[Union[LineageEntityEnum, str]]]=None, sources: Optional[List[Union[LineageSourceEnum, str]]]=None, created_before: Optional[datetime]=None, created_after: Optional[datetime]=None, modified_before: Optional[datetime]=None, modified_after: Optional[datetime]=None, properties: Optional[Dict[str, str]]=None):\n \"\"\"Initialize ``LineageFilter`` instance.\"\"\"\n <|body_0|>\n\n def _to_request_dict(self):\n \"\"\"Convert the lineage filter to its API representation.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.entities = entities\n self.sources = sources\n self.created_before = created_before\n self.created_after = created_after\n self.modified_before = modified_before\n self.modified_after = modified_after\n self.properties = properties\n<|end_body_0|>\n\n<|body_start_1|>\n filter_request = {}\n if self.sources:\n filter_request['Types'] = list(map(lambda x: x.value if isinstance(x, LineageSourceEnum) else x, self.sources))\n if self.entities:\n filter_request['LineageTypes'] = list(map(lambda x: x.value if isinstance(x, LineageEntityEnum) else x, self.entities))\n if self.created_before:\n filter_request['CreatedBefore'] = self.created_before\n if self.created_after:\n filter_request['CreatedAfter'] = self.created_after\n if self.modified_before:\n filter_request['ModifiedBefore'] = self.modified_before\n if self.modified_after:\n filter_request['ModifiedAfter'] = self.modified_after\n if self.properties:\n filter_request['Properties'] = self.properties\n return filter_request\n<|end_body_1|>\n", "revision_id": "8d5d7fd8ae1a917ed3e2b988d5e533bce244fd85", "skeleton": "<|skeleton|>\nclass LineageFilter:\n \"\"\"A filter used in a lineage query.\"\"\"\n\n def __init__(self, entities: Optional[List[Union[LineageEntityEnum, str]]]=None, sources: Optional[List[Union[LineageSourceEnum, str]]]=None, created_before: Optional[datetime]=None, created_after: Optional[datetime]=None, modified_before: Optional[datetime]=None, modified_after: Optional[datetime]=None, properties: Optional[Dict[str, str]]=None):\n \"\"\"Initialize ``LineageFilter`` instance.\"\"\"\n <|body_0|>\n\n def _to_request_dict(self):\n \"\"\"Convert the lineage filter to its API representation.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LineageFilter:\n \"\"\"A filter used in a lineage query.\"\"\"\n\n def __init__(self, entities: Optional[List[Union[LineageEntityEnum, str]]]=None, sources: Optional[List[Union[LineageSourceEnum, str]]]=None, created_before: Optional[datetime]=None, created_after: Optional[datetime]=None, modified_before: Optional[datetime]=None, modified_after: Optional[datetime]=None, properties: Optional[Dict[str, str]]=None):\n \"\"\"Initialize ``LineageFilter`` instance.\"\"\"\n self.entities = entities\n self.sources = sources\n self.created_before = created_before\n self.created_after = created_after\n self.modified_before = modified_before\n self.modified_after = modified_after\n self.properties = properties\n\n def _to_request_dict(self):\n \"\"\"Convert the lineage filter to its API representation.\"\"\"\n filter_request = {}\n if self.sources:\n filter_request['Types'] = list(map(lambda x: x.value if isinstance(x, LineageSourceEnum) else x, self.sources))\n if self.entities:\n filter_request['LineageTypes'] = list(map(lambda x: x.value if isinstance(x, 
LineageEntityEnum) else x, self.entities))\n if self.created_before:\n filter_request['CreatedBefore'] = self.created_before\n if self.created_after:\n filter_request['CreatedAfter'] = self.created_after\n if self.modified_before:\n filter_request['ModifiedBefore'] = self.modified_before\n if self.modified_after:\n filter_request['ModifiedAfter'] = self.modified_after\n if self.properties:\n filter_request['Properties'] = self.properties\n return filter_request\n", "source": "the_stack_v2_python_sparse", "source_path": "src/sagemaker/lineage/query.py", "source_repo": "aws/sagemaker-python-sdk", "split": "val", "star_events_count": 2050} {"blob_id": "957cd4643f679242bdb9a27cfbbfc3c71777469c", "bodies": ["l = len(s)\ndp_is_palindrome = [[False] * l for _ in range(l)]\ndp_is_palindrome[0][0] = True\nfor i in range(l):\n dp_is_palindrome[i][i] = True\n if s[i] == s[i - 1]:\n dp_is_palindrome[i - 1][i] = True\nfor j in range(2, l):\n for i in range(0, j - 1):\n if s[i] == s[j] and dp_is_palindrome[i + 1][j - 1]:\n dp_is_palindrome[i][j] = True\nprint(np.array(dp_is_palindrome))\ndp = [l] * (l + 1)\ndp[0] = -1\nfor i in range(l):\n for j in range(i + 1):\n if dp_is_palindrome[j][i]:\n b = dp[j] + 1\n dp[i + 1] = min(dp[i + 1], b)\nprint(dp)\nreturn dp[-1]", "size = len(s)\ncut = range(-1, size)\nfor idx in range(1, size):\n for low, high in ((idx, idx), (idx - 1, idx)):\n while low >= 0 and high < size and (s[low] == s[high]):\n cut[high + 1] = min(cut[high + 1], cut[low] + 1)\n low -= 1\n high += 1\nreturn cut[-1]", "if s == s[::-1]:\n return 0\nfor i in range(1, len(s)):\n if s[:i] == s[:i][::-1] and s[i:] == s[i:][::-1]:\n return 1\ncut = [x for x in range(-1, len(s))]\nfor i in range(len(s)):\n r1, r2 = (0, 0)\n while i - r1 >= 0 and i + r1 < len(s) and (s[i - r1] == s[i + r1]):\n cut[i + r1 + 1] = min(cut[i + r1 + 1], cut[i - r1] + 1)\n r1 += 1\n while i - r2 >= 0 and i + r2 + 1 < len(s) and (s[i - r2] == s[i + r2 + 1]):\n cut[i + r2 + 2] = min(cut[i + r2 + 2], cut[i - r2] + 1)\n r2 += 1\nreturn cut[-1]", "if s == s[::-1]:\n return 0\nfor i in range(1, len(s)):\n if s[:i] == s[:i][::-1] and s[i:] == s[i:][::-1]:\n return 1\ncut = [x for x in range(-1, len(s))]\nfor i in range(len(s)):\n for j in range(i, len(s)):\n if s[i:j + 1] == s[i:j + 1][::-1]:\n cut[j + 1] = min(cut[j + 1], cut[i] + 1)\nreturn cut[-1]"], "bodies_text": "<|body_start_0|>\n l = len(s)\n dp_is_palindrome = [[False] * l for _ in range(l)]\n dp_is_palindrome[0][0] = True\n for i in range(l):\n dp_is_palindrome[i][i] = True\n if s[i] == s[i - 1]:\n dp_is_palindrome[i - 1][i] = True\n for j in range(2, l):\n for i in range(0, j - 1):\n if s[i] == s[j] and dp_is_palindrome[i + 1][j - 1]:\n dp_is_palindrome[i][j] = True\n print(np.array(dp_is_palindrome))\n dp = [l] * (l + 1)\n dp[0] = -1\n for i in range(l):\n for j in range(i + 1):\n if dp_is_palindrome[j][i]:\n b = dp[j] + 1\n dp[i + 1] = min(dp[i + 1], b)\n print(dp)\n return dp[-1]\n<|end_body_0|>\n\n<|body_start_1|>\n size = len(s)\n cut = range(-1, size)\n for idx in range(1, size):\n for low, high in ((idx, idx), (idx - 1, idx)):\n while low >= 0 and high < size and (s[low] == s[high]):\n cut[high + 1] = min(cut[high + 1], cut[low] + 1)\n low -= 1\n high += 1\n return cut[-1]\n<|end_body_1|>\n\n<|body_start_2|>\n if s == s[::-1]:\n return 0\n for i in range(1, len(s)):\n if s[:i] == s[:i][::-1] and s[i:] == s[i:][::-1]:\n return 1\n cut = [x for x in range(-1, len(s))]\n for i in range(len(s)):\n r1, r2 = (0, 0)\n while i - r1 >= 0 and i + r1 < len(s) and (s[i - r1] == 
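
The LineageFilter record that closes on this line accepts enum members and plain strings interchangeably; _to_request_dict normalizes both through the isinstance check inside each lambda. A sketch of that behaviour with stand-in enums (the real members live in sagemaker.lineage.query and their exact values are not reproduced here, so the two members below are illustrative assumptions):

    from datetime import datetime
    from enum import Enum

    class LineageEntityEnum(Enum):
        ARTIFACT = 'Artifact'      # illustrative value, not taken from the SDK

    class LineageSourceEnum(Enum):
        MODEL = 'Model'            # illustrative value, not taken from the SDK

    # Assumes LineageFilter as defined in the record above.
    f = LineageFilter(entities=[LineageEntityEnum.ARTIFACT, 'Context'],
                      sources=[LineageSourceEnum.MODEL],
                      created_after=datetime(2021, 1, 1))
    print(f._to_request_dict())
    # {'Types': ['Model'], 'LineageTypes': ['Artifact', 'Context'],
    #  'CreatedAfter': datetime.datetime(2021, 1, 1, 0, 0)}

Note the truthiness checks: an empty entities or properties collection is skipped entirely rather than sent as an empty field, which is the intended "unset" behaviour for this request shape.
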
s[i + r1]):\n cut[i + r1 + 1] = min(cut[i + r1 + 1], cut[i - r1] + 1)\n r1 += 1\n while i - r2 >= 0 and i + r2 + 1 < len(s) and (s[i - r2] == s[i + r2 + 1]):\n cut[i + r2 + 2] = min(cut[i + r2 + 2], cut[i - r2] + 1)\n r2 += 1\n return cut[-1]\n<|end_body_2|>\n\n<|body_start_3|>\n if s == s[::-1]:\n return 0\n for i in range(1, len(s)):\n if s[:i] == s[:i][::-1] and s[i:] == s[i:][::-1]:\n return 1\n cut = [x for x in range(-1, len(s))]\n for i in range(len(s)):\n for j in range(i, len(s)):\n if s[i:j + 1] == s[i:j + 1][::-1]:\n cut[j + 1] = min(cut[j + 1], cut[i] + 1)\n return cut[-1]\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def minCut(self, s):\n \"\"\":type s: str :rtype: int 666ms\"\"\"\n <|body_0|>\n\n def minCut_1(self, s):\n \"\"\":type s: str :rtype: int 326ms\"\"\"\n <|body_1|>\n\n def minCut_2(self, s):\n \"\"\":type s: str :rtype: int 38ms\"\"\"\n <|body_2|>\n\n def minCut_3(self, s):\n \"\"\":type s: str :rtype: int 242ms\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n l = len(s)\n dp_is_palindrome = [[False] * l for _ in range(l)]\n dp_is_palindrome[0][0] = True\n for i in range(l):\n dp_is_palindrome[i][i] = True\n if s[i] == s[i - 1]:\n dp_is_palindrome[i - 1][i] = True\n for j in range(2, l):\n for i in range(0, j - 1):\n if s[i] == s[j] and dp_is_palindrome[i + 1][j - 1]:\n dp_is_palindrome[i][j] = True\n print(np.array(dp_is_palindrome))\n dp = [l] * (l + 1)\n dp[0] = -1\n for i in range(l):\n for j in range(i + 1):\n if dp_is_palindrome[j][i]:\n b = dp[j] + 1\n dp[i + 1] = min(dp[i + 1], b)\n print(dp)\n return dp[-1]\n<|end_body_0|>\n\n<|body_start_1|>\n size = len(s)\n cut = range(-1, size)\n for idx in range(1, size):\n for low, high in ((idx, idx), (idx - 1, idx)):\n while low >= 0 and high < size and (s[low] == s[high]):\n cut[high + 1] = min(cut[high + 1], cut[low] + 1)\n low -= 1\n high += 1\n return cut[-1]\n<|end_body_1|>\n\n<|body_start_2|>\n if s == s[::-1]:\n return 0\n for i in range(1, len(s)):\n if s[:i] == s[:i][::-1] and s[i:] == s[i:][::-1]:\n return 1\n cut = [x for x in range(-1, len(s))]\n for i in range(len(s)):\n r1, r2 = (0, 0)\n while i - r1 >= 0 and i + r1 < len(s) and (s[i - r1] == s[i + r1]):\n cut[i + r1 + 1] = min(cut[i + r1 + 1], cut[i - r1] + 1)\n r1 += 1\n while i - r2 >= 0 and i + r2 + 1 < len(s) and (s[i - r2] == s[i + r2 + 1]):\n cut[i + r2 + 2] = min(cut[i + r2 + 2], cut[i - r2] + 1)\n r2 += 1\n return cut[-1]\n<|end_body_2|>\n\n<|body_start_3|>\n if s == s[::-1]:\n return 0\n for i in range(1, len(s)):\n if s[:i] == s[:i][::-1] and s[i:] == s[i:][::-1]:\n return 1\n cut = [x for x in range(-1, len(s))]\n for i in range(len(s)):\n for j in range(i, len(s)):\n if s[i:j + 1] == s[i:j + 1][::-1]:\n cut[j + 1] = min(cut[j + 1], cut[i] + 1)\n return cut[-1]\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000074", "length_bytes": 3026, "license_type": "no_license", "methods": [{"docstring": ":type s: str :rtype: int 666ms", "name": "minCut", "signature": "def minCut(self, s)"}, {"docstring": ":type s: str :rtype: int 326ms", "name": "minCut_1", "signature": "def minCut_1(self, s)"}, {"docstring": ":type s: str :rtype: int 38ms", "name": "minCut_2", "signature": "def minCut_2(self, s)"}, {"docstring": ":type s: str :rtype: int 242ms", "name": "minCut_3", "signature": "def minCut_3(self, s)"}], "n_methods": 4, "prompt": "Implement the Python class `Solution` 
described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minCut(self, s): :type s: str :rtype: int 666ms\n- def minCut_1(self, s): :type s: str :rtype: int 326ms\n- def minCut_2(self, s): :type s: str :rtype: int 38ms\n- def minCut_3(self, s): :type s: str :rtype: int 242ms", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minCut(self, s): :type s: str :rtype: int 666ms\n- def minCut_1(self, s): :type s: str :rtype: int 326ms\n- def minCut_2(self, s): :type s: str :rtype: int 38ms\n- def minCut_3(self, s): :type s: str :rtype: int 242ms\n\n<|skeleton|>\nclass Solution:\n\n def minCut(self, s):\n \"\"\":type s: str :rtype: int 666ms\"\"\"\n <|body_0|>\n\n def minCut_1(self, s):\n \"\"\":type s: str :rtype: int 326ms\"\"\"\n <|body_1|>\n\n def minCut_2(self, s):\n \"\"\":type s: str :rtype: int 38ms\"\"\"\n <|body_2|>\n\n def minCut_3(self, s):\n \"\"\":type s: str :rtype: int 242ms\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n l = len(s)\n dp_is_palindrome = [[False] * l for _ in range(l)]\n dp_is_palindrome[0][0] = True\n for i in range(l):\n dp_is_palindrome[i][i] = True\n if s[i] == s[i - 1]:\n dp_is_palindrome[i - 1][i] = True\n for j in range(2, l):\n for i in range(0, j - 1):\n if s[i] == s[j] and dp_is_palindrome[i + 1][j - 1]:\n dp_is_palindrome[i][j] = True\n print(np.array(dp_is_palindrome))\n dp = [l] * (l + 1)\n dp[0] = -1\n for i in range(l):\n for j in range(i + 1):\n if dp_is_palindrome[j][i]:\n b = dp[j] + 1\n dp[i + 1] = min(dp[i + 1], b)\n print(dp)\n return dp[-1]\n<|end_body_0|>\n\n<|body_start_1|>\n size = len(s)\n cut = range(-1, size)\n for idx in range(1, size):\n for low, high in ((idx, idx), (idx - 1, idx)):\n while low >= 0 and high < size and (s[low] == s[high]):\n cut[high + 1] = min(cut[high + 1], cut[low] + 1)\n low -= 1\n high += 1\n return cut[-1]\n<|end_body_1|>\n\n<|body_start_2|>\n if s == s[::-1]:\n return 0\n for i in range(1, len(s)):\n if s[:i] == s[:i][::-1] and s[i:] == s[i:][::-1]:\n return 1\n cut = [x for x in range(-1, len(s))]\n for i in range(len(s)):\n r1, r2 = (0, 0)\n while i - r1 >= 0 and i + r1 < len(s) and (s[i - r1] == s[i + r1]):\n cut[i + r1 + 1] = min(cut[i + r1 + 1], cut[i - r1] + 1)\n r1 += 1\n while i - r2 >= 0 and i + r2 + 1 < len(s) and (s[i - r2] == s[i + r2 + 1]):\n cut[i + r2 + 2] = min(cut[i + r2 + 2], cut[i - r2] + 1)\n r2 += 1\n return cut[-1]\n<|end_body_2|>\n\n<|body_start_3|>\n if s == s[::-1]:\n return 0\n for i in range(1, len(s)):\n if s[:i] == s[:i][::-1] and s[i:] == s[i:][::-1]:\n return 1\n cut = [x for x in range(-1, len(s))]\n for i in range(len(s)):\n for j in range(i, len(s)):\n if s[i:j + 1] == s[i:j + 1][::-1]:\n cut[j + 1] = min(cut[j + 1], cut[i] + 1)\n return cut[-1]\n<|end_body_3|>\n", "revision_id": "679a2b246b8b6bb7fc55ed1c8096d3047d6d4461", "skeleton": "<|skeleton|>\nclass Solution:\n\n def minCut(self, s):\n \"\"\":type s: str :rtype: int 666ms\"\"\"\n <|body_0|>\n\n def minCut_1(self, s):\n \"\"\":type s: str :rtype: int 326ms\"\"\"\n <|body_1|>\n\n def minCut_2(self, s):\n \"\"\":type s: str :rtype: int 38ms\"\"\"\n <|body_2|>\n\n def minCut_3(self, s):\n \"\"\":type s: str :rtype: int 242ms\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def minCut(self, s):\n \"\"\":type 
s: str :rtype: int 666ms\"\"\"\n l = len(s)\n dp_is_palindrome = [[False] * l for _ in range(l)]\n dp_is_palindrome[0][0] = True\n for i in range(l):\n dp_is_palindrome[i][i] = True\n if s[i] == s[i - 1]:\n dp_is_palindrome[i - 1][i] = True\n for j in range(2, l):\n for i in range(0, j - 1):\n if s[i] == s[j] and dp_is_palindrome[i + 1][j - 1]:\n dp_is_palindrome[i][j] = True\n print(np.array(dp_is_palindrome))\n dp = [l] * (l + 1)\n dp[0] = -1\n for i in range(l):\n for j in range(i + 1):\n if dp_is_palindrome[j][i]:\n b = dp[j] + 1\n dp[i + 1] = min(dp[i + 1], b)\n print(dp)\n return dp[-1]\n\n def minCut_1(self, s):\n \"\"\":type s: str :rtype: int 326ms\"\"\"\n size = len(s)\n cut = range(-1, size)\n for idx in range(1, size):\n for low, high in ((idx, idx), (idx - 1, idx)):\n while low >= 0 and high < size and (s[low] == s[high]):\n cut[high + 1] = min(cut[high + 1], cut[low] + 1)\n low -= 1\n high += 1\n return cut[-1]\n\n def minCut_2(self, s):\n \"\"\":type s: str :rtype: int 38ms\"\"\"\n if s == s[::-1]:\n return 0\n for i in range(1, len(s)):\n if s[:i] == s[:i][::-1] and s[i:] == s[i:][::-1]:\n return 1\n cut = [x for x in range(-1, len(s))]\n for i in range(len(s)):\n r1, r2 = (0, 0)\n while i - r1 >= 0 and i + r1 < len(s) and (s[i - r1] == s[i + r1]):\n cut[i + r1 + 1] = min(cut[i + r1 + 1], cut[i - r1] + 1)\n r1 += 1\n while i - r2 >= 0 and i + r2 + 1 < len(s) and (s[i - r2] == s[i + r2 + 1]):\n cut[i + r2 + 2] = min(cut[i + r2 + 2], cut[i - r2] + 1)\n r2 += 1\n return cut[-1]\n\n def minCut_3(self, s):\n \"\"\":type s: str :rtype: int 242ms\"\"\"\n if s == s[::-1]:\n return 0\n for i in range(1, len(s)):\n if s[:i] == s[:i][::-1] and s[i:] == s[i:][::-1]:\n return 1\n cut = [x for x in range(-1, len(s))]\n for i in range(len(s)):\n for j in range(i, len(s)):\n if s[i:j + 1] == s[i:j + 1][::-1]:\n cut[j + 1] = min(cut[j + 1], cut[i] + 1)\n return cut[-1]\n", "source": "the_stack_v2_python_sparse", "source_path": "PalindromePartitioningII_HARD_132.py", "source_repo": "953250587/leetcode-python", "split": "val", "star_events_count": 2} {"blob_id": "fe6a0db2ed2b1403fbcb28411ba12014fea84cf6", "bodies": ["self.name = self.__class__.__name__\nself.resources = {}\nself.subdevices = {}", "if hasattr(self, 'driver') and self.driver == drivers.lgpib:\n if hasattr(self, 'eos_char'):\n self.device.config(gpib.IbcEOSchar, ord(self.eos_char))\n self.device.config(gpib.IbcEOSrd, 1)\n try:\n log.debug('GPIB device IDN: {0!r}'.format(self.idn))\n except gpib.GpibError as e:\n raise DeviceNotFoundError('Could not open device at \"{0}\".'.format(self.connection_resource), e)\nelif str(self) == '':\n self.device.read_termination = u'\\r'\nfor name, subdev in self.subdevices.items():\n log.debug('Post-connection for subdevice \"{0}\".'.format(name))\n subdev._connected()"], "bodies_text": "<|body_start_0|>\n self.name = self.__class__.__name__\n self.resources = {}\n self.subdevices = {}\n<|end_body_0|>\n\n<|body_start_1|>\n if hasattr(self, 'driver') and self.driver == drivers.lgpib:\n if hasattr(self, 'eos_char'):\n self.device.config(gpib.IbcEOSchar, ord(self.eos_char))\n self.device.config(gpib.IbcEOSrd, 1)\n try:\n log.debug('GPIB device IDN: {0!r}'.format(self.idn))\n except gpib.GpibError as e:\n raise DeviceNotFoundError('Could not open device at \"{0}\".'.format(self.connection_resource), e)\n elif str(self) == '':\n self.device.read_termination = u'\\r'\n for name, subdev in self.subdevices.items():\n log.debug('Post-connection for subdevice \"{0}\".'.format(name))\n 
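
The four variants in the Solution record that closes on this line all implement palindrome partitioning II with the same cut-array recurrence: cut[j + 1] = min(cut[j + 1], cut[i] + 1) whenever s[i..j] is a palindrome, seeded with cut[k] = k - 1 so a palindromic prefix costs zero cuts. One portability note: minCut_1 does cut = range(-1, size) and then assigns into it, which only works on Python 2 where range returned a list. A Python 3 sketch of the same recurrence in the minCut_3 style:

    def min_cut(s: str) -> int:
        # cut[k] = minimum cuts needed for the prefix s[:k]; cut[0] = -1
        # so that a palindromic prefix contributes zero cuts.
        cut = list(range(-1, len(s)))
        for i in range(len(s)):
            for j in range(i, len(s)):
                if s[i:j + 1] == s[i:j + 1][::-1]:   # s[i..j] is a palindrome
                    cut[j + 1] = min(cut[j + 1], cut[i] + 1)
        return cut[-1]

    print(min_cut('aab'))  # 1 -> split as "aa" | "b"
    print(min_cut('aba'))  # 0 -> already a palindrome

The quadratic slicing keeps the sketch short; the record's minCut_2 reaches the same answers faster by expanding around each center so every palindrome check is O(1), which is where its 38ms timing comes from.
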
subdev._connected()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "SuperDevice", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SuperDevice:\n\n def _setup(self):\n \"\"\"Pre-connection setup.\"\"\"\n <|body_0|>\n\n def _connected(self):\n \"\"\"Post-connection setup.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = self.__class__.__name__\n self.resources = {}\n self.subdevices = {}\n<|end_body_0|>\n\n<|body_start_1|>\n if hasattr(self, 'driver') and self.driver == drivers.lgpib:\n if hasattr(self, 'eos_char'):\n self.device.config(gpib.IbcEOSchar, ord(self.eos_char))\n self.device.config(gpib.IbcEOSrd, 1)\n try:\n log.debug('GPIB device IDN: {0!r}'.format(self.idn))\n except gpib.GpibError as e:\n raise DeviceNotFoundError('Could not open device at \"{0}\".'.format(self.connection_resource), e)\n elif str(self) == '':\n self.device.read_termination = u'\\r'\n for name, subdev in self.subdevices.items():\n log.debug('Post-connection for subdevice \"{0}\".'.format(name))\n subdev._connected()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000075", "length_bytes": 18795, "license_type": "permissive", "methods": [{"docstring": "Pre-connection setup.", "name": "_setup", "signature": "def _setup(self)"}, {"docstring": "Post-connection setup.", "name": "_connected", "signature": "def _connected(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004944", "prompt": "Implement the Python class `SuperDevice` described below.\n\nClass description:\nImplement the SuperDevice class.\n\nMethod signatures and docstrings:\n- def _setup(self): Pre-connection setup.\n- def _connected(self): Post-connection setup.", "prompted_full_text": "Implement the Python class `SuperDevice` described below.\n\nClass description:\nImplement the SuperDevice class.\n\nMethod signatures and docstrings:\n- def _setup(self): Pre-connection setup.\n- def _connected(self): Post-connection setup.\n\n<|skeleton|>\nclass SuperDevice:\n\n def _setup(self):\n \"\"\"Pre-connection setup.\"\"\"\n <|body_0|>\n\n def _connected(self):\n \"\"\"Post-connection setup.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = self.__class__.__name__\n self.resources = {}\n self.subdevices = {}\n<|end_body_0|>\n\n<|body_start_1|>\n if hasattr(self, 'driver') and self.driver == drivers.lgpib:\n if hasattr(self, 'eos_char'):\n self.device.config(gpib.IbcEOSchar, ord(self.eos_char))\n self.device.config(gpib.IbcEOSrd, 1)\n try:\n log.debug('GPIB device IDN: {0!r}'.format(self.idn))\n except gpib.GpibError as e:\n raise DeviceNotFoundError('Could not open device at \"{0}\".'.format(self.connection_resource), e)\n elif str(self) == '':\n self.device.read_termination = u'\\r'\n for name, subdev in self.subdevices.items():\n log.debug('Post-connection for subdevice \"{0}\".'.format(name))\n subdev._connected()\n<|end_body_1|>\n", "revision_id": "f319a117fef7189d6dcc91124bd28ab3601e325e", "skeleton": "<|skeleton|>\nclass SuperDevice:\n\n def _setup(self):\n \"\"\"Pre-connection setup.\"\"\"\n <|body_0|>\n\n def _connected(self):\n \"\"\"Post-connection setup.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SuperDevice:\n def _setup(self):\n \"\"\"Pre-connection setup.\"\"\"\n self.name = self.__class__.__name__\n self.resources = {}\n self.subdevices = {}\n\n def 
_connected(self):\n \"\"\"Post-connection setup.\"\"\"\n if hasattr(self, 'driver') and self.driver == drivers.lgpib:\n if hasattr(self, 'eos_char'):\n self.device.config(gpib.IbcEOSchar, ord(self.eos_char))\n self.device.config(gpib.IbcEOSrd, 1)\n try:\n log.debug('GPIB device IDN: {0!r}'.format(self.idn))\n except gpib.GpibError as e:\n raise DeviceNotFoundError('Could not open device at \"{0}\".'.format(self.connection_resource), e)\n elif str(self) == '':\n self.device.read_termination = u'\\r'\n for name, subdev in self.subdevices.items():\n log.debug('Post-connection for subdevice \"{0}\".'.format(name))\n subdev._connected()\n", "source": "the_stack_v2_python_sparse", "source_path": "spacq/devices/abstract_device.py", "source_repo": "mainCSG/SpanishAcquisitionIQC", "split": "val", "star_events_count": 1} {"blob_id": "b69b381a3671847a133a418d8b950dd064427f51", "bodies": ["if stream is not None:\n self.stream = stream\nelse:\n self.stream = StringIO()", "write = self.stream.write\ntptypes = getToolByName(target, 'portal_types', None)\nif tptypes is None:\n write('No portal_skins')\nelif not tptypes.getTypeInfo(type_name):\n tptypes.addType(type_name, fti[0])\n write('Added type object for %s \\n' % type_name)\nelse:\n write('Skipping type object for %s (already exists) \\n' % type_name)", "skins = getToolByName(target, 'portal_skins', None)\nwrite = self.stream.write\nif skins._getOb(view, None) is not None:\n write(\"Failed to register view '%s' (already exists)\\n\" % view)\n return view\nfound = 0\ndw_path = os.path.join(minimalpath(package_home(globals())), *view.split('/'))\ndw_path = re.sub('\\\\\\\\', '/', dw_path)\nfor dir_path in DirectoryView.manage_listAvailableDirectories():\n if dir_path.endswith(dw_path):\n found = 1\n break\nif not found:\n write(\"Failed to register view '%s' (directory not found)\\n\" % view)\n return view\ndw_path = dw_path.replace('\\\\', '/')\nDirectoryView.manage_addDirectoryView(skins, dw_path)\nwrite(\"Registered view '%s' = '%s'\\n\" % (view, dw_path))\nreturn view", "self.skin_name = skin_name\nself.skin_path = skin_path\nskins = getToolByName(target, 'portal_skins', None)\nif skins is None:\n return\nskin_paths = skins.getSkinPaths()\ninclude = ()\nfound = 0\nfor id, path in skin_paths:\n if id == self.skin_name:\n paths = path.split(', ')\n include = filter(lambda x, cp=paths: x not in cp, views) + paths\n found = 1\nif not found:\n default_path = self.skin_path.split(', ')\n include = views + default_path\nskins.manage_properties(add_skin=1, skinname=self.skin_name, skinpath=join(include, ', '))\nskins.default_skin = self.skin_name"], "bodies_text": "<|body_start_0|>\n if stream is not None:\n self.stream = stream\n else:\n self.stream = StringIO()\n<|end_body_0|>\n\n<|body_start_1|>\n write = self.stream.write\n tptypes = getToolByName(target, 'portal_types', None)\n if tptypes is None:\n write('No portal_skins')\n elif not tptypes.getTypeInfo(type_name):\n tptypes.addType(type_name, fti[0])\n write('Added type object for %s \\n' % type_name)\n else:\n write('Skipping type object for %s (already exists) \\n' % type_name)\n<|end_body_1|>\n\n<|body_start_2|>\n skins = getToolByName(target, 'portal_skins', None)\n write = self.stream.write\n if skins._getOb(view, None) is not None:\n write(\"Failed to register view '%s' (already exists)\\n\" % view)\n return view\n found = 0\n dw_path = os.path.join(minimalpath(package_home(globals())), *view.split('/'))\n dw_path = re.sub('\\\\\\\\', '/', dw_path)\n for dir_path in 
DirectoryView.manage_listAvailableDirectories():\n if dir_path.endswith(dw_path):\n found = 1\n break\n if not found:\n write(\"Failed to register view '%s' (directory not found)\\n\" % view)\n return view\n dw_path = dw_path.replace('\\\\', '/')\n DirectoryView.manage_addDirectoryView(skins, dw_path)\n write(\"Registered view '%s' = '%s'\\n\" % (view, dw_path))\n return view\n<|end_body_2|>\n\n<|body_start_3|>\n self.skin_name = skin_name\n self.skin_path = skin_path\n skins = getToolByName(target, 'portal_skins', None)\n if skins is None:\n return\n skin_paths = skins.getSkinPaths()\n include = ()\n found = 0\n for id, path in skin_paths:\n if id == self.skin_name:\n paths = path.split(', ')\n include = filter(lambda x, cp=paths: x not in cp, views) + paths\n found = 1\n if not found:\n default_path = self.skin_path.split(', ')\n include = views + default_path\n skins.manage_properties(add_skin=1, skinname=self.skin_name, skinpath=join(include, ', '))\n skins.default_skin = self.skin_name\n<|end_body_3|>\n", "class_docstring": "A suite of methods deploying CMF site", "class_name": "ManageCMFContent", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ManageCMFContent:\n \"\"\"A suite of methods deploying CMF site\"\"\"\n\n def __init__(self, stream=None):\n \"\"\"Stream is expected to be some writable file object, like a StringIO, that output will be sent to\"\"\"\n <|body_0|>\n\n def deploy_class(self, target, type_name, fti):\n \"\"\"Register a new type\"\"\"\n <|body_1|>\n\n def register_view(self, target, view):\n \"\"\"Register a directory view\"\"\"\n <|body_2|>\n\n def set_skin(self, target, views, skin_name='Site', skin_path='custom, topic, content, generic, control, Images'):\n \"\"\"Create a new skin\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if stream is not None:\n self.stream = stream\n else:\n self.stream = StringIO()\n<|end_body_0|>\n\n<|body_start_1|>\n write = self.stream.write\n tptypes = getToolByName(target, 'portal_types', None)\n if tptypes is None:\n write('No portal_skins')\n elif not tptypes.getTypeInfo(type_name):\n tptypes.addType(type_name, fti[0])\n write('Added type object for %s \\n' % type_name)\n else:\n write('Skipping type object for %s (already exists) \\n' % type_name)\n<|end_body_1|>\n\n<|body_start_2|>\n skins = getToolByName(target, 'portal_skins', None)\n write = self.stream.write\n if skins._getOb(view, None) is not None:\n write(\"Failed to register view '%s' (already exists)\\n\" % view)\n return view\n found = 0\n dw_path = os.path.join(minimalpath(package_home(globals())), *view.split('/'))\n dw_path = re.sub('\\\\\\\\', '/', dw_path)\n for dir_path in DirectoryView.manage_listAvailableDirectories():\n if dir_path.endswith(dw_path):\n found = 1\n break\n if not found:\n write(\"Failed to register view '%s' (directory not found)\\n\" % view)\n return view\n dw_path = dw_path.replace('\\\\', '/')\n DirectoryView.manage_addDirectoryView(skins, dw_path)\n write(\"Registered view '%s' = '%s'\\n\" % (view, dw_path))\n return view\n<|end_body_2|>\n\n<|body_start_3|>\n self.skin_name = skin_name\n self.skin_path = skin_path\n skins = getToolByName(target, 'portal_skins', None)\n if skins is None:\n return\n skin_paths = skins.getSkinPaths()\n include = ()\n found = 0\n for id, path in skin_paths:\n if id == self.skin_name:\n paths = path.split(', ')\n include = filter(lambda x, cp=paths: x not in cp, views) + paths\n found = 1\n if not found:\n default_path = 
self.skin_path.split(', ')\n include = views + default_path\n skins.manage_properties(add_skin=1, skinname=self.skin_name, skinpath=join(include, ', '))\n skins.default_skin = self.skin_name\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000076", "length_bytes": 3370, "license_type": "no_license", "methods": [{"docstring": "Stream is expected to be some writable file object, like a StringIO, that output will be sent to", "name": "__init__", "signature": "def __init__(self, stream=None)"}, {"docstring": "Register a new type", "name": "deploy_class", "signature": "def deploy_class(self, target, type_name, fti)"}, {"docstring": "Register a directory view", "name": "register_view", "signature": "def register_view(self, target, view)"}, {"docstring": "Create a new skin", "name": "set_skin", "signature": "def set_skin(self, target, views, skin_name='Site', skin_path='custom, topic, content, generic, control, Images')"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_003881", "prompt": "Implement the Python class `ManageCMFContent` described below.\n\nClass description:\nA suite of methods deploying CMF site\n\nMethod signatures and docstrings:\n- def __init__(self, stream=None): Stream is expected to be some writable file object, like a StringIO, that output will be sent to\n- def deploy_class(self, target, type_name, fti): Register a new type\n- def register_view(self, target, view): Register a directory view\n- def set_skin(self, target, views, skin_name='Site', skin_path='custom, topic, content, generic, control, Images'): Create a new skin", "prompted_full_text": "Implement the Python class `ManageCMFContent` described below.\n\nClass description:\nA suite of methods deploying CMF site\n\nMethod signatures and docstrings:\n- def __init__(self, stream=None): Stream is expected to be some writable file object, like a StringIO, that output will be sent to\n- def deploy_class(self, target, type_name, fti): Register a new type\n- def register_view(self, target, view): Register a directory view\n- def set_skin(self, target, views, skin_name='Site', skin_path='custom, topic, content, generic, control, Images'): Create a new skin\n\n<|skeleton|>\nclass ManageCMFContent:\n \"\"\"A suite of methods deploying CMF site\"\"\"\n\n def __init__(self, stream=None):\n \"\"\"Stream is expected to be some writable file object, like a StringIO, that output will be sent to\"\"\"\n <|body_0|>\n\n def deploy_class(self, target, type_name, fti):\n \"\"\"Register a new type\"\"\"\n <|body_1|>\n\n def register_view(self, target, view):\n \"\"\"Register a directory view\"\"\"\n <|body_2|>\n\n def set_skin(self, target, views, skin_name='Site', skin_path='custom, topic, content, generic, control, Images'):\n \"\"\"Create a new skin\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if stream is not None:\n self.stream = stream\n else:\n self.stream = StringIO()\n<|end_body_0|>\n\n<|body_start_1|>\n write = self.stream.write\n tptypes = getToolByName(target, 'portal_types', None)\n if tptypes is None:\n write('No portal_skins')\n elif not tptypes.getTypeInfo(type_name):\n tptypes.addType(type_name, fti[0])\n write('Added type object for %s \\n' % type_name)\n else:\n write('Skipping type object for %s (already exists) \\n' % type_name)\n<|end_body_1|>\n\n<|body_start_2|>\n skins = getToolByName(target, 'portal_skins', None)\n write = self.stream.write\n if skins._getOb(view, None) is not None:\n write(\"Failed to register view '%s' (already exists)\\n\" % view)\n return view\n 
found = 0\n dw_path = os.path.join(minimalpath(package_home(globals())), *view.split('/'))\n dw_path = re.sub('\\\\\\\\', '/', dw_path)\n for dir_path in DirectoryView.manage_listAvailableDirectories():\n if dir_path.endswith(dw_path):\n found = 1\n break\n if not found:\n write(\"Failed to register view '%s' (directory not found)\\n\" % view)\n return view\n dw_path = dw_path.replace('\\\\', '/')\n DirectoryView.manage_addDirectoryView(skins, dw_path)\n write(\"Registered view '%s' = '%s'\\n\" % (view, dw_path))\n return view\n<|end_body_2|>\n\n<|body_start_3|>\n self.skin_name = skin_name\n self.skin_path = skin_path\n skins = getToolByName(target, 'portal_skins', None)\n if skins is None:\n return\n skin_paths = skins.getSkinPaths()\n include = ()\n found = 0\n for id, path in skin_paths:\n if id == self.skin_name:\n paths = path.split(', ')\n include = filter(lambda x, cp=paths: x not in cp, views) + paths\n found = 1\n if not found:\n default_path = self.skin_path.split(', ')\n include = views + default_path\n skins.manage_properties(add_skin=1, skinname=self.skin_name, skinpath=join(include, ', '))\n skins.default_skin = self.skin_name\n<|end_body_3|>\n", "revision_id": "bdf3ad7c1ec4bcdec08000bf4ac5315ca6a0ad19", "skeleton": "<|skeleton|>\nclass ManageCMFContent:\n \"\"\"A suite of methods deploying CMF site\"\"\"\n\n def __init__(self, stream=None):\n \"\"\"Stream is expected to be some writable file object, like a StringIO, that output will be sent to\"\"\"\n <|body_0|>\n\n def deploy_class(self, target, type_name, fti):\n \"\"\"Register a new type\"\"\"\n <|body_1|>\n\n def register_view(self, target, view):\n \"\"\"Register a directory view\"\"\"\n <|body_2|>\n\n def set_skin(self, target, views, skin_name='Site', skin_path='custom, topic, content, generic, control, Images'):\n \"\"\"Create a new skin\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ManageCMFContent:\n \"\"\"A suite of methods deploying CMF site\"\"\"\n\n def __init__(self, stream=None):\n \"\"\"Stream is expected to be some writable file object, like a StringIO, that output will be sent to\"\"\"\n if stream is not None:\n self.stream = stream\n else:\n self.stream = StringIO()\n\n def deploy_class(self, target, type_name, fti):\n \"\"\"Register a new type\"\"\"\n write = self.stream.write\n tptypes = getToolByName(target, 'portal_types', None)\n if tptypes is None:\n write('No portal_skins')\n elif not tptypes.getTypeInfo(type_name):\n tptypes.addType(type_name, fti[0])\n write('Added type object for %s \\n' % type_name)\n else:\n write('Skipping type object for %s (already exists) \\n' % type_name)\n\n def register_view(self, target, view):\n \"\"\"Register a directory view\"\"\"\n skins = getToolByName(target, 'portal_skins', None)\n write = self.stream.write\n if skins._getOb(view, None) is not None:\n write(\"Failed to register view '%s' (already exists)\\n\" % view)\n return view\n found = 0\n dw_path = os.path.join(minimalpath(package_home(globals())), *view.split('/'))\n dw_path = re.sub('\\\\\\\\', '/', dw_path)\n for dir_path in DirectoryView.manage_listAvailableDirectories():\n if dir_path.endswith(dw_path):\n found = 1\n break\n if not found:\n write(\"Failed to register view '%s' (directory not found)\\n\" % view)\n return view\n dw_path = dw_path.replace('\\\\', '/')\n DirectoryView.manage_addDirectoryView(skins, dw_path)\n write(\"Registered view '%s' = '%s'\\n\" % (view, 
dw_path))\n return view\n\n def set_skin(self, target, views, skin_name='Site', skin_path='custom, topic, content, generic, control, Images'):\n \"\"\"Create a new skin\"\"\"\n self.skin_name = skin_name\n self.skin_path = skin_path\n skins = getToolByName(target, 'portal_skins', None)\n if skins is None:\n return\n skin_paths = skins.getSkinPaths()\n include = ()\n found = 0\n for id, path in skin_paths:\n if id == self.skin_name:\n paths = path.split(', ')\n include = filter(lambda x, cp=paths: x not in cp, views) + paths\n found = 1\n if not found:\n default_path = self.skin_path.split(', ')\n include = views + default_path\n skins.manage_properties(add_skin=1, skinname=self.skin_name, skinpath=join(include, ', '))\n skins.default_skin = self.skin_name\n", "source": "the_stack_v2_python_sparse", "source_path": "ExpressSuiteTools/ManageCMFContent.py", "source_repo": "ichar/Express-Suite-DMS", "split": "val", "star_events_count": 0} {"blob_id": "ead0fae3389d71eb8d925130acc3e55a94eb37a0", "bodies": ["validation_details = self._validate_keywords_extraction_params(request.data)\nif not validation_details['status']:\n return Response(validation_details['error_data'], status=status.HTTP_400_BAD_REQUEST)\nparams = validation_details['params']\ndoc = params['document']\nargs = (doc, params['max_grams']) if params['max_grams'] else (doc,)\nkey_ngrams = get_key_ngrams(*args, include_numbers=params.get('include_numbers', False))\nreturn Response(key_ngrams)", "errors = {}\nparams = {}\nif not queryparams.get('document'):\n errors['document'] = 'document should be present'\nelse:\n params['document'] = queryparams['document']\nif queryparams.get('max_grams'):\n try:\n params['max_grams'] = int(queryparams['max_grams'])\n if params['max_grams'] < 1:\n raise Exception\n except Exception as e:\n errors['max_grams'] = 'max_grams, if present, should be a positive integer'\nelse:\n params['max_grams'] = None\ninc_numbers = queryparams.get('include_numbers')\nif inc_numbers and (inc_numbers == 'true' or inc_numbers == '1'):\n params['include_numbers'] = True\nif errors:\n return {'status': False, 'error_data': errors}\nreturn {'status': True, 'params': params}"], "bodies_text": "<|body_start_0|>\n validation_details = self._validate_keywords_extraction_params(request.data)\n if not validation_details['status']:\n return Response(validation_details['error_data'], status=status.HTTP_400_BAD_REQUEST)\n params = validation_details['params']\n doc = params['document']\n args = (doc, params['max_grams']) if params['max_grams'] else (doc,)\n key_ngrams = get_key_ngrams(*args, include_numbers=params.get('include_numbers', False))\n return Response(key_ngrams)\n<|end_body_0|>\n\n<|body_start_1|>\n errors = {}\n params = {}\n if not queryparams.get('document'):\n errors['document'] = 'document should be present'\n else:\n params['document'] = queryparams['document']\n if queryparams.get('max_grams'):\n try:\n params['max_grams'] = int(queryparams['max_grams'])\n if params['max_grams'] < 1:\n raise Exception\n except Exception as e:\n errors['max_grams'] = 'max_grams, if present, should be a positive integer'\n else:\n params['max_grams'] = None\n inc_numbers = queryparams.get('include_numbers')\n if inc_numbers and (inc_numbers == 'true' or inc_numbers == '1'):\n params['include_numbers'] = True\n if errors:\n return {'status': False, 'error_data': errors}\n return {'status': True, 'params': params}\n<|end_body_1|>\n", "class_docstring": "", "class_name": "KeywordsExtractionView", "detected_licenses": ["MIT"], 
"format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass KeywordsExtractionView:\n\n def post(self, request):\n \"\"\"Handle API POST request\"\"\"\n <|body_0|>\n\n def _validate_keywords_extraction_params(self, queryparams):\n \"\"\"Validator for params\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n validation_details = self._validate_keywords_extraction_params(request.data)\n if not validation_details['status']:\n return Response(validation_details['error_data'], status=status.HTTP_400_BAD_REQUEST)\n params = validation_details['params']\n doc = params['document']\n args = (doc, params['max_grams']) if params['max_grams'] else (doc,)\n key_ngrams = get_key_ngrams(*args, include_numbers=params.get('include_numbers', False))\n return Response(key_ngrams)\n<|end_body_0|>\n\n<|body_start_1|>\n errors = {}\n params = {}\n if not queryparams.get('document'):\n errors['document'] = 'document should be present'\n else:\n params['document'] = queryparams['document']\n if queryparams.get('max_grams'):\n try:\n params['max_grams'] = int(queryparams['max_grams'])\n if params['max_grams'] < 1:\n raise Exception\n except Exception as e:\n errors['max_grams'] = 'max_grams, if present, should be a positive integer'\n else:\n params['max_grams'] = None\n inc_numbers = queryparams.get('include_numbers')\n if inc_numbers and (inc_numbers == 'true' or inc_numbers == '1'):\n params['include_numbers'] = True\n if errors:\n return {'status': False, 'error_data': errors}\n return {'status': True, 'params': params}\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000077", "length_bytes": 18288, "license_type": "permissive", "methods": [{"docstring": "Handle API POST request", "name": "post", "signature": "def post(self, request)"}, {"docstring": "Validator for params", "name": "_validate_keywords_extraction_params", "signature": "def _validate_keywords_extraction_params(self, queryparams)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005253", "prompt": "Implement the Python class `KeywordsExtractionView` described below.\n\nClass description:\nImplement the KeywordsExtractionView class.\n\nMethod signatures and docstrings:\n- def post(self, request): Handle API POST request\n- def _validate_keywords_extraction_params(self, queryparams): Validator for params", "prompted_full_text": "Implement the Python class `KeywordsExtractionView` described below.\n\nClass description:\nImplement the KeywordsExtractionView class.\n\nMethod signatures and docstrings:\n- def post(self, request): Handle API POST request\n- def _validate_keywords_extraction_params(self, queryparams): Validator for params\n\n<|skeleton|>\nclass KeywordsExtractionView:\n\n def post(self, request):\n \"\"\"Handle API POST request\"\"\"\n <|body_0|>\n\n def _validate_keywords_extraction_params(self, queryparams):\n \"\"\"Validator for params\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n validation_details = self._validate_keywords_extraction_params(request.data)\n if not validation_details['status']:\n return Response(validation_details['error_data'], status=status.HTTP_400_BAD_REQUEST)\n params = validation_details['params']\n doc = params['document']\n args = (doc, params['max_grams']) if params['max_grams'] else (doc,)\n key_ngrams = get_key_ngrams(*args, include_numbers=params.get('include_numbers', False))\n return Response(key_ngrams)\n<|end_body_0|>\n\n<|body_start_1|>\n errors = {}\n params = {}\n if not queryparams.get('document'):\n 
errors['document'] = 'document should be present'\n else:\n params['document'] = queryparams['document']\n if queryparams.get('max_grams'):\n try:\n params['max_grams'] = int(queryparams['max_grams'])\n if params['max_grams'] < 1:\n raise Exception\n except Exception as e:\n errors['max_grams'] = 'max_grams, if present, should be a positive integer'\n else:\n params['max_grams'] = None\n inc_numbers = queryparams.get('include_numbers')\n if inc_numbers and (inc_numbers == 'true' or inc_numbers == '1'):\n params['include_numbers'] = True\n if errors:\n return {'status': False, 'error_data': errors}\n return {'status': True, 'params': params}\n<|end_body_1|>\n", "revision_id": "93f7bf7d61d7424250d01c1fc510347375d767c4", "skeleton": "<|skeleton|>\nclass KeywordsExtractionView:\n\n def post(self, request):\n \"\"\"Handle API POST request\"\"\"\n <|body_0|>\n\n def _validate_keywords_extraction_params(self, queryparams):\n \"\"\"Validator for params\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class KeywordsExtractionView:\n def post(self, request):\n \"\"\"Handle API POST request\"\"\"\n validation_details = self._validate_keywords_extraction_params(request.data)\n if not validation_details['status']:\n return Response(validation_details['error_data'], status=status.HTTP_400_BAD_REQUEST)\n params = validation_details['params']\n doc = params['document']\n args = (doc, params['max_grams']) if params['max_grams'] else (doc,)\n key_ngrams = get_key_ngrams(*args, include_numbers=params.get('include_numbers', False))\n return Response(key_ngrams)\n\n def _validate_keywords_extraction_params(self, queryparams):\n \"\"\"Validator for params\"\"\"\n errors = {}\n params = {}\n if not queryparams.get('document'):\n errors['document'] = 'document should be present'\n else:\n params['document'] = queryparams['document']\n if queryparams.get('max_grams'):\n try:\n params['max_grams'] = int(queryparams['max_grams'])\n if params['max_grams'] < 1:\n raise Exception\n except Exception as e:\n errors['max_grams'] = 'max_grams, if present, should be a positive integer'\n else:\n params['max_grams'] = None\n inc_numbers = queryparams.get('include_numbers')\n if inc_numbers and (inc_numbers == 'true' or inc_numbers == '1'):\n params['include_numbers'] = True\n if errors:\n return {'status': False, 'error_data': errors}\n return {'status': True, 'params': params}\n", "source": "the_stack_v2_python_sparse", "source_path": "api/views/view_main.py", "source_repo": "the-deep/DEEPL", "split": "val", "star_events_count": 6} {"blob_id": "77ea59edc75bc37bbb84ebc9e8b4a6a458150b94", "bodies": ["name = read_unicode_string(fp)\nclassID = read_length_and_key(fp)\nreturn cls(name, classID)", "written = write_unicode_string(fp, self.name)\nwritten += write_length_and_key(fp, self.classID)\nreturn written"], "bodies_text": "<|body_start_0|>\n name = read_unicode_string(fp)\n classID = read_length_and_key(fp)\n return cls(name, classID)\n<|end_body_0|>\n\n<|body_start_1|>\n written = write_unicode_string(fp, self.name)\n written += write_length_and_key(fp, self.classID)\n return written\n<|end_body_1|>\n", "class_docstring": "Class structure. .. py:attribute:: name .. py:attribute:: classID", "class_name": "Class", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Class:\n \"\"\"Class structure. .. py:attribute:: name .. 
py:attribute:: classID\"\"\"\n\n def read(cls, fp):\n \"\"\"Read the element from a file-like object. :param fp: file-like object\"\"\"\n <|body_0|>\n\n def write(self, fp):\n \"\"\"Write the element to a file-like object. :param fp: file-like object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n name = read_unicode_string(fp)\n classID = read_length_and_key(fp)\n return cls(name, classID)\n<|end_body_0|>\n\n<|body_start_1|>\n written = write_unicode_string(fp, self.name)\n written += write_length_and_key(fp, self.classID)\n return written\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000078", "length_bytes": 19890, "license_type": "permissive", "methods": [{"docstring": "Read the element from a file-like object. :param fp: file-like object", "name": "read", "signature": "def read(cls, fp)"}, {"docstring": "Write the element to a file-like object. :param fp: file-like object", "name": "write", "signature": "def write(self, fp)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005224", "prompt": "Implement the Python class `Class` described below.\n\nClass description:\nClass structure. .. py:attribute:: name .. py:attribute:: classID\n\nMethod signatures and docstrings:\n- def read(cls, fp): Read the element from a file-like object. :param fp: file-like object\n- def write(self, fp): Write the element to a file-like object. :param fp: file-like object", "prompted_full_text": "Implement the Python class `Class` described below.\n\nClass description:\nClass structure. .. py:attribute:: name .. py:attribute:: classID\n\nMethod signatures and docstrings:\n- def read(cls, fp): Read the element from a file-like object. :param fp: file-like object\n- def write(self, fp): Write the element to a file-like object. :param fp: file-like object\n\n<|skeleton|>\nclass Class:\n \"\"\"Class structure. .. py:attribute:: name .. py:attribute:: classID\"\"\"\n\n def read(cls, fp):\n \"\"\"Read the element from a file-like object. :param fp: file-like object\"\"\"\n <|body_0|>\n\n def write(self, fp):\n \"\"\"Write the element to a file-like object. :param fp: file-like object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n name = read_unicode_string(fp)\n classID = read_length_and_key(fp)\n return cls(name, classID)\n<|end_body_0|>\n\n<|body_start_1|>\n written = write_unicode_string(fp, self.name)\n written += write_length_and_key(fp, self.classID)\n return written\n<|end_body_1|>\n", "revision_id": "0e3ac5b64061c7eb87c6eeacce4b9792d1f479b5", "skeleton": "<|skeleton|>\nclass Class:\n \"\"\"Class structure. .. py:attribute:: name .. py:attribute:: classID\"\"\"\n\n def read(cls, fp):\n \"\"\"Read the element from a file-like object. :param fp: file-like object\"\"\"\n <|body_0|>\n\n def write(self, fp):\n \"\"\"Write the element to a file-like object. :param fp: file-like object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Class:\n \"\"\"Class structure. .. py:attribute:: name .. py:attribute:: classID\"\"\"\n\n def read(cls, fp):\n \"\"\"Read the element from a file-like object. :param fp: file-like object\"\"\"\n name = read_unicode_string(fp)\n classID = read_length_and_key(fp)\n return cls(name, classID)\n\n def write(self, fp):\n \"\"\"Write the element to a file-like object. 
:param fp: file-like object\"\"\"\n written = write_unicode_string(fp, self.name)\n written += write_length_and_key(fp, self.classID)\n return written\n", "source": "the_stack_v2_python_sparse", "source_path": "psd_tools/psd/descriptor.py", "source_repo": "sfneal/psd-tools3", "split": "val", "star_events_count": 30} {"blob_id": "2fa59c0b732eb404a7d2f4189d510c9b5a21325b", "bodies": ["if value is None or value == CommonPuddleLiquid.INVALID:\n return PuddleLiquid.INVALID\nif isinstance(value, PuddleLiquid):\n return value\nmapping = dict()\nif hasattr(PuddleLiquid, 'WATER'):\n mapping[CommonPuddleLiquid.WATER] = PuddleLiquid.WATER\nif hasattr(PuddleLiquid, 'Dark Matter'):\n mapping[CommonPuddleLiquid.DARK_MATTER] = getattr(PuddleLiquid, 'Dark Matter')\nif hasattr(PuddleLiquid, 'GreenGoo'):\n mapping[CommonPuddleLiquid.GREEN_GOO] = PuddleLiquid.GreenGoo\nif hasattr(PuddleLiquid, 'Vomit'):\n mapping[CommonPuddleLiquid.VOMIT] = PuddleLiquid.Vomit\nif hasattr(PuddleLiquid, 'Mud'):\n mapping[CommonPuddleLiquid.MUD] = PuddleLiquid.Mud\nif hasattr(PuddleLiquid, 'Acid'):\n mapping[CommonPuddleLiquid.ACID] = PuddleLiquid.Acid\nreturn mapping.get(value, PuddleLiquid.INVALID)", "if value is None or value == CommonPuddleLiquid.INVALID:\n return PuddleLiquid.INVALID\nif isinstance(value, CommonPuddleLiquid):\n return value\nmapping = dict()\nif hasattr(PuddleLiquid, 'WATER'):\n mapping[PuddleLiquid.WATER] = CommonPuddleLiquid.WATER\nif hasattr(PuddleLiquid, 'Dark Matter'):\n mapping[getattr(PuddleLiquid, 'Dark Matter')] = CommonPuddleLiquid.DARK_MATTER\nif hasattr(PuddleLiquid, 'GreenGoo'):\n mapping[PuddleLiquid.GreenGoo] = CommonPuddleLiquid.GREEN_GOO\nif hasattr(PuddleLiquid, 'Vomit'):\n mapping[PuddleLiquid.Vomit] = CommonPuddleLiquid.VOMIT\nif hasattr(PuddleLiquid, 'Mud'):\n mapping[PuddleLiquid.Mud] = CommonPuddleLiquid.MUD\nif hasattr(PuddleLiquid, 'Acid'):\n mapping[PuddleLiquid.Acid] = CommonPuddleLiquid.ACID\nreturn mapping.get(value, PuddleLiquid.INVALID)"], "bodies_text": "<|body_start_0|>\n if value is None or value == CommonPuddleLiquid.INVALID:\n return PuddleLiquid.INVALID\n if isinstance(value, PuddleLiquid):\n return value\n mapping = dict()\n if hasattr(PuddleLiquid, 'WATER'):\n mapping[CommonPuddleLiquid.WATER] = PuddleLiquid.WATER\n if hasattr(PuddleLiquid, 'Dark Matter'):\n mapping[CommonPuddleLiquid.DARK_MATTER] = getattr(PuddleLiquid, 'Dark Matter')\n if hasattr(PuddleLiquid, 'GreenGoo'):\n mapping[CommonPuddleLiquid.GREEN_GOO] = PuddleLiquid.GreenGoo\n if hasattr(PuddleLiquid, 'Vomit'):\n mapping[CommonPuddleLiquid.VOMIT] = PuddleLiquid.Vomit\n if hasattr(PuddleLiquid, 'Mud'):\n mapping[CommonPuddleLiquid.MUD] = PuddleLiquid.Mud\n if hasattr(PuddleLiquid, 'Acid'):\n mapping[CommonPuddleLiquid.ACID] = PuddleLiquid.Acid\n return mapping.get(value, PuddleLiquid.INVALID)\n<|end_body_0|>\n\n<|body_start_1|>\n if value is None or value == CommonPuddleLiquid.INVALID:\n return PuddleLiquid.INVALID\n if isinstance(value, CommonPuddleLiquid):\n return value\n mapping = dict()\n if hasattr(PuddleLiquid, 'WATER'):\n mapping[PuddleLiquid.WATER] = CommonPuddleLiquid.WATER\n if hasattr(PuddleLiquid, 'Dark Matter'):\n mapping[getattr(PuddleLiquid, 'Dark Matter')] = CommonPuddleLiquid.DARK_MATTER\n if hasattr(PuddleLiquid, 'GreenGoo'):\n mapping[PuddleLiquid.GreenGoo] = CommonPuddleLiquid.GREEN_GOO\n if hasattr(PuddleLiquid, 'Vomit'):\n mapping[PuddleLiquid.Vomit] = CommonPuddleLiquid.VOMIT\n if hasattr(PuddleLiquid, 'Mud'):\n mapping[PuddleLiquid.Mud] = CommonPuddleLiquid.MUD\n if 
hasattr(PuddleLiquid, 'Acid'):\n mapping[PuddleLiquid.Acid] = CommonPuddleLiquid.ACID\n return mapping.get(value, PuddleLiquid.INVALID)\n<|end_body_1|>\n", "class_docstring": "Various types of liquids a puddle may have.", "class_name": "CommonPuddleLiquid", "detected_licenses": ["CC-BY-4.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CommonPuddleLiquid:\n \"\"\"Various types of liquids a puddle may have.\"\"\"\n\n def convert_to_vanilla(value: 'CommonPuddleLiquid') -> Union[PuddleLiquid, None]:\n \"\"\"convert_to_vanilla(value) Convert a value into the vanilla PuddleLiquid enum. :param value: An instance of a CommonPuddleLiquid :type value: CommonPuddleLiquid :return: The specified value translated to a PuddleLiquid or INVALID if the value could not be translated. :rtype: Union[PuddleLiquid, None]\"\"\"\n <|body_0|>\n\n def convert_from_vanilla(value: PuddleLiquid) -> Union['CommonPuddleLiquid', None]:\n \"\"\"convert_from_vanilla(value) Convert a value into a CommonPuddleLiquid enum. :param value: An instance of a PuddleLiquid :type value: PuddleLiquid :return: The specified value translated to a CommonPuddleLiquid or INVALID if the value could not be translated. :rtype: Union['CommonPuddleLiquid', None]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if value is None or value == CommonPuddleLiquid.INVALID:\n return PuddleLiquid.INVALID\n if isinstance(value, PuddleLiquid):\n return value\n mapping = dict()\n if hasattr(PuddleLiquid, 'WATER'):\n mapping[CommonPuddleLiquid.WATER] = PuddleLiquid.WATER\n if hasattr(PuddleLiquid, 'Dark Matter'):\n mapping[CommonPuddleLiquid.DARK_MATTER] = getattr(PuddleLiquid, 'Dark Matter')\n if hasattr(PuddleLiquid, 'GreenGoo'):\n mapping[CommonPuddleLiquid.GREEN_GOO] = PuddleLiquid.GreenGoo\n if hasattr(PuddleLiquid, 'Vomit'):\n mapping[CommonPuddleLiquid.VOMIT] = PuddleLiquid.Vomit\n if hasattr(PuddleLiquid, 'Mud'):\n mapping[CommonPuddleLiquid.MUD] = PuddleLiquid.Mud\n if hasattr(PuddleLiquid, 'Acid'):\n mapping[CommonPuddleLiquid.ACID] = PuddleLiquid.Acid\n return mapping.get(value, PuddleLiquid.INVALID)\n<|end_body_0|>\n\n<|body_start_1|>\n if value is None or value == CommonPuddleLiquid.INVALID:\n return PuddleLiquid.INVALID\n if isinstance(value, CommonPuddleLiquid):\n return value\n mapping = dict()\n if hasattr(PuddleLiquid, 'WATER'):\n mapping[PuddleLiquid.WATER] = CommonPuddleLiquid.WATER\n if hasattr(PuddleLiquid, 'Dark Matter'):\n mapping[getattr(PuddleLiquid, 'Dark Matter')] = CommonPuddleLiquid.DARK_MATTER\n if hasattr(PuddleLiquid, 'GreenGoo'):\n mapping[PuddleLiquid.GreenGoo] = CommonPuddleLiquid.GREEN_GOO\n if hasattr(PuddleLiquid, 'Vomit'):\n mapping[PuddleLiquid.Vomit] = CommonPuddleLiquid.VOMIT\n if hasattr(PuddleLiquid, 'Mud'):\n mapping[PuddleLiquid.Mud] = CommonPuddleLiquid.MUD\n if hasattr(PuddleLiquid, 'Acid'):\n mapping[PuddleLiquid.Acid] = CommonPuddleLiquid.ACID\n return mapping.get(value, PuddleLiquid.INVALID)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000079", "length_bytes": 3607, "license_type": "permissive", "methods": [{"docstring": "convert_to_vanilla(value) Convert a value into the vanilla PuddleLiquid enum. :param value: An instance of a CommonPuddleLiquid :type value: CommonPuddleLiquid :return: The specified value translated to a PuddleLiquid or INVALID if the value could not be translated. 
:rtype: Union[PuddleLiquid, None]", "name": "convert_to_vanilla", "signature": "def convert_to_vanilla(value: 'CommonPuddleLiquid') -> Union[PuddleLiquid, None]"}, {"docstring": "convert_from_vanilla(value) Convert a value into a CommonPuddleLiquid enum. :param value: An instance of a PuddleLiquid :type value: PuddleLiquid :return: The specified value translated to a CommonPuddleLiquid or INVALID if the value could not be translated. :rtype: Union['CommonPuddleLiquid', None]", "name": "convert_from_vanilla", "signature": "def convert_from_vanilla(value: PuddleLiquid) -> Union['CommonPuddleLiquid', None]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003836", "prompt": "Implement the Python class `CommonPuddleLiquid` described below.\n\nClass description:\nVarious types of liquids a puddle may have.\n\nMethod signatures and docstrings:\n- def convert_to_vanilla(value: 'CommonPuddleLiquid') -> Union[PuddleLiquid, None]: convert_to_vanilla(value) Convert a value into the vanilla PuddleLiquid enum. :param value: An instance of a CommonPuddleLiquid :type value: CommonPuddleLiquid :return: The specified value translated to a PuddleLiquid or INVALID if the value could not be translated. :rtype: Union[PuddleLiquid, None]\n- def convert_from_vanilla(value: PuddleLiquid) -> Union['CommonPuddleLiquid', None]: convert_from_vanilla(value) Convert a value into a CommonPuddleLiquid enum. :param value: An instance of a PuddleLiquid :type value: PuddleLiquid :return: The specified value translated to a CommonPuddleLiquid or INVALID if the value could not be translated. :rtype: Union['CommonPuddleLiquid', None]", "prompted_full_text": "Implement the Python class `CommonPuddleLiquid` described below.\n\nClass description:\nVarious types of liquids a puddle may have.\n\nMethod signatures and docstrings:\n- def convert_to_vanilla(value: 'CommonPuddleLiquid') -> Union[PuddleLiquid, None]: convert_to_vanilla(value) Convert a value into the vanilla PuddleLiquid enum. :param value: An instance of a CommonPuddleLiquid :type value: CommonPuddleLiquid :return: The specified value translated to a PuddleLiquid or INVALID if the value could not be translated. :rtype: Union[PuddleLiquid, None]\n- def convert_from_vanilla(value: PuddleLiquid) -> Union['CommonPuddleLiquid', None]: convert_from_vanilla(value) Convert a value into a CommonPuddleLiquid enum. :param value: An instance of a PuddleLiquid :type value: PuddleLiquid :return: The specified value translated to a CommonPuddleLiquid or INVALID if the value could not be translated. :rtype: Union['CommonPuddleLiquid', None]\n\n<|skeleton|>\nclass CommonPuddleLiquid:\n \"\"\"Various types of liquids a puddle may have.\"\"\"\n\n def convert_to_vanilla(value: 'CommonPuddleLiquid') -> Union[PuddleLiquid, None]:\n \"\"\"convert_to_vanilla(value) Convert a value into the vanilla PuddleLiquid enum. :param value: An instance of a CommonPuddleLiquid :type value: CommonPuddleLiquid :return: The specified value translated to a PuddleLiquid or INVALID if the value could not be translated. :rtype: Union[PuddleLiquid, None]\"\"\"\n <|body_0|>\n\n def convert_from_vanilla(value: PuddleLiquid) -> Union['CommonPuddleLiquid', None]:\n \"\"\"convert_from_vanilla(value) Convert a value into a CommonPuddleLiquid enum. :param value: An instance of a PuddleLiquid :type value: PuddleLiquid :return: The specified value translated to a CommonPuddleLiquid or INVALID if the value could not be translated. 
:rtype: Union['CommonPuddleLiquid', None]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if value is None or value == CommonPuddleLiquid.INVALID:\n return PuddleLiquid.INVALID\n if isinstance(value, PuddleLiquid):\n return value\n mapping = dict()\n if hasattr(PuddleLiquid, 'WATER'):\n mapping[CommonPuddleLiquid.WATER] = PuddleLiquid.WATER\n if hasattr(PuddleLiquid, 'Dark Matter'):\n mapping[CommonPuddleLiquid.DARK_MATTER] = getattr(PuddleLiquid, 'Dark Matter')\n if hasattr(PuddleLiquid, 'GreenGoo'):\n mapping[CommonPuddleLiquid.GREEN_GOO] = PuddleLiquid.GreenGoo\n if hasattr(PuddleLiquid, 'Vomit'):\n mapping[CommonPuddleLiquid.VOMIT] = PuddleLiquid.Vomit\n if hasattr(PuddleLiquid, 'Mud'):\n mapping[CommonPuddleLiquid.MUD] = PuddleLiquid.Mud\n if hasattr(PuddleLiquid, 'Acid'):\n mapping[CommonPuddleLiquid.ACID] = PuddleLiquid.Acid\n return mapping.get(value, PuddleLiquid.INVALID)\n<|end_body_0|>\n\n<|body_start_1|>\n if value is None or value == CommonPuddleLiquid.INVALID:\n return PuddleLiquid.INVALID\n if isinstance(value, CommonPuddleLiquid):\n return value\n mapping = dict()\n if hasattr(PuddleLiquid, 'WATER'):\n mapping[PuddleLiquid.WATER] = CommonPuddleLiquid.WATER\n if hasattr(PuddleLiquid, 'Dark Matter'):\n mapping[getattr(PuddleLiquid, 'Dark Matter')] = CommonPuddleLiquid.DARK_MATTER\n if hasattr(PuddleLiquid, 'GreenGoo'):\n mapping[PuddleLiquid.GreenGoo] = CommonPuddleLiquid.GREEN_GOO\n if hasattr(PuddleLiquid, 'Vomit'):\n mapping[PuddleLiquid.Vomit] = CommonPuddleLiquid.VOMIT\n if hasattr(PuddleLiquid, 'Mud'):\n mapping[PuddleLiquid.Mud] = CommonPuddleLiquid.MUD\n if hasattr(PuddleLiquid, 'Acid'):\n mapping[PuddleLiquid.Acid] = CommonPuddleLiquid.ACID\n return mapping.get(value, PuddleLiquid.INVALID)\n<|end_body_1|>\n", "revision_id": "58e7beb30b9c818b294d35abd2436a0192cd3e82", "skeleton": "<|skeleton|>\nclass CommonPuddleLiquid:\n \"\"\"Various types of liquids a puddle may have.\"\"\"\n\n def convert_to_vanilla(value: 'CommonPuddleLiquid') -> Union[PuddleLiquid, None]:\n \"\"\"convert_to_vanilla(value) Convert a value into the vanilla PuddleLiquid enum. :param value: An instance of a CommonPuddleLiquid :type value: CommonPuddleLiquid :return: The specified value translated to a PuddleLiquid or INVALID if the value could not be translated. :rtype: Union[PuddleLiquid, None]\"\"\"\n <|body_0|>\n\n def convert_from_vanilla(value: PuddleLiquid) -> Union['CommonPuddleLiquid', None]:\n \"\"\"convert_from_vanilla(value) Convert a value into a CommonPuddleLiquid enum. :param value: An instance of a PuddleLiquid :type value: PuddleLiquid :return: The specified value translated to a CommonPuddleLiquid or INVALID if the value could not be translated. :rtype: Union['CommonPuddleLiquid', None]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CommonPuddleLiquid:\n \"\"\"Various types of liquids a puddle may have.\"\"\"\n\n def convert_to_vanilla(value: 'CommonPuddleLiquid') -> Union[PuddleLiquid, None]:\n \"\"\"convert_to_vanilla(value) Convert a value into the vanilla PuddleLiquid enum. :param value: An instance of a CommonPuddleLiquid :type value: CommonPuddleLiquid :return: The specified value translated to a PuddleLiquid or INVALID if the value could not be translated. 
:rtype: Union[PuddleLiquid, None]\"\"\"\n if value is None or value == CommonPuddleLiquid.INVALID:\n return PuddleLiquid.INVALID\n if isinstance(value, PuddleLiquid):\n return value\n mapping = dict()\n if hasattr(PuddleLiquid, 'WATER'):\n mapping[CommonPuddleLiquid.WATER] = PuddleLiquid.WATER\n if hasattr(PuddleLiquid, 'Dark Matter'):\n mapping[CommonPuddleLiquid.DARK_MATTER] = getattr(PuddleLiquid, 'Dark Matter')\n if hasattr(PuddleLiquid, 'GreenGoo'):\n mapping[CommonPuddleLiquid.GREEN_GOO] = PuddleLiquid.GreenGoo\n if hasattr(PuddleLiquid, 'Vomit'):\n mapping[CommonPuddleLiquid.VOMIT] = PuddleLiquid.Vomit\n if hasattr(PuddleLiquid, 'Mud'):\n mapping[CommonPuddleLiquid.MUD] = PuddleLiquid.Mud\n if hasattr(PuddleLiquid, 'Acid'):\n mapping[CommonPuddleLiquid.ACID] = PuddleLiquid.Acid\n return mapping.get(value, PuddleLiquid.INVALID)\n\n def convert_from_vanilla(value: PuddleLiquid) -> Union['CommonPuddleLiquid', None]:\n \"\"\"convert_from_vanilla(value) Convert a value into a CommonPuddleLiquid enum. :param value: An instance of a PuddleLiquid :type value: PuddleLiquid :return: The specified value translated to a CommonPuddleLiquid or INVALID if the value could not be translated. :rtype: Union['CommonPuddleLiquid', None]\"\"\"\n if value is None or value == CommonPuddleLiquid.INVALID:\n return PuddleLiquid.INVALID\n if isinstance(value, CommonPuddleLiquid):\n return value\n mapping = dict()\n if hasattr(PuddleLiquid, 'WATER'):\n mapping[PuddleLiquid.WATER] = CommonPuddleLiquid.WATER\n if hasattr(PuddleLiquid, 'Dark Matter'):\n mapping[getattr(PuddleLiquid, 'Dark Matter')] = CommonPuddleLiquid.DARK_MATTER\n if hasattr(PuddleLiquid, 'GreenGoo'):\n mapping[PuddleLiquid.GreenGoo] = CommonPuddleLiquid.GREEN_GOO\n if hasattr(PuddleLiquid, 'Vomit'):\n mapping[PuddleLiquid.Vomit] = CommonPuddleLiquid.VOMIT\n if hasattr(PuddleLiquid, 'Mud'):\n mapping[PuddleLiquid.Mud] = CommonPuddleLiquid.MUD\n if hasattr(PuddleLiquid, 'Acid'):\n mapping[PuddleLiquid.Acid] = CommonPuddleLiquid.ACID\n return mapping.get(value, PuddleLiquid.INVALID)\n", "source": "the_stack_v2_python_sparse", "source_path": "Scripts/sims4communitylib/enums/common_puddle_liquid.py", "source_repo": "ColonolNutty/Sims4CommunityLibrary", "split": "val", "star_events_count": 183} {"blob_id": "a0ddd5fcfae36a337bb1a6a35a0d26b8b6a78e0b", "bodies": ["if isinstance(attribute, XAttributeDiscrete):\n return XAttributeDiscrete\nelif isinstance(attribute, XAttributeLiteral):\n return XAttributeLiteral\nelif isinstance(attribute, XAttributeContinuous):\n return XAttributeContinuous\nelif isinstance(attribute, XAttributeBoolean):\n return XAttributeBoolean\nelif isinstance(attribute, XAttributeID):\n return XAttributeID\nelif isinstance(attribute, XAttributeList):\n return XAttributeList\nelif isinstance(attribute, XAttributeContainer):\n return XAttributeContainer\nelif isinstance(attribute, XAttributeTimestamp):\n return XAttributeTimestamp\nelse:\n raise TypeError('Unexpected attribute type!')", "if isinstance(attribute, XAttributeDiscrete):\n return 'DISCRETE'\nelif isinstance(attribute, XAttributeLiteral):\n return 'LITERAL'\nelif isinstance(attribute, XAttributeContinuous):\n return 'CONTINUOUS'\nelif isinstance(attribute, XAttributeBoolean):\n return 'BOOLEAN'\nelif isinstance(attribute, XAttributeID):\n return 'ID'\nelif isinstance(attribute, XAttributeList):\n return 'LIST'\nelif isinstance(attribute, XAttributeContainer):\n return 'CONTAINER'\nelif isinstance(attribute, XAttributeTimestamp):\n return 'TIMESTAMP'\nelse:\n raise 
TypeError('Unexpected attribute type!')", "prototype = instance.clone()\nif not isinstance(prototype, XAttributeList) and (not isinstance(prototype, XAttributeContainer)):\n if isinstance(prototype, XAttributeLiteral):\n prototype.set_value('UNKNOWN')\n elif isinstance(prototype, XAttributeBoolean):\n prototype.set_value(True)\n elif isinstance(prototype, XAttributeContinuous):\n prototype.set_value(0.0)\n elif isinstance(prototype, XAttributeDiscrete):\n prototype.set_value(0)\n elif isinstance(prototype, XAttributeTimestamp):\n prototype.set_value_millies(0)\n elif isinstance(prototype, XAttributeID):\n prototype.set_value(XIDFactory().create_id())\n else:\n raise TypeError('Unexpected attribute type!')\nreturn prototype"], "bodies_text": "<|body_start_0|>\n if isinstance(attribute, XAttributeDiscrete):\n return XAttributeDiscrete\n elif isinstance(attribute, XAttributeLiteral):\n return XAttributeLiteral\n elif isinstance(attribute, XAttributeContinuous):\n return XAttributeContinuous\n elif isinstance(attribute, XAttributeBoolean):\n return XAttributeBoolean\n elif isinstance(attribute, XAttributeID):\n return XAttributeID\n elif isinstance(attribute, XAttributeList):\n return XAttributeList\n elif isinstance(attribute, XAttributeContainer):\n return XAttributeContainer\n elif isinstance(attribute, XAttributeTimestamp):\n return XAttributeTimestamp\n else:\n raise TypeError('Unexpected attribute type!')\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(attribute, XAttributeDiscrete):\n return 'DISCRETE'\n elif isinstance(attribute, XAttributeLiteral):\n return 'LITERAL'\n elif isinstance(attribute, XAttributeContinuous):\n return 'CONTINUOUS'\n elif isinstance(attribute, XAttributeBoolean):\n return 'BOOLEAN'\n elif isinstance(attribute, XAttributeID):\n return 'ID'\n elif isinstance(attribute, XAttributeList):\n return 'LIST'\n elif isinstance(attribute, XAttributeContainer):\n return 'CONTAINER'\n elif isinstance(attribute, XAttributeTimestamp):\n return 'TIMESTAMP'\n else:\n raise TypeError('Unexpected attribute type!')\n<|end_body_1|>\n\n<|body_start_2|>\n prototype = instance.clone()\n if not isinstance(prototype, XAttributeList) and (not isinstance(prototype, XAttributeContainer)):\n if isinstance(prototype, XAttributeLiteral):\n prototype.set_value('UNKNOWN')\n elif isinstance(prototype, XAttributeBoolean):\n prototype.set_value(True)\n elif isinstance(prototype, XAttributeContinuous):\n prototype.set_value(0.0)\n elif isinstance(prototype, XAttributeDiscrete):\n prototype.set_value(0)\n elif isinstance(prototype, XAttributeTimestamp):\n prototype.set_value_millies(0)\n elif isinstance(prototype, XAttributeID):\n prototype.set_value(XIDFactory().create_id())\n else:\n raise TypeError('Unexpected attribute type!')\n return prototype\n<|end_body_2|>\n", "class_docstring": "Utilities for working with attributes.", "class_name": "XAttributeUtils", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass XAttributeUtils:\n \"\"\"Utilities for working with attributes.\"\"\"\n\n def get_type(attribute):\n \"\"\"For the given attribute, returns its type, i.e., the most high-level, typed interface this attribute implements. :param attribute: Attribute to analyze. :type attribute: XAttribute :return: Class of this attribute. :rtype: type\"\"\"\n <|body_0|>\n\n def get_type_string(attribute):\n \"\"\"For the given attribute, derives the standardized string describing the attributes specific type (used, e.g., for serialization). 
:param attribute: Attribute to extract type string from. :type attribute: XAttribute :return: String representation of the attribute's specific type. :rtype: str\"\"\"\n <|body_1|>\n\n def derive_prototype(instance):\n \"\"\"Derives a prototype for the given attribute. This prototype attribute will be equal in all respects, expect for the value of the attribute. This value will be set to a default value, depending on the specific type of the given attribute. :param instance: Attribute to derive prototype from. :type instance: XAttribute :return: The derived prototype attribute. :rtype: XAttribute\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(attribute, XAttributeDiscrete):\n return XAttributeDiscrete\n elif isinstance(attribute, XAttributeLiteral):\n return XAttributeLiteral\n elif isinstance(attribute, XAttributeContinuous):\n return XAttributeContinuous\n elif isinstance(attribute, XAttributeBoolean):\n return XAttributeBoolean\n elif isinstance(attribute, XAttributeID):\n return XAttributeID\n elif isinstance(attribute, XAttributeList):\n return XAttributeList\n elif isinstance(attribute, XAttributeContainer):\n return XAttributeContainer\n elif isinstance(attribute, XAttributeTimestamp):\n return XAttributeTimestamp\n else:\n raise TypeError('Unexpected attribute type!')\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(attribute, XAttributeDiscrete):\n return 'DISCRETE'\n elif isinstance(attribute, XAttributeLiteral):\n return 'LITERAL'\n elif isinstance(attribute, XAttributeContinuous):\n return 'CONTINUOUS'\n elif isinstance(attribute, XAttributeBoolean):\n return 'BOOLEAN'\n elif isinstance(attribute, XAttributeID):\n return 'ID'\n elif isinstance(attribute, XAttributeList):\n return 'LIST'\n elif isinstance(attribute, XAttributeContainer):\n return 'CONTAINER'\n elif isinstance(attribute, XAttributeTimestamp):\n return 'TIMESTAMP'\n else:\n raise TypeError('Unexpected attribute type!')\n<|end_body_1|>\n\n<|body_start_2|>\n prototype = instance.clone()\n if not isinstance(prototype, XAttributeList) and (not isinstance(prototype, XAttributeContainer)):\n if isinstance(prototype, XAttributeLiteral):\n prototype.set_value('UNKNOWN')\n elif isinstance(prototype, XAttributeBoolean):\n prototype.set_value(True)\n elif isinstance(prototype, XAttributeContinuous):\n prototype.set_value(0.0)\n elif isinstance(prototype, XAttributeDiscrete):\n prototype.set_value(0)\n elif isinstance(prototype, XAttributeTimestamp):\n prototype.set_value_millies(0)\n elif isinstance(prototype, XAttributeID):\n prototype.set_value(XIDFactory().create_id())\n else:\n raise TypeError('Unexpected attribute type!')\n return prototype\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000080", "length_bytes": 4322, "license_type": "no_license", "methods": [{"docstring": "For the given attribute, returns its type, i.e., the most high-level, typed interface this attribute implements. :param attribute: Attribute to analyze. :type attribute: XAttribute :return: Class of this attribute. :rtype: type", "name": "get_type", "signature": "def get_type(attribute)"}, {"docstring": "For the given attribute, derives the standardized string describing the attributes specific type (used, e.g., for serialization). :param attribute: Attribute to extract type string from. :type attribute: XAttribute :return: String representation of the attribute's specific type. 
:rtype: str", "name": "get_type_string", "signature": "def get_type_string(attribute)"}, {"docstring": "Derives a prototype for the given attribute. This prototype attribute will be equal in all respects, expect for the value of the attribute. This value will be set to a default value, depending on the specific type of the given attribute. :param instance: Attribute to derive prototype from. :type instance: XAttribute :return: The derived prototype attribute. :rtype: XAttribute", "name": "derive_prototype", "signature": "def derive_prototype(instance)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004739", "prompt": "Implement the Python class `XAttributeUtils` described below.\n\nClass description:\nUtilities for working with attributes.\n\nMethod signatures and docstrings:\n- def get_type(attribute): For the given attribute, returns its type, i.e., the most high-level, typed interface this attribute implements. :param attribute: Attribute to analyze. :type attribute: XAttribute :return: Class of this attribute. :rtype: type\n- def get_type_string(attribute): For the given attribute, derives the standardized string describing the attributes specific type (used, e.g., for serialization). :param attribute: Attribute to extract type string from. :type attribute: XAttribute :return: String representation of the attribute's specific type. :rtype: str\n- def derive_prototype(instance): Derives a prototype for the given attribute. This prototype attribute will be equal in all respects, expect for the value of the attribute. This value will be set to a default value, depending on the specific type of the given attribute. :param instance: Attribute to derive prototype from. :type instance: XAttribute :return: The derived prototype attribute. :rtype: XAttribute", "prompted_full_text": "Implement the Python class `XAttributeUtils` described below.\n\nClass description:\nUtilities for working with attributes.\n\nMethod signatures and docstrings:\n- def get_type(attribute): For the given attribute, returns its type, i.e., the most high-level, typed interface this attribute implements. :param attribute: Attribute to analyze. :type attribute: XAttribute :return: Class of this attribute. :rtype: type\n- def get_type_string(attribute): For the given attribute, derives the standardized string describing the attributes specific type (used, e.g., for serialization). :param attribute: Attribute to extract type string from. :type attribute: XAttribute :return: String representation of the attribute's specific type. :rtype: str\n- def derive_prototype(instance): Derives a prototype for the given attribute. This prototype attribute will be equal in all respects, expect for the value of the attribute. This value will be set to a default value, depending on the specific type of the given attribute. :param instance: Attribute to derive prototype from. :type instance: XAttribute :return: The derived prototype attribute. :rtype: XAttribute\n\n<|skeleton|>\nclass XAttributeUtils:\n \"\"\"Utilities for working with attributes.\"\"\"\n\n def get_type(attribute):\n \"\"\"For the given attribute, returns its type, i.e., the most high-level, typed interface this attribute implements. :param attribute: Attribute to analyze. :type attribute: XAttribute :return: Class of this attribute. :rtype: type\"\"\"\n <|body_0|>\n\n def get_type_string(attribute):\n \"\"\"For the given attribute, derives the standardized string describing the attributes specific type (used, e.g., for serialization). 
:param attribute: Attribute to extract type string from. :type attribute: XAttribute :return: String representation of the attribute's specific type. :rtype: str\"\"\"\n <|body_1|>\n\n def derive_prototype(instance):\n \"\"\"Derives a prototype for the given attribute. This prototype attribute will be equal in all respects, expect for the value of the attribute. This value will be set to a default value, depending on the specific type of the given attribute. :param instance: Attribute to derive prototype from. :type instance: XAttribute :return: The derived prototype attribute. :rtype: XAttribute\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(attribute, XAttributeDiscrete):\n return XAttributeDiscrete\n elif isinstance(attribute, XAttributeLiteral):\n return XAttributeLiteral\n elif isinstance(attribute, XAttributeContinuous):\n return XAttributeContinuous\n elif isinstance(attribute, XAttributeBoolean):\n return XAttributeBoolean\n elif isinstance(attribute, XAttributeID):\n return XAttributeID\n elif isinstance(attribute, XAttributeList):\n return XAttributeList\n elif isinstance(attribute, XAttributeContainer):\n return XAttributeContainer\n elif isinstance(attribute, XAttributeTimestamp):\n return XAttributeTimestamp\n else:\n raise TypeError('Unexpected attribute type!')\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(attribute, XAttributeDiscrete):\n return 'DISCRETE'\n elif isinstance(attribute, XAttributeLiteral):\n return 'LITERAL'\n elif isinstance(attribute, XAttributeContinuous):\n return 'CONTINUOUS'\n elif isinstance(attribute, XAttributeBoolean):\n return 'BOOLEAN'\n elif isinstance(attribute, XAttributeID):\n return 'ID'\n elif isinstance(attribute, XAttributeList):\n return 'LIST'\n elif isinstance(attribute, XAttributeContainer):\n return 'CONTAINER'\n elif isinstance(attribute, XAttributeTimestamp):\n return 'TIMESTAMP'\n else:\n raise TypeError('Unexpected attribute type!')\n<|end_body_1|>\n\n<|body_start_2|>\n prototype = instance.clone()\n if not isinstance(prototype, XAttributeList) and (not isinstance(prototype, XAttributeContainer)):\n if isinstance(prototype, XAttributeLiteral):\n prototype.set_value('UNKNOWN')\n elif isinstance(prototype, XAttributeBoolean):\n prototype.set_value(True)\n elif isinstance(prototype, XAttributeContinuous):\n prototype.set_value(0.0)\n elif isinstance(prototype, XAttributeDiscrete):\n prototype.set_value(0)\n elif isinstance(prototype, XAttributeTimestamp):\n prototype.set_value_millies(0)\n elif isinstance(prototype, XAttributeID):\n prototype.set_value(XIDFactory().create_id())\n else:\n raise TypeError('Unexpected attribute type!')\n return prototype\n<|end_body_2|>\n", "revision_id": "b21d43650448d474cfa678c61ac02689859d6826", "skeleton": "<|skeleton|>\nclass XAttributeUtils:\n \"\"\"Utilities for working with attributes.\"\"\"\n\n def get_type(attribute):\n \"\"\"For the given attribute, returns its type, i.e., the most high-level, typed interface this attribute implements. :param attribute: Attribute to analyze. :type attribute: XAttribute :return: Class of this attribute. :rtype: type\"\"\"\n <|body_0|>\n\n def get_type_string(attribute):\n \"\"\"For the given attribute, derives the standardized string describing the attributes specific type (used, e.g., for serialization). :param attribute: Attribute to extract type string from. :type attribute: XAttribute :return: String representation of the attribute's specific type. 
:rtype: str\"\"\"\n <|body_1|>\n\n def derive_prototype(instance):\n \"\"\"Derives a prototype for the given attribute. This prototype attribute will be equal in all respects, expect for the value of the attribute. This value will be set to a default value, depending on the specific type of the given attribute. :param instance: Attribute to derive prototype from. :type instance: XAttribute :return: The derived prototype attribute. :rtype: XAttribute\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class XAttributeUtils:\n \"\"\"Utilities for working with attributes.\"\"\"\n\n def get_type(attribute):\n \"\"\"For the given attribute, returns its type, i.e., the most high-level, typed interface this attribute implements. :param attribute: Attribute to analyze. :type attribute: XAttribute :return: Class of this attribute. :rtype: type\"\"\"\n if isinstance(attribute, XAttributeDiscrete):\n return XAttributeDiscrete\n elif isinstance(attribute, XAttributeLiteral):\n return XAttributeLiteral\n elif isinstance(attribute, XAttributeContinuous):\n return XAttributeContinuous\n elif isinstance(attribute, XAttributeBoolean):\n return XAttributeBoolean\n elif isinstance(attribute, XAttributeID):\n return XAttributeID\n elif isinstance(attribute, XAttributeList):\n return XAttributeList\n elif isinstance(attribute, XAttributeContainer):\n return XAttributeContainer\n elif isinstance(attribute, XAttributeTimestamp):\n return XAttributeTimestamp\n else:\n raise TypeError('Unexpected attribute type!')\n\n def get_type_string(attribute):\n \"\"\"For the given attribute, derives the standardized string describing the attributes specific type (used, e.g., for serialization). :param attribute: Attribute to extract type string from. :type attribute: XAttribute :return: String representation of the attribute's specific type. :rtype: str\"\"\"\n if isinstance(attribute, XAttributeDiscrete):\n return 'DISCRETE'\n elif isinstance(attribute, XAttributeLiteral):\n return 'LITERAL'\n elif isinstance(attribute, XAttributeContinuous):\n return 'CONTINUOUS'\n elif isinstance(attribute, XAttributeBoolean):\n return 'BOOLEAN'\n elif isinstance(attribute, XAttributeID):\n return 'ID'\n elif isinstance(attribute, XAttributeList):\n return 'LIST'\n elif isinstance(attribute, XAttributeContainer):\n return 'CONTAINER'\n elif isinstance(attribute, XAttributeTimestamp):\n return 'TIMESTAMP'\n else:\n raise TypeError('Unexpected attribute type!')\n\n def derive_prototype(instance):\n \"\"\"Derives a prototype for the given attribute. This prototype attribute will be equal in all respects, expect for the value of the attribute. This value will be set to a default value, depending on the specific type of the given attribute. :param instance: Attribute to derive prototype from. :type instance: XAttribute :return: The derived prototype attribute. 
:rtype: XAttribute\"\"\"\n prototype = instance.clone()\n if not isinstance(prototype, XAttributeList) and (not isinstance(prototype, XAttributeContainer)):\n if isinstance(prototype, XAttributeLiteral):\n prototype.set_value('UNKNOWN')\n elif isinstance(prototype, XAttributeBoolean):\n prototype.set_value(True)\n elif isinstance(prototype, XAttributeContinuous):\n prototype.set_value(0.0)\n elif isinstance(prototype, XAttributeDiscrete):\n prototype.set_value(0)\n elif isinstance(prototype, XAttributeTimestamp):\n prototype.set_value_millies(0)\n elif isinstance(prototype, XAttributeID):\n prototype.set_value(XIDFactory().create_id())\n else:\n raise TypeError('Unexpected attribute type!')\n return prototype\n", "source": "the_stack_v2_python_sparse", "source_path": "opyenxes/utils/XAttributeUtils.py", "source_repo": "TKasekamp/OpyenXes", "split": "val", "star_events_count": 0} {"blob_id": "3ee4b072e1c54fc37139b7cc3c02b779dfe2248f", "bodies": ["super().__init__(coordinator=coordinator)\nself.entity_description = entity_description\nself.entity_id = f'{SENSOR_DOMAIN}.{entity_description.key}'\nself._attr_unique_id = f'{entry_id}_{entity_description.key}'\nself._attr_device_info = DeviceInfo(entry_type=DeviceEntryType.SERVICE, identifiers={(DOMAIN, entry_id)}, manufacturer='Forecast.Solar', model=coordinator.data.account_type.value, name='Solar production forecast', configuration_url='https://forecast.solar')", "if self.entity_description.state is None:\n state: StateType | datetime = getattr(self.coordinator.data, self.entity_description.key)\nelse:\n state = self.entity_description.state(self.coordinator.data)\nreturn state"], "bodies_text": "<|body_start_0|>\n super().__init__(coordinator=coordinator)\n self.entity_description = entity_description\n self.entity_id = f'{SENSOR_DOMAIN}.{entity_description.key}'\n self._attr_unique_id = f'{entry_id}_{entity_description.key}'\n self._attr_device_info = DeviceInfo(entry_type=DeviceEntryType.SERVICE, identifiers={(DOMAIN, entry_id)}, manufacturer='Forecast.Solar', model=coordinator.data.account_type.value, name='Solar production forecast', configuration_url='https://forecast.solar')\n<|end_body_0|>\n\n<|body_start_1|>\n if self.entity_description.state is None:\n state: StateType | datetime = getattr(self.coordinator.data, self.entity_description.key)\n else:\n state = self.entity_description.state(self.coordinator.data)\n return state\n<|end_body_1|>\n", "class_docstring": "Defines a Forecast.Solar sensor.", "class_name": "ForecastSolarSensorEntity", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ForecastSolarSensorEntity:\n \"\"\"Defines a Forecast.Solar sensor.\"\"\"\n\n def __init__(self, *, entry_id: str, coordinator: ForecastSolarDataUpdateCoordinator, entity_description: ForecastSolarSensorEntityDescription) -> None:\n \"\"\"Initialize Forecast.Solar sensor.\"\"\"\n <|body_0|>\n\n def native_value(self) -> datetime | StateType:\n \"\"\"Return the state of the sensor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(coordinator=coordinator)\n self.entity_description = entity_description\n self.entity_id = f'{SENSOR_DOMAIN}.{entity_description.key}'\n self._attr_unique_id = f'{entry_id}_{entity_description.key}'\n self._attr_device_info = DeviceInfo(entry_type=DeviceEntryType.SERVICE, identifiers={(DOMAIN, entry_id)}, manufacturer='Forecast.Solar', model=coordinator.data.account_type.value, name='Solar production forecast', 
configuration_url='https://forecast.solar')\n<|end_body_0|>\n\n<|body_start_1|>\n if self.entity_description.state is None:\n state: StateType | datetime = getattr(self.coordinator.data, self.entity_description.key)\n else:\n state = self.entity_description.state(self.coordinator.data)\n return state\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000081", "length_bytes": 7317, "license_type": "permissive", "methods": [{"docstring": "Initialize Forecast.Solar sensor.", "name": "__init__", "signature": "def __init__(self, *, entry_id: str, coordinator: ForecastSolarDataUpdateCoordinator, entity_description: ForecastSolarSensorEntityDescription) -> None"}, {"docstring": "Return the state of the sensor.", "name": "native_value", "signature": "def native_value(self) -> datetime | StateType"}], "n_methods": 2, "prompt": "Implement the Python class `ForecastSolarSensorEntity` described below.\n\nClass description:\nDefines a Forecast.Solar sensor.\n\nMethod signatures and docstrings:\n- def __init__(self, *, entry_id: str, coordinator: ForecastSolarDataUpdateCoordinator, entity_description: ForecastSolarSensorEntityDescription) -> None: Initialize Forecast.Solar sensor.\n- def native_value(self) -> datetime | StateType: Return the state of the sensor.", "prompted_full_text": "Implement the Python class `ForecastSolarSensorEntity` described below.\n\nClass description:\nDefines a Forecast.Solar sensor.\n\nMethod signatures and docstrings:\n- def __init__(self, *, entry_id: str, coordinator: ForecastSolarDataUpdateCoordinator, entity_description: ForecastSolarSensorEntityDescription) -> None: Initialize Forecast.Solar sensor.\n- def native_value(self) -> datetime | StateType: Return the state of the sensor.\n\n<|skeleton|>\nclass ForecastSolarSensorEntity:\n \"\"\"Defines a Forecast.Solar sensor.\"\"\"\n\n def __init__(self, *, entry_id: str, coordinator: ForecastSolarDataUpdateCoordinator, entity_description: ForecastSolarSensorEntityDescription) -> None:\n \"\"\"Initialize Forecast.Solar sensor.\"\"\"\n <|body_0|>\n\n def native_value(self) -> datetime | StateType:\n \"\"\"Return the state of the sensor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(coordinator=coordinator)\n self.entity_description = entity_description\n self.entity_id = f'{SENSOR_DOMAIN}.{entity_description.key}'\n self._attr_unique_id = f'{entry_id}_{entity_description.key}'\n self._attr_device_info = DeviceInfo(entry_type=DeviceEntryType.SERVICE, identifiers={(DOMAIN, entry_id)}, manufacturer='Forecast.Solar', model=coordinator.data.account_type.value, name='Solar production forecast', configuration_url='https://forecast.solar')\n<|end_body_0|>\n\n<|body_start_1|>\n if self.entity_description.state is None:\n state: StateType | datetime = getattr(self.coordinator.data, self.entity_description.key)\n else:\n state = self.entity_description.state(self.coordinator.data)\n return state\n<|end_body_1|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass ForecastSolarSensorEntity:\n \"\"\"Defines a Forecast.Solar sensor.\"\"\"\n\n def __init__(self, *, entry_id: str, coordinator: ForecastSolarDataUpdateCoordinator, entity_description: ForecastSolarSensorEntityDescription) -> None:\n \"\"\"Initialize Forecast.Solar sensor.\"\"\"\n <|body_0|>\n\n def native_value(self) -> datetime | StateType:\n \"\"\"Return the state of the sensor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": 
"data/stack_v2_sparse_classes_30k", "solution": "class ForecastSolarSensorEntity:\n \"\"\"Defines a Forecast.Solar sensor.\"\"\"\n\n def __init__(self, *, entry_id: str, coordinator: ForecastSolarDataUpdateCoordinator, entity_description: ForecastSolarSensorEntityDescription) -> None:\n \"\"\"Initialize Forecast.Solar sensor.\"\"\"\n super().__init__(coordinator=coordinator)\n self.entity_description = entity_description\n self.entity_id = f'{SENSOR_DOMAIN}.{entity_description.key}'\n self._attr_unique_id = f'{entry_id}_{entity_description.key}'\n self._attr_device_info = DeviceInfo(entry_type=DeviceEntryType.SERVICE, identifiers={(DOMAIN, entry_id)}, manufacturer='Forecast.Solar', model=coordinator.data.account_type.value, name='Solar production forecast', configuration_url='https://forecast.solar')\n\n def native_value(self) -> datetime | StateType:\n \"\"\"Return the state of the sensor.\"\"\"\n if self.entity_description.state is None:\n state: StateType | datetime = getattr(self.coordinator.data, self.entity_description.key)\n else:\n state = self.entity_description.state(self.coordinator.data)\n return state\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/forecast_solar/sensor.py", "source_repo": "home-assistant/core", "split": "val", "star_events_count": 35501} {"blob_id": "9b22f6942e14f6103426bfa0e6421ab08b20d898", "bodies": ["def to_set(x):\n if x is None:\n return set()\n if isinstance(x, (list, tuple)):\n return set(x)\n return set([x])\n\ndef make_match(m):\n return m and {k: to_set(v) for k, v in m.items()}\nself.accept, self.reject = (make_match(accept), make_match(reject))\nself.omit = to_set(omit)\nif auto_omit and self.accept:\n self.omit.update((k for k, v in self.accept.items() if len(v) == 1))\nself.normalizers = normalizers or {}\nif keys_by_type is None:\n self.keys_by_type = None\nelse:\n self.keys_by_type = {}\n for k, v in keys_by_type.items():\n if isinstance(v, str):\n v = [v]\n self.keys_by_type[k] = tuple((i for i in v if i not in self.omit))", "def normal(key):\n v = msg.get(key)\n if v is None:\n return v\n normalizer = self.normalizers.get(key, lambda x: x)\n return normalizer(v)\n\ndef odict(keys):\n return collections.OrderedDict(((k, normal(k)) for k in keys))\n\ndef match(m):\n return (msg.get(k) in v for k, v in m.items()) if m else ()\naccept = all(match(self.accept))\nreject = any(match(self.reject))\nif reject or not accept:\n keys = ()\nelif self.keys_by_type is None:\n keys = [k for k in msg.keys() if k not in self.omit]\nelse:\n keys = self.keys_by_type.get(msg.get('type'))\nreturn odict(keys)"], "bodies_text": "<|body_start_0|>\n def to_set(x):\n if x is None:\n return set()\n if isinstance(x, (list, tuple)):\n return set(x)\n return set([x])\n\n def make_match(m):\n return m and {k: to_set(v) for k, v in m.items()}\n self.accept, self.reject = (make_match(accept), make_match(reject))\n self.omit = to_set(omit)\n if auto_omit and self.accept:\n self.omit.update((k for k, v in self.accept.items() if len(v) == 1))\n self.normalizers = normalizers or {}\n if keys_by_type is None:\n self.keys_by_type = None\n else:\n self.keys_by_type = {}\n for k, v in keys_by_type.items():\n if isinstance(v, str):\n v = [v]\n self.keys_by_type[k] = tuple((i for i in v if i not in self.omit))\n<|end_body_0|>\n\n<|body_start_1|>\n def normal(key):\n v = msg.get(key)\n if v is None:\n return v\n normalizer = self.normalizers.get(key, lambda x: x)\n return normalizer(v)\n\n def odict(keys):\n return collections.OrderedDict(((k, 
normal(k)) for k in keys))\n\n def match(m):\n return (msg.get(k) in v for k, v in m.items()) if m else ()\n accept = all(match(self.accept))\n reject = any(match(self.reject))\n if reject or not accept:\n keys = ()\n elif self.keys_by_type is None:\n keys = [k for k in msg.keys() if k not in self.omit]\n else:\n keys = self.keys_by_type.get(msg.get('type'))\n return odict(keys)\n<|end_body_1|>\n", "class_docstring": "Extractor is a class that extracts and normalizes values from incoming message dictionaries into ordered dictionaries based on the `type` key of each message.", "class_name": "Extractor", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Extractor:\n \"\"\"Extractor is a class that extracts and normalizes values from incoming message dictionaries into ordered dictionaries based on the `type` key of each message.\"\"\"\n\n def __init__(self, omit=None, normalizers=None, keys_by_type=None, accept=None, reject=None, auto_omit=True):\n \"\"\"Arguments omit -- A list of keys that will not be extracted. normalizers -- Some keys also need to be \"normalized\" - scaled and offset so they are between 0 and 1, or -1 and 1. The `normalizers` table maps key names to a function that normalizes the value of that key. keys_by_type -- `keys_by_type` is a dictionary from the `type` in an incoming message to a list of message keys to be extracted accept -- maps keys to a value or a list of values that are accepted for that key. A message has to match *all* entries in `accept` to be accepted. reject -- maps keys to a value or a list of values that are not accepted for that key. A message is rejected if it matches *any* entry in the reject map. aut\"\"\"\n <|body_0|>\n\n def extract(self, msg):\n \"\"\"Yield an ordered dictionary if msg['type'] is in keys_by_type.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def to_set(x):\n if x is None:\n return set()\n if isinstance(x, (list, tuple)):\n return set(x)\n return set([x])\n\n def make_match(m):\n return m and {k: to_set(v) for k, v in m.items()}\n self.accept, self.reject = (make_match(accept), make_match(reject))\n self.omit = to_set(omit)\n if auto_omit and self.accept:\n self.omit.update((k for k, v in self.accept.items() if len(v) == 1))\n self.normalizers = normalizers or {}\n if keys_by_type is None:\n self.keys_by_type = None\n else:\n self.keys_by_type = {}\n for k, v in keys_by_type.items():\n if isinstance(v, str):\n v = [v]\n self.keys_by_type[k] = tuple((i for i in v if i not in self.omit))\n<|end_body_0|>\n\n<|body_start_1|>\n def normal(key):\n v = msg.get(key)\n if v is None:\n return v\n normalizer = self.normalizers.get(key, lambda x: x)\n return normalizer(v)\n\n def odict(keys):\n return collections.OrderedDict(((k, normal(k)) for k in keys))\n\n def match(m):\n return (msg.get(k) in v for k, v in m.items()) if m else ()\n accept = all(match(self.accept))\n reject = any(match(self.reject))\n if reject or not accept:\n keys = ()\n elif self.keys_by_type is None:\n keys = [k for k in msg.keys() if k not in self.omit]\n else:\n keys = self.keys_by_type.get(msg.get('type'))\n return odict(keys)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000082", "length_bytes": 3337, "license_type": "permissive", "methods": [{"docstring": "Arguments omit -- A list of keys that will not be extracted. normalizers -- Some keys also need to be \"normalized\" - scaled and offset so they are between 0 and 1, or -1 and 1. 
The `normalizers` table maps key names to a function that normalizes the value of that key. keys_by_type -- `keys_by_type` is a dictionary from the `type` in an incoming message to a list of message keys to be extracted accept -- maps keys to a value or a list of values that are accepted for that key. A message has to match *all* entries in `accept` to be accepted. reject -- maps keys to a value or a list of values that are not accepted for that key. A message is rejected if it matches *any* entry in the reject map. aut", "name": "__init__", "signature": "def __init__(self, omit=None, normalizers=None, keys_by_type=None, accept=None, reject=None, auto_omit=True)"}, {"docstring": "Yield an ordered dictionary if msg['type'] is in keys_by_type.", "name": "extract", "signature": "def extract(self, msg)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002295", "prompt": "Implement the Python class `Extractor` described below.\n\nClass description:\nExtractor is a class that extracts and normalizes values from incoming message dictionaries into ordered dictionaries based on the `type` key of each message.\n\nMethod signatures and docstrings:\n- def __init__(self, omit=None, normalizers=None, keys_by_type=None, accept=None, reject=None, auto_omit=True): Arguments omit -- A list of keys that will not be extracted. normalizers -- Some keys also need to be \"normalized\" - scaled and offset so they are between 0 and 1, or -1 and 1. The `normalizers` table maps key names to a function that normalizes the value of that key. keys_by_type -- `keys_by_type` is a dictionary from the `type` in an incoming message to a list of message keys to be extracted accept -- maps keys to a value or a list of values that are accepted for that key. A message has to match *all* entries in `accept` to be accepted. reject -- maps keys to a value or a list of values that are not accepted for that key. A message is rejected if it matches *any* entry in the reject map. aut\n- def extract(self, msg): Yield an ordered dictionary if msg['type'] is in keys_by_type.", "prompted_full_text": "Implement the Python class `Extractor` described below.\n\nClass description:\nExtractor is a class that extracts and normalizes values from incoming message dictionaries into ordered dictionaries based on the `type` key of each message.\n\nMethod signatures and docstrings:\n- def __init__(self, omit=None, normalizers=None, keys_by_type=None, accept=None, reject=None, auto_omit=True): Arguments omit -- A list of keys that will not be extracted. normalizers -- Some keys also need to be \"normalized\" - scaled and offset so they are between 0 and 1, or -1 and 1. The `normalizers` table maps key names to a function that normalizes the value of that key. keys_by_type -- `keys_by_type` is a dictionary from the `type` in an incoming message to a list of message keys to be extracted accept -- maps keys to a value or a list of values that are accepted for that key. A message has to match *all* entries in `accept` to be accepted. reject -- maps keys to a value or a list of values that are not accepted for that key. A message is rejected if it matches *any* entry in the reject map. 
aut\n- def extract(self, msg): Yield an ordered dictionary if msg['type'] is in keys_by_type.\n\n<|skeleton|>\nclass Extractor:\n \"\"\"Extractor is a class that extracts and normalizes values from incoming message dictionaries into ordered dictionaries based on the `type` key of each message.\"\"\"\n\n def __init__(self, omit=None, normalizers=None, keys_by_type=None, accept=None, reject=None, auto_omit=True):\n \"\"\"Arguments omit -- A list of keys that will not be extracted. normalizers -- Some keys also need to be \"normalized\" - scaled and offset so they are between 0 and 1, or -1 and 1. The `normalizers` table maps key names to a function that normalizes the value of that key. keys_by_type -- `keys_by_type` is a dictionary from the `type` in an incoming message to a list of message keys to be extracted accept -- maps keys to a value or a list of values that are accepted for that key. A message has to match *all* entries in `accept` to be accepted. reject -- maps keys to a value or a list of values that are not accepted for that key. A message is rejected if it matches *any* entry in the reject map. aut\"\"\"\n <|body_0|>\n\n def extract(self, msg):\n \"\"\"Yield an ordered dictionary if msg['type'] is in keys_by_type.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def to_set(x):\n if x is None:\n return set()\n if isinstance(x, (list, tuple)):\n return set(x)\n return set([x])\n\n def make_match(m):\n return m and {k: to_set(v) for k, v in m.items()}\n self.accept, self.reject = (make_match(accept), make_match(reject))\n self.omit = to_set(omit)\n if auto_omit and self.accept:\n self.omit.update((k for k, v in self.accept.items() if len(v) == 1))\n self.normalizers = normalizers or {}\n if keys_by_type is None:\n self.keys_by_type = None\n else:\n self.keys_by_type = {}\n for k, v in keys_by_type.items():\n if isinstance(v, str):\n v = [v]\n self.keys_by_type[k] = tuple((i for i in v if i not in self.omit))\n<|end_body_0|>\n\n<|body_start_1|>\n def normal(key):\n v = msg.get(key)\n if v is None:\n return v\n normalizer = self.normalizers.get(key, lambda x: x)\n return normalizer(v)\n\n def odict(keys):\n return collections.OrderedDict(((k, normal(k)) for k in keys))\n\n def match(m):\n return (msg.get(k) in v for k, v in m.items()) if m else ()\n accept = all(match(self.accept))\n reject = any(match(self.reject))\n if reject or not accept:\n keys = ()\n elif self.keys_by_type is None:\n keys = [k for k in msg.keys() if k not in self.omit]\n else:\n keys = self.keys_by_type.get(msg.get('type'))\n return odict(keys)\n<|end_body_1|>\n", "revision_id": "3faac7450678aaccd4a283d0d41ca3e7f113f51b", "skeleton": "<|skeleton|>\nclass Extractor:\n \"\"\"Extractor is a class that extracts and normalizes values from incoming message dictionaries into ordered dictionaries based on the `type` key of each message.\"\"\"\n\n def __init__(self, omit=None, normalizers=None, keys_by_type=None, accept=None, reject=None, auto_omit=True):\n \"\"\"Arguments omit -- A list of keys that will not be extracted. normalizers -- Some keys also need to be \"normalized\" - scaled and offset so they are between 0 and 1, or -1 and 1. The `normalizers` table maps key names to a function that normalizes the value of that key. keys_by_type -- `keys_by_type` is a dictionary from the `type` in an incoming message to a list of message keys to be extracted accept -- maps keys to a value or a list of values that are accepted for that key. A message has to match *all* entries in `accept` to be accepted. 
reject -- maps keys to a value or a list of values that are not accepted for that key. A message is rejected if it matches *any* entry in the reject map. aut\"\"\"\n <|body_0|>\n\n def extract(self, msg):\n \"\"\"Yield an ordered dictionary if msg['type'] is in keys_by_type.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Extractor:\n \"\"\"Extractor is a class that extracts and normalizes values from incoming message dictionaries into ordered dictionaries based on the `type` key of each message.\"\"\"\n\n def __init__(self, omit=None, normalizers=None, keys_by_type=None, accept=None, reject=None, auto_omit=True):\n \"\"\"Arguments omit -- A list of keys that will not be extracted. normalizers -- Some keys also need to be \"normalized\" - scaled and offset so they are between 0 and 1, or -1 and 1. The `normalizers` table maps key names to a function that normalizes the value of that key. keys_by_type -- `keys_by_type` is a dictionary from the `type` in an incoming message to a list of message keys to be extracted accept -- maps keys to a value or a list of values that are accepted for that key. A message has to match *all* entries in `accept` to be accepted. reject -- maps keys to a value or a list of values that are not accepted for that key. A message is rejected if it matches *any* entry in the reject map. aut\"\"\"\n def to_set(x):\n if x is None:\n return set()\n if isinstance(x, (list, tuple)):\n return set(x)\n return set([x])\n\n def make_match(m):\n return m and {k: to_set(v) for k, v in m.items()}\n self.accept, self.reject = (make_match(accept), make_match(reject))\n self.omit = to_set(omit)\n if auto_omit and self.accept:\n self.omit.update((k for k, v in self.accept.items() if len(v) == 1))\n self.normalizers = normalizers or {}\n if keys_by_type is None:\n self.keys_by_type = None\n else:\n self.keys_by_type = {}\n for k, v in keys_by_type.items():\n if isinstance(v, str):\n v = [v]\n self.keys_by_type[k] = tuple((i for i in v if i not in self.omit))\n\n def extract(self, msg):\n \"\"\"Yield an ordered dictionary if msg['type'] is in keys_by_type.\"\"\"\n def normal(key):\n v = msg.get(key)\n if v is None:\n return v\n normalizer = self.normalizers.get(key, lambda x: x)\n return normalizer(v)\n\n def odict(keys):\n return collections.OrderedDict(((k, normal(k)) for k in keys))\n\n def match(m):\n return (msg.get(k) in v for k, v in m.items()) if m else ()\n accept = all(match(self.accept))\n reject = any(match(self.reject))\n if reject or not accept:\n keys = ()\n elif self.keys_by_type is None:\n keys = [k for k in msg.keys() if k not in self.omit]\n else:\n keys = self.keys_by_type.get(msg.get('type'))\n return odict(keys)\n", "source": "the_stack_v2_python_sparse", "source_path": "timedata/control/extractor.py", "source_repo": "timedata-org/timedata", "split": "val", "star_events_count": 5} {"blob_id": "7aee97c7f6b32a4010a912bdbe3a2976d521f68f", "bodies": ["self.Agent = Agent\nself.alpha = alpha\nself.gamma = gamma\nself.epsilon = epsilon\nself.policy = dict()\nself.Q = dict()\nself.V = dict()\nS = set([(i, j) for i in range(-5, 6) for j in range(-5, 6)])\nfor s in S:\n self.V[s] = numpy.float16(0)\n self.Q[s] = dict()\n self.policy[s] = dict()\n for a in self.Agent.actions:\n self.policy[s][a] = numpy.float16(1.0 / len(self.Agent.actions))\n for o in self.Agent.actions:\n self.Q[s][a, o] = numpy.float16(0.0)", "self.Q[s][a, o] = (1 - self.alpha) * 
self.Q[s][a, o] + self.alpha * (r + self.gamma * self.V[s_prime])\nminimum = None\nfor a in self.Agent.actions:\n for o in self.Agent.actions:\n if minimum is None or self.Q[s][a, o] < minimum:\n minimum = self.Q[s][a, o]\nself.V[s] = minimum"], "bodies_text": "<|body_start_0|>\n self.Agent = Agent\n self.alpha = alpha\n self.gamma = gamma\n self.epsilon = epsilon\n self.policy = dict()\n self.Q = dict()\n self.V = dict()\n S = set([(i, j) for i in range(-5, 6) for j in range(-5, 6)])\n for s in S:\n self.V[s] = numpy.float16(0)\n self.Q[s] = dict()\n self.policy[s] = dict()\n for a in self.Agent.actions:\n self.policy[s][a] = numpy.float16(1.0 / len(self.Agent.actions))\n for o in self.Agent.actions:\n self.Q[s][a, o] = numpy.float16(0.0)\n<|end_body_0|>\n\n<|body_start_1|>\n self.Q[s][a, o] = (1 - self.alpha) * self.Q[s][a, o] + self.alpha * (r + self.gamma * self.V[s_prime])\n minimum = None\n for a in self.Agent.actions:\n for o in self.Agent.actions:\n if minimum is None or self.Q[s][a, o] < minimum:\n minimum = self.Q[s][a, o]\n self.V[s] = minimum\n<|end_body_1|>\n", "class_docstring": "Implementation of functions related to Q-learning.", "class_name": "TeamQLearning", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TeamQLearning:\n \"\"\"Implementation of functions related to Q-learning.\"\"\"\n\n def __init__(self, Agent, alpha, gamma, epsilon):\n \"\"\"Fill all values of Q based on a given optimistic value.\"\"\"\n <|body_0|>\n\n def updateQ(self, s, a, o, s_prime, r):\n \"\"\"Perform one step for this agent for a given state s. Action, resulting state s_prime, and observed reward r are also given.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.Agent = Agent\n self.alpha = alpha\n self.gamma = gamma\n self.epsilon = epsilon\n self.policy = dict()\n self.Q = dict()\n self.V = dict()\n S = set([(i, j) for i in range(-5, 6) for j in range(-5, 6)])\n for s in S:\n self.V[s] = numpy.float16(0)\n self.Q[s] = dict()\n self.policy[s] = dict()\n for a in self.Agent.actions:\n self.policy[s][a] = numpy.float16(1.0 / len(self.Agent.actions))\n for o in self.Agent.actions:\n self.Q[s][a, o] = numpy.float16(0.0)\n<|end_body_0|>\n\n<|body_start_1|>\n self.Q[s][a, o] = (1 - self.alpha) * self.Q[s][a, o] + self.alpha * (r + self.gamma * self.V[s_prime])\n minimum = None\n for a in self.Agent.actions:\n for o in self.Agent.actions:\n if minimum is None or self.Q[s][a, o] < minimum:\n minimum = self.Q[s][a, o]\n self.V[s] = minimum\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000083", "length_bytes": 2158, "license_type": "no_license", "methods": [{"docstring": "Fill all values of Q based on a given optimistic value.", "name": "__init__", "signature": "def __init__(self, Agent, alpha, gamma, epsilon)"}, {"docstring": "Perform one step for this agent for a given state s. Action, resulting state s_prime, and observed reward r are also given.", "name": "updateQ", "signature": "def updateQ(self, s, a, o, s_prime, r)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000924", "prompt": "Implement the Python class `TeamQLearning` described below.\n\nClass description:\nImplementation of functions related to Q-learning.\n\nMethod signatures and docstrings:\n- def __init__(self, Agent, alpha, gamma, epsilon): Fill all values of Q based on a given optimistic value.\n- def updateQ(self, s, a, o, s_prime, r): Perform one step for this agent for a given state s. 
Action, resulting state s_prime, and observed reward r are also given.", "prompted_full_text": "Implement the Python class `TeamQLearning` described below.\n\nClass description:\nImplementation of functions related to Q-learning.\n\nMethod signatures and docstrings:\n- def __init__(self, Agent, alpha, gamma, epsilon): Fill all values of Q based on a given optimistic value.\n- def updateQ(self, s, a, o, s_prime, r): Perform one step for this agent for a given state s. Action, resulting state s_prime, and observed reward r are also given.\n\n<|skeleton|>\nclass TeamQLearning:\n \"\"\"Implementation of functions related to Q-learning.\"\"\"\n\n def __init__(self, Agent, alpha, gamma, epsilon):\n \"\"\"Fill all values of Q based on a given optimistic value.\"\"\"\n <|body_0|>\n\n def updateQ(self, s, a, o, s_prime, r):\n \"\"\"Perform one step for this agent for a given state s. Action, resulting state s_prime, and observed reward r are also given.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.Agent = Agent\n self.alpha = alpha\n self.gamma = gamma\n self.epsilon = epsilon\n self.policy = dict()\n self.Q = dict()\n self.V = dict()\n S = set([(i, j) for i in range(-5, 6) for j in range(-5, 6)])\n for s in S:\n self.V[s] = numpy.float16(0)\n self.Q[s] = dict()\n self.policy[s] = dict()\n for a in self.Agent.actions:\n self.policy[s][a] = numpy.float16(1.0 / len(self.Agent.actions))\n for o in self.Agent.actions:\n self.Q[s][a, o] = numpy.float16(0.0)\n<|end_body_0|>\n\n<|body_start_1|>\n self.Q[s][a, o] = (1 - self.alpha) * self.Q[s][a, o] + self.alpha * (r + self.gamma * self.V[s_prime])\n minimum = None\n for a in self.Agent.actions:\n for o in self.Agent.actions:\n if minimum is None or self.Q[s][a, o] < minimum:\n minimum = self.Q[s][a, o]\n self.V[s] = minimum\n<|end_body_1|>\n", "revision_id": "a1bc1f82f2824055d3adcd0c33105556aa4099a8", "skeleton": "<|skeleton|>\nclass TeamQLearning:\n \"\"\"Implementation of functions related to Q-learning.\"\"\"\n\n def __init__(self, Agent, alpha, gamma, epsilon):\n \"\"\"Fill all values of Q based on a given optimistic value.\"\"\"\n <|body_0|>\n\n def updateQ(self, s, a, o, s_prime, r):\n \"\"\"Perform one step for this agent for a given state s. Action, resulting state s_prime, and observed reward r are also given.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TeamQLearning:\n \"\"\"Implementation of functions related to Q-learning.\"\"\"\n\n def __init__(self, Agent, alpha, gamma, epsilon):\n \"\"\"Fill all values of Q based on a given optimistic value.\"\"\"\n self.Agent = Agent\n self.alpha = alpha\n self.gamma = gamma\n self.epsilon = epsilon\n self.policy = dict()\n self.Q = dict()\n self.V = dict()\n S = set([(i, j) for i in range(-5, 6) for j in range(-5, 6)])\n for s in S:\n self.V[s] = numpy.float16(0)\n self.Q[s] = dict()\n self.policy[s] = dict()\n for a in self.Agent.actions:\n self.policy[s][a] = numpy.float16(1.0 / len(self.Agent.actions))\n for o in self.Agent.actions:\n self.Q[s][a, o] = numpy.float16(0.0)\n\n def updateQ(self, s, a, o, s_prime, r):\n \"\"\"Perform one step for this agent for a given state s. 
Action, resulting state s_prime, and observed reward r are also given.\"\"\"\n self.Q[s][a, o] = (1 - self.alpha) * self.Q[s][a, o] + self.alpha * (r + self.gamma * self.V[s_prime])\n minimum = None\n for a in self.Agent.actions:\n for o in self.Agent.actions:\n if minimum is None or self.Q[s][a, o] < minimum:\n minimum = self.Q[s][a, o]\n self.V[s] = minimum\n", "source": "the_stack_v2_python_sparse", "source_path": "source/MultiAgent/Minimax/TeamQLearning.py", "source_repo": "camielv/UvA-MasterAI-AA", "split": "val", "star_events_count": 0} {"blob_id": "7973e3adbee2b3e3a32e8e98a33a8b263eca11ef", "bodies": ["startTime = datetime.datetime.now()\nclient = dml.pymongo.MongoClient()\nrepo = client.repo\nrepo.authenticate('gasparde_ljmcgann', 'gasparde_ljmcgann')\nprops = repo.gasparde_ljmcgann.properties\nfor prop in props.find():\n print('hi!')\n _id = prop['_id']\n address = prop['ST_NUM'].split(' ')[0] + ' ' + 
prop['ST_NAME'] + ' ' + prop['ST_NAME_SUF'] + ' BOSTON, MASSACHUSETTS ' + prop['ZIPCODE']\n r = {'_id': _id, 'AvgTotal': prop['AV_TOTAL'], 'LivingArea': prop['LIVING_AREA'], 'GeoLocation': address}\n repo.dropCollection('property_assessment')\n repo.createCollection('property_assessment')\n repo['gasparde_ljmcgann.property_assessment'].insert_one(r)\n repo['gasparde_ljmcgann.property_assessment'].metadata({'complete': True})\n print(repo['gasparde_ljmcgann.property_assessment'].metadata())\n break\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('gasparde_ljmcgann', 'gasparde_ljmcgann')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('bdp', 'https://data.cityofboston.gov/resource/')\n this_script = doc.agent('alg:gasparde_ljmcgann#livingAreaCost', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n resource = doc.entity('bdp:wc8w-nujj', {'prov:label': '311, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n get_services = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_services, this_script)\n doc.wasAssociatedWith(get_services, this_script)\n doc.usage(get_services, resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?$format=json'})\n services = doc.entity('dat:gasparde_ljmcgann#services', {prov.model.PROV_LABEL: 'Service Request in Boston', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(services, this_script)\n doc.wasGeneratedBy(services, get_services, endTime)\n doc.wasDerivedFrom(services, resource, get_services, get_services, get_services)\n result = doc.entity('dat:gaspare_ljmcgann#property_assessment', {prov.model.PROV_LABEL: 'Price of Living Area'})\n doc.wasAttributedTo(services, this_script)\n doc.wasGeneratedBy(services, get_services, endTime)\n doc.wasDerivedFrom(result, resource, get_services, get_services, get_services)\n repo.logout()\n return doc\n<|end_body_1|>\n", "class_docstring": "", "class_name": "livingAreaCost", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass livingAreaCost:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets (not using the API here for the sake of simplicity).\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. 
Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('gasparde_ljmcgann', 'gasparde_ljmcgann')\n props = repo.gasparde_ljmcgann.properties\n for prop in props.find():\n print('hi!')\n _id = prop['_id']\n address = prop['ST_NUM'].split(' ')[0] + ' ' + prop['ST_NAME'] + ' ' + prop['ST_NAME_SUF'] + ' BOSTON, MASSACHUSETTS ' + prop['ZIPCODE']\n r = {'_id': _id, 'AvgTotal': prop['AV_TOTAL'], 'LivingArea': prop['LIVING_AREA'], 'GeoLocation': address}\n repo.dropCollection('property_assessment')\n repo.createCollection('property_assessment')\n repo['gasparde_ljmcgann.property_assessment'].insert_one(r)\n repo['gasparde_ljmcgann.property_assessment'].metadata({'complete': True})\n print(repo['gasparde_ljmcgann.property_assessment'].metadata())\n break\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('gasparde_ljmcgann', 'gasparde_ljmcgann')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('bdp', 'https://data.cityofboston.gov/resource/')\n this_script = doc.agent('alg:gasparde_ljmcgann#livingAreaCost', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n resource = doc.entity('bdp:wc8w-nujj', {'prov:label': '311, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n get_services = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_services, this_script)\n doc.wasAssociatedWith(get_services, this_script)\n doc.usage(get_services, resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?$format=json'})\n services = doc.entity('dat:gasparde_ljmcgann#services', {prov.model.PROV_LABEL: 'Service Request in Boston', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(services, this_script)\n doc.wasGeneratedBy(services, get_services, endTime)\n doc.wasDerivedFrom(services, resource, get_services, get_services, get_services)\n result = doc.entity('dat:gaspare_ljmcgann#property_assessment', {prov.model.PROV_LABEL: 'Price of Living Area'})\n doc.wasAttributedTo(services, this_script)\n doc.wasGeneratedBy(services, get_services, endTime)\n doc.wasDerivedFrom(result, resource, get_services, get_services, get_services)\n repo.logout()\n return doc\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000084", "length_bytes": 4882, "license_type": "no_license", "methods": [{"docstring": "Retrieve some data sets (not using the API here for the sake of simplicity).", "name": "execute", "signature": "def execute(trial=False)"}, {"docstring": "Create the provenance document describing everything happening in this script. 
Each run of the script will generate a new document describing that invocation event.", "name": "provenance", "signature": "def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None)"}], "n_methods": 2, "prompt": "Implement the Python class `livingAreaCost` described below.\n\nClass description:\nImplement the livingAreaCost class.\n\nMethod signatures and docstrings:\n- def execute(trial=False): Retrieve some data sets (not using the API here for the sake of simplicity).\n- def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None): Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.", "prompted_full_text": "Implement the Python class `livingAreaCost` described below.\n\nClass description:\nImplement the livingAreaCost class.\n\nMethod signatures and docstrings:\n- def execute(trial=False): Retrieve some data sets (not using the API here for the sake of simplicity).\n- def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None): Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\n\n<|skeleton|>\nclass livingAreaCost:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets (not using the API here for the sake of simplicity).\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('gasparde_ljmcgann', 'gasparde_ljmcgann')\n props = repo.gasparde_ljmcgann.properties\n for prop in props.find():\n print('hi!')\n _id = prop['_id']\n address = prop['ST_NUM'].split(' ')[0] + ' ' + prop['ST_NAME'] + ' ' + prop['ST_NAME_SUF'] + ' BOSTON, MASSACHUSETTS ' + prop['ZIPCODE']\n r = {'_id': _id, 'AvgTotal': prop['AV_TOTAL'], 'LivingArea': prop['LIVING_AREA'], 'GeoLocation': address}\n repo.dropCollection('property_assessment')\n repo.createCollection('property_assessment')\n repo['gasparde_ljmcgann.property_assessment'].insert_one(r)\n repo['gasparde_ljmcgann.property_assessment'].metadata({'complete': True})\n print(repo['gasparde_ljmcgann.property_assessment'].metadata())\n break\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('gasparde_ljmcgann', 'gasparde_ljmcgann')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('bdp', 'https://data.cityofboston.gov/resource/')\n this_script = doc.agent('alg:gasparde_ljmcgann#livingAreaCost', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n resource = doc.entity('bdp:wc8w-nujj', {'prov:label': '311, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n get_services = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n 
doc.wasAssociatedWith(get_services, this_script)\n doc.wasAssociatedWith(get_services, this_script)\n doc.usage(get_services, resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?$format=json'})\n services = doc.entity('dat:gasparde_ljmcgann#services', {prov.model.PROV_LABEL: 'Service Request in Boston', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(services, this_script)\n doc.wasGeneratedBy(services, get_services, endTime)\n doc.wasDerivedFrom(services, resource, get_services, get_services, get_services)\n result = doc.entity('dat:gaspare_ljmcgann#property_assessment', {prov.model.PROV_LABEL: 'Price of Living Area'})\n doc.wasAttributedTo(services, this_script)\n doc.wasGeneratedBy(services, get_services, endTime)\n doc.wasDerivedFrom(result, resource, get_services, get_services, get_services)\n repo.logout()\n return doc\n<|end_body_1|>\n", "revision_id": "90284cf3debbac36eead07b8d2339cdd191b86cf", "skeleton": "<|skeleton|>\nclass livingAreaCost:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets (not using the API here for the sake of simplicity).\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class livingAreaCost:\n def execute(trial=False):\n \"\"\"Retrieve some data sets (not using the API here for the sake of simplicity).\"\"\"\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('gasparde_ljmcgann', 'gasparde_ljmcgann')\n props = repo.gasparde_ljmcgann.properties\n for prop in props.find():\n print('hi!')\n _id = prop['_id']\n address = prop['ST_NUM'].split(' ')[0] + ' ' + prop['ST_NAME'] + ' ' + prop['ST_NAME_SUF'] + ' BOSTON, MASSACHUSETTS ' + prop['ZIPCODE']\n r = {'_id': _id, 'AvgTotal': prop['AV_TOTAL'], 'LivingArea': prop['LIVING_AREA'], 'GeoLocation': address}\n repo.dropCollection('property_assessment')\n repo.createCollection('property_assessment')\n repo['gasparde_ljmcgann.property_assessment'].insert_one(r)\n repo['gasparde_ljmcgann.property_assessment'].metadata({'complete': True})\n print(repo['gasparde_ljmcgann.property_assessment'].metadata())\n break\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. 
Each run of the script will generate a new document describing that invocation event.\"\"\"\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('gasparde_ljmcgann', 'gasparde_ljmcgann')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('bdp', 'https://data.cityofboston.gov/resource/')\n this_script = doc.agent('alg:gasparde_ljmcgann#livingAreaCost', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n resource = doc.entity('bdp:wc8w-nujj', {'prov:label': '311, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n get_services = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_services, this_script)\n doc.wasAssociatedWith(get_services, this_script)\n doc.usage(get_services, resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?$format=json'})\n services = doc.entity('dat:gasparde_ljmcgann#services', {prov.model.PROV_LABEL: 'Service Request in Boston', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(services, this_script)\n doc.wasGeneratedBy(services, get_services, endTime)\n doc.wasDerivedFrom(services, resource, get_services, get_services, get_services)\n result = doc.entity('dat:gaspare_ljmcgann#property_assessment', {prov.model.PROV_LABEL: 'Price of Living Area'})\n doc.wasAttributedTo(services, this_script)\n doc.wasGeneratedBy(services, get_services, endTime)\n doc.wasDerivedFrom(result, resource, get_services, get_services, get_services)\n repo.logout()\n return doc\n", "source": "the_stack_v2_python_sparse", "source_path": "gasparde_ljmcgann/livingAreaCost.py", "source_repo": "maximega/course-2019-spr-proj", "split": "val", "star_events_count": 2} {"blob_id": "44b819dfe19122868d3f208fdf445d9b9ad91dae", "bodies": ["self.is_endpoint_valid(self.request.path)\npath = self.sanitize_path(self.request.get_full_path())\npath = self.append_credentials(path)\nr = requests.get('https://services.datafordeler.dk' + path)\nresponse = HttpResponse(r.content, status=r.status_code)\nexcluded_headers = ['connection', 'content-encoding', 'content-length', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']\nfor key, value in r.headers.items():\n if key.lower() not in excluded_headers:\n response[key] = value\nreturn response", "endpoint = path.replace(self.PROXY_URL, '', 1)\nif endpoint not in self.VALID_ENDPOINTS:\n logger.warning(\"Maps endpoint was invalid: '%s' valid endpoints: %s\", endpoint, self.VALID_ENDPOINTS)\n raise PermissionDenied('No thanks')", "new_path = path.replace(self.PROXY_URL, '', 1)\nsanitized_path = re.sub('(transparent=)(true|false)', lambda match: f'{match.group(1)}{match.group(2).upper()}', new_path)\nreturn sanitized_path", "username = settings.DATAFORDELER_USER\npassword = settings.DATAFORDELER_PASSWORD\nif not username or not password:\n logger.error(\"Missing credentials for 'DATAFORDELER_USER' or 'DATAFORDELER_PASSWORD'\")\n raise MissingCredentials()\npath += f'&username={username}&password={password}'\nreturn path"], "bodies_text": "<|body_start_0|>\n self.is_endpoint_valid(self.request.path)\n path = self.sanitize_path(self.request.get_full_path())\n path = self.append_credentials(path)\n r = 
requests.get('https://services.datafordeler.dk' + path)\n response = HttpResponse(r.content, status=r.status_code)\n excluded_headers = ['connection', 'content-encoding', 'content-length', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']\n for key, value in r.headers.items():\n if key.lower() not in excluded_headers:\n response[key] = value\n return response\n<|end_body_0|>\n\n<|body_start_1|>\n endpoint = path.replace(self.PROXY_URL, '', 1)\n if endpoint not in self.VALID_ENDPOINTS:\n logger.warning(\"Maps endpoint was invalid: '%s' valid endpoints: %s\", endpoint, self.VALID_ENDPOINTS)\n raise PermissionDenied('No thanks')\n<|end_body_1|>\n\n<|body_start_2|>\n new_path = path.replace(self.PROXY_URL, '', 1)\n sanitized_path = re.sub('(transparent=)(true|false)', lambda match: f'{match.group(1)}{match.group(2).upper()}', new_path)\n return sanitized_path\n<|end_body_2|>\n\n<|body_start_3|>\n username = settings.DATAFORDELER_USER\n password = settings.DATAFORDELER_PASSWORD\n if not username or not password:\n logger.error(\"Missing credentials for 'DATAFORDELER_USER' or 'DATAFORDELER_PASSWORD'\")\n raise MissingCredentials()\n path += f'&username={username}&password={password}'\n return path\n<|end_body_3|>\n", "class_docstring": "Proxy for Datafordeler map service. Created so we can show maps without leaking the IP of our visitors.", "class_name": "MapProxyView", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MapProxyView:\n \"\"\"Proxy for Datafordeler map service. Created so we can show maps without leaking the IP of our visitors.\"\"\"\n\n def get(self, *args, **kwargs):\n \"\"\"Before we make the request we check that the path is in our whitelist. 
Before we return the response we copy headers except for a list we don't want.\"\"\"\n <|body_0|>\n\n def is_endpoint_valid(self, path: str) -> None:\n \"\"\"Validate request path against whitelisted endpoints or raise PermDenied\"\"\"\n <|body_1|>\n\n def sanitize_path(self, path: str) -> str:\n \"\"\"Sanitize path by removing PROXY_URL and set 'transparent' value to upper\"\"\"\n <|body_2|>\n\n def append_credentials(self, path: str) -> str:\n \"\"\"Verify credentials are defined in settings & append or raise exception\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.is_endpoint_valid(self.request.path)\n path = self.sanitize_path(self.request.get_full_path())\n path = self.append_credentials(path)\n r = requests.get('https://services.datafordeler.dk' + path)\n response = HttpResponse(r.content, status=r.status_code)\n excluded_headers = ['connection', 'content-encoding', 'content-length', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']\n for key, value in r.headers.items():\n if key.lower() not in excluded_headers:\n response[key] = value\n return response\n<|end_body_0|>\n\n<|body_start_1|>\n endpoint = path.replace(self.PROXY_URL, '', 1)\n if endpoint not in self.VALID_ENDPOINTS:\n logger.warning(\"Maps endpoint was invalid: '%s' valid endpoints: %s\", endpoint, self.VALID_ENDPOINTS)\n raise PermissionDenied('No thanks')\n<|end_body_1|>\n\n<|body_start_2|>\n new_path = path.replace(self.PROXY_URL, '', 1)\n sanitized_path = re.sub('(transparent=)(true|false)', lambda match: f'{match.group(1)}{match.group(2).upper()}', new_path)\n return sanitized_path\n<|end_body_2|>\n\n<|body_start_3|>\n username = settings.DATAFORDELER_USER\n password = settings.DATAFORDELER_PASSWORD\n if not username or not password:\n logger.error(\"Missing credentials for 'DATAFORDELER_USER' or 'DATAFORDELER_PASSWORD'\")\n raise MissingCredentials()\n path += f'&username={username}&password={password}'\n return path\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000085", "length_bytes": 3504, "license_type": "permissive", "methods": [{"docstring": "Before we make the request we check that the path is in our whitelist. Before we return the response we copy headers except for a list we don't want.", "name": "get", "signature": "def get(self, *args, **kwargs)"}, {"docstring": "Validate request path against whitelisted endpoints or raise PermDenied", "name": "is_endpoint_valid", "signature": "def is_endpoint_valid(self, path: str) -> None"}, {"docstring": "Sanitize path by removing PROXY_URL and set 'transparent' value to upper", "name": "sanitize_path", "signature": "def sanitize_path(self, path: str) -> str"}, {"docstring": "Verify credentials are defined in settings & append or raise exception", "name": "append_credentials", "signature": "def append_credentials(self, path: str) -> str"}], "n_methods": 4, "prompt": "Implement the Python class `MapProxyView` described below.\n\nClass description:\nProxy for Datafordeler map service. Created so we can show maps without leaking the IP of our visitors.\n\nMethod signatures and docstrings:\n- def get(self, *args, **kwargs): Before we make the request we check that the path is in our whitelist. 
Before we return the response we copy headers except for a list we don't want.\n- def is_endpoint_valid(self, path: str) -> None: Validate request path against whitelisted endpoints or raise PermDenied\n- def sanitize_path(self, path: str) -> str: Sanitize path by removing PROXY_URL and set 'transparent' value to upper\n- def append_credentials(self, path: str) -> str: Verify credentials are defined in settings & append or raise exception", "prompted_full_text": "Implement the Python class `MapProxyView` described below.\n\nClass description:\nProxy for Datafordeler map service. Created so we can show maps without leaking the IP of our visitors.\n\nMethod signatures and docstrings:\n- def get(self, *args, **kwargs): Before we make the request we check that the path is in our whitelist. Before we return the response we copy headers except for a list we don't want.\n- def is_endpoint_valid(self, path: str) -> None: Validate request path against whitelisted endpoints or raise PermDenied\n- def sanitize_path(self, path: str) -> str: Sanitize path by removing PROXY_URL and set 'transparent' value to upper\n- def append_credentials(self, path: str) -> str: Verify credentials are defined in settings & append or raise exception\n\n<|skeleton|>\nclass MapProxyView:\n \"\"\"Proxy for Datafordeler map service. Created so we can show maps without leaking the IP of our visitors.\"\"\"\n\n def get(self, *args, **kwargs):\n \"\"\"Before we make the request we check that the path is in our whitelist. Before we return the response we copy headers except for a list we don't want.\"\"\"\n <|body_0|>\n\n def is_endpoint_valid(self, path: str) -> None:\n \"\"\"Validate request path against whitelisted endpoints or raise PermDenied\"\"\"\n <|body_1|>\n\n def sanitize_path(self, path: str) -> str:\n \"\"\"Sanitize path by removing PROXY_URL and set 'transparent' value to upper\"\"\"\n <|body_2|>\n\n def append_credentials(self, path: str) -> str:\n \"\"\"Verify credentials are defined in settings & append or raise exception\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.is_endpoint_valid(self.request.path)\n path = self.sanitize_path(self.request.get_full_path())\n path = self.append_credentials(path)\n r = requests.get('https://services.datafordeler.dk' + path)\n response = HttpResponse(r.content, status=r.status_code)\n excluded_headers = ['connection', 'content-encoding', 'content-length', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']\n for key, value in r.headers.items():\n if key.lower() not in excluded_headers:\n response[key] = value\n return response\n<|end_body_0|>\n\n<|body_start_1|>\n endpoint = path.replace(self.PROXY_URL, '', 1)\n if endpoint not in self.VALID_ENDPOINTS:\n logger.warning(\"Maps endpoint was invalid: '%s' valid endpoints: %s\", endpoint, self.VALID_ENDPOINTS)\n raise PermissionDenied('No thanks')\n<|end_body_1|>\n\n<|body_start_2|>\n new_path = path.replace(self.PROXY_URL, '', 1)\n sanitized_path = re.sub('(transparent=)(true|false)', lambda match: f'{match.group(1)}{match.group(2).upper()}', new_path)\n return sanitized_path\n<|end_body_2|>\n\n<|body_start_3|>\n username = settings.DATAFORDELER_USER\n password = settings.DATAFORDELER_PASSWORD\n if not username or not password:\n logger.error(\"Missing credentials for 'DATAFORDELER_USER' or 'DATAFORDELER_PASSWORD'\")\n raise MissingCredentials()\n path += f'&username={username}&password={password}'\n return path\n<|end_body_3|>\n", "revision_id": 
"767deb7f58429e9162e0c2ef79be9f0f38f37ce1", "skeleton": "<|skeleton|>\nclass MapProxyView:\n \"\"\"Proxy for Datafordeler map service. Created so we can show maps without leaking the IP of our visitors.\"\"\"\n\n def get(self, *args, **kwargs):\n \"\"\"Before we make the request we check that the path is in our whitelist. Before we return the response we copy headers except for a list we don't want.\"\"\"\n <|body_0|>\n\n def is_endpoint_valid(self, path: str) -> None:\n \"\"\"Validate request path against whitelisted endpoints or raise PermDenied\"\"\"\n <|body_1|>\n\n def sanitize_path(self, path: str) -> str:\n \"\"\"Sanitize path by removing PROXY_URL and set 'transparent' value to upper\"\"\"\n <|body_2|>\n\n def append_credentials(self, path: str) -> str:\n \"\"\"Verify credentials are defined in settings & append or raise exception\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MapProxyView:\n \"\"\"Proxy for Datafordeler map service. Created so we can show maps without leaking the IP of our visitors.\"\"\"\n\n def get(self, *args, **kwargs):\n \"\"\"Before we make the request we check that the path is in our whitelist. Before we return the response we copy headers except for a list we don't want.\"\"\"\n self.is_endpoint_valid(self.request.path)\n path = self.sanitize_path(self.request.get_full_path())\n path = self.append_credentials(path)\n r = requests.get('https://services.datafordeler.dk' + path)\n response = HttpResponse(r.content, status=r.status_code)\n excluded_headers = ['connection', 'content-encoding', 'content-length', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']\n for key, value in r.headers.items():\n if key.lower() not in excluded_headers:\n response[key] = value\n return response\n\n def is_endpoint_valid(self, path: str) -> None:\n \"\"\"Validate request path against whitelisted endpoints or raise PermDenied\"\"\"\n endpoint = path.replace(self.PROXY_URL, '', 1)\n if endpoint not in self.VALID_ENDPOINTS:\n logger.warning(\"Maps endpoint was invalid: '%s' valid endpoints: %s\", endpoint, self.VALID_ENDPOINTS)\n raise PermissionDenied('No thanks')\n\n def sanitize_path(self, path: str) -> str:\n \"\"\"Sanitize path by removing PROXY_URL and set 'transparent' value to upper\"\"\"\n new_path = path.replace(self.PROXY_URL, '', 1)\n sanitized_path = re.sub('(transparent=)(true|false)', lambda match: f'{match.group(1)}{match.group(2).upper()}', new_path)\n return sanitized_path\n\n def append_credentials(self, path: str) -> str:\n \"\"\"Verify credentials are defined in settings & append or raise exception\"\"\"\n username = settings.DATAFORDELER_USER\n password = settings.DATAFORDELER_PASSWORD\n if not username or not password:\n logger.error(\"Missing credentials for 'DATAFORDELER_USER' or 'DATAFORDELER_PASSWORD'\")\n raise MissingCredentials()\n path += f'&username={username}&password={password}'\n return path\n", "source": "the_stack_v2_python_sparse", "source_path": "src/maps/views.py", "source_repo": "bornhack/bornhack-website", "split": "val", "star_events_count": 9} {"blob_id": "05e3f156ebc537a3fa03dc0c8c32802d4954f670", "bodies": ["if 'case_name' not in kwargs:\n kwargs['case_name'] = 'rally_jobs'\nsuper().__init__(**kwargs)\nself.task_file = os.path.join(self.rally_dir, 'rally_jobs.yaml')\nself.task_yaml = None", "super().prepare_run(**kwargs)\nwith open(os.path.join(self.rally_dir, 
'rally_jobs.yaml'), 'r', encoding='utf-8') as task_file:\n self.task_yaml = yaml.safe_load(task_file)\nfor task in self.task_yaml:\n if task not in self.tests:\n raise Exception(f\"Test '{task}' not in '{self.tests}'\")", "LOGGER.debug('Applying blacklist...')\nblack_tests = list(set(self.excl_func() + self.excl_scenario()))\nif black_tests:\n LOGGER.debug('Blacklisted tests: %s', str(black_tests))\ntemplate = YAML(typ='jinja2')\nwith open(case_file_name, 'r', encoding='utf-8') as fname:\n cases = template.load(fname)\nif cases.get('version', 1) == 1:\n for name in cases.keys():\n if self.in_iterable_re(name, black_tests):\n cases.pop(name)\nelse:\n for sind, subtask in reversed(list(enumerate(cases.get('subtasks', [])))):\n for wind, workload in reversed(list(enumerate(subtask.get('workloads', [])))):\n scenario = workload.get('scenario', {})\n for name in scenario.keys():\n if self.in_iterable_re(name, black_tests):\n cases['subtasks'][sind]['workloads'].pop(wind)\n break\n if 'workloads' in cases['subtasks'][sind]:\n if not cases['subtasks'][sind]['workloads']:\n cases['subtasks'].pop(sind)\n for sind, subtask in reversed(list(enumerate(cases.get('subtasks', [])))):\n scenario = subtask.get('scenario', {})\n for name in scenario.keys():\n if self.in_iterable_re(name, black_tests):\n cases['subtasks'].pop(sind)\n break\nwith open(result_file_name, 'w', encoding='utf-8') as fname:\n template.dump(cases, fname)", "task_args = {}\nif self.ext_net:\n task_args['floating_network'] = str(self.ext_net.name)\nelse:\n task_args['floating_network'] = ''\ntask_args['image_name'] = str(self.image.name)\ntask_args['flavor_name'] = str(self.flavor.name)\nreturn task_args", "jobs_dir = os.path.join(getattr(config.CONF, 'dir_rally_data'), test_name, 'rally-jobs')\ntask_name = self.task_yaml.get(test_name).get('task')\ntask = os.path.join(jobs_dir, task_name)\nif not os.path.exists(task):\n raise Exception(f\"The scenario '{task}' does not exist.\")\nLOGGER.debug('Scenario fetched from : %s', task)\nif not os.path.exists(self.temp_dir):\n os.makedirs(self.temp_dir)\ntask_file_name = os.path.join(self.temp_dir, task_name)\nself.apply_blacklist(task, task_file_name)\nself.run_cmd = ['rally', 'task', 'start', '--tag', test_name, '--task', task_file_name, '--task-args', str(self.build_task_args(test_name))]\nreturn True"], "bodies_text": "<|body_start_0|>\n if 'case_name' not in kwargs:\n kwargs['case_name'] = 'rally_jobs'\n super().__init__(**kwargs)\n self.task_file = os.path.join(self.rally_dir, 'rally_jobs.yaml')\n self.task_yaml = None\n<|end_body_0|>\n\n<|body_start_1|>\n super().prepare_run(**kwargs)\n with open(os.path.join(self.rally_dir, 'rally_jobs.yaml'), 'r', encoding='utf-8') as task_file:\n self.task_yaml = yaml.safe_load(task_file)\n for task in self.task_yaml:\n if task not in self.tests:\n raise Exception(f\"Test '{task}' not in '{self.tests}'\")\n<|end_body_1|>\n\n<|body_start_2|>\n LOGGER.debug('Applying blacklist...')\n black_tests = list(set(self.excl_func() + self.excl_scenario()))\n if black_tests:\n LOGGER.debug('Blacklisted tests: %s', str(black_tests))\n template = YAML(typ='jinja2')\n with open(case_file_name, 'r', encoding='utf-8') as fname:\n cases = template.load(fname)\n if cases.get('version', 1) == 1:\n for name in cases.keys():\n if self.in_iterable_re(name, black_tests):\n cases.pop(name)\n else:\n for sind, subtask in reversed(list(enumerate(cases.get('subtasks', [])))):\n for wind, workload in reversed(list(enumerate(subtask.get('workloads', [])))):\n scenario = 
workload.get('scenario', {})\n for name in scenario.keys():\n if self.in_iterable_re(name, black_tests):\n cases['subtasks'][sind]['workloads'].pop(wind)\n break\n if 'workloads' in cases['subtasks'][sind]:\n if not cases['subtasks'][sind]['workloads']:\n cases['subtasks'].pop(sind)\n for sind, subtask in reversed(list(enumerate(cases.get('subtasks', [])))):\n scenario = subtask.get('scenario', {})\n for name in scenario.keys():\n if self.in_iterable_re(name, black_tests):\n cases['subtasks'].pop(sind)\n break\n with open(result_file_name, 'w', encoding='utf-8') as fname:\n template.dump(cases, fname)\n<|end_body_2|>\n\n<|body_start_3|>\n task_args = {}\n if self.ext_net:\n task_args['floating_network'] = str(self.ext_net.name)\n else:\n task_args['floating_network'] = ''\n task_args['image_name'] = str(self.image.name)\n task_args['flavor_name'] = str(self.flavor.name)\n return task_args\n<|end_body_3|>\n\n<|body_start_4|>\n jobs_dir = os.path.join(getattr(config.CONF, 'dir_rally_data'), test_name, 'rally-jobs')\n task_name = self.task_yaml.get(test_name).get('task')\n task = os.path.join(jobs_dir, task_name)\n if not os.path.exists(task):\n raise Exception(f\"The scenario '{task}' does not exist.\")\n LOGGER.debug('Scenario fetched from : %s', task)\n if not os.path.exists(self.temp_dir):\n os.makedirs(self.temp_dir)\n task_file_name = os.path.join(self.temp_dir, task_name)\n self.apply_blacklist(task, task_file_name)\n self.run_cmd = ['rally', 'task', 'start', '--tag', test_name, '--task', task_file_name, '--task-args', str(self.build_task_args(test_name))]\n return True\n<|end_body_4|>\n", "class_docstring": "Rally OpenStack CI testcase implementation.", "class_name": "RallyJobs", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RallyJobs:\n \"\"\"Rally OpenStack CI testcase implementation.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialize RallyJobs object.\"\"\"\n <|body_0|>\n\n def prepare_run(self, **kwargs):\n \"\"\"Create resources needed by test scenarios.\"\"\"\n <|body_1|>\n\n def apply_blacklist(self, case_file_name, result_file_name):\n \"\"\"Apply blacklist.\"\"\"\n <|body_2|>\n\n def build_task_args(self, test_name):\n \"\"\"Build arguments for the Rally task.\"\"\"\n <|body_3|>\n\n def prepare_task(self, test_name):\n \"\"\"Prepare resources for test run.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if 'case_name' not in kwargs:\n kwargs['case_name'] = 'rally_jobs'\n super().__init__(**kwargs)\n self.task_file = os.path.join(self.rally_dir, 'rally_jobs.yaml')\n self.task_yaml = None\n<|end_body_0|>\n\n<|body_start_1|>\n super().prepare_run(**kwargs)\n with open(os.path.join(self.rally_dir, 'rally_jobs.yaml'), 'r', encoding='utf-8') as task_file:\n self.task_yaml = yaml.safe_load(task_file)\n for task in self.task_yaml:\n if task not in self.tests:\n raise Exception(f\"Test '{task}' not in '{self.tests}'\")\n<|end_body_1|>\n\n<|body_start_2|>\n LOGGER.debug('Applying blacklist...')\n black_tests = list(set(self.excl_func() + self.excl_scenario()))\n if black_tests:\n LOGGER.debug('Blacklisted tests: %s', str(black_tests))\n template = YAML(typ='jinja2')\n with open(case_file_name, 'r', encoding='utf-8') as fname:\n cases = template.load(fname)\n if cases.get('version', 1) == 1:\n for name in cases.keys():\n if self.in_iterable_re(name, black_tests):\n cases.pop(name)\n else:\n for sind, subtask in reversed(list(enumerate(cases.get('subtasks', [])))):\n for wind, workload in 
reversed(list(enumerate(subtask.get('workloads', [])))):\n scenario = workload.get('scenario', {})\n for name in scenario.keys():\n if self.in_iterable_re(name, black_tests):\n cases['subtasks'][sind]['workloads'].pop(wind)\n break\n if 'workloads' in cases['subtasks'][sind]:\n if not cases['subtasks'][sind]['workloads']:\n cases['subtasks'].pop(sind)\n for sind, subtask in reversed(list(enumerate(cases.get('subtasks', [])))):\n scenario = subtask.get('scenario', {})\n for name in scenario.keys():\n if self.in_iterable_re(name, black_tests):\n cases['subtasks'].pop(sind)\n break\n with open(result_file_name, 'w', encoding='utf-8') as fname:\n template.dump(cases, fname)\n<|end_body_2|>\n\n<|body_start_3|>\n task_args = {}\n if self.ext_net:\n task_args['floating_network'] = str(self.ext_net.name)\n else:\n task_args['floating_network'] = ''\n task_args['image_name'] = str(self.image.name)\n task_args['flavor_name'] = str(self.flavor.name)\n return task_args\n<|end_body_3|>\n\n<|body_start_4|>\n jobs_dir = os.path.join(getattr(config.CONF, 'dir_rally_data'), test_name, 'rally-jobs')\n task_name = self.task_yaml.get(test_name).get('task')\n task = os.path.join(jobs_dir, task_name)\n if not os.path.exists(task):\n raise Exception(f\"The scenario '{task}' does not exist.\")\n LOGGER.debug('Scenario fetched from : %s', task)\n if not os.path.exists(self.temp_dir):\n os.makedirs(self.temp_dir)\n task_file_name = os.path.join(self.temp_dir, task_name)\n self.apply_blacklist(task, task_file_name)\n self.run_cmd = ['rally', 'task', 'start', '--tag', test_name, '--task', task_file_name, '--task-args', str(self.build_task_args(test_name))]\n return True\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000086", "length_bytes": 32891, "license_type": "permissive", "methods": [{"docstring": "Initialize RallyJobs object.", "name": "__init__", "signature": "def __init__(self, **kwargs)"}, {"docstring": "Create resources needed by test scenarios.", "name": "prepare_run", "signature": "def prepare_run(self, **kwargs)"}, {"docstring": "Apply blacklist.", "name": "apply_blacklist", "signature": "def apply_blacklist(self, case_file_name, result_file_name)"}, {"docstring": "Build arguments for the Rally task.", "name": "build_task_args", "signature": "def build_task_args(self, test_name)"}, {"docstring": "Prepare resources for test run.", "name": "prepare_task", "signature": "def prepare_task(self, test_name)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_003839", "prompt": "Implement the Python class `RallyJobs` described below.\n\nClass description:\nRally OpenStack CI testcase implementation.\n\nMethod signatures and docstrings:\n- def __init__(self, **kwargs): Initialize RallyJobs object.\n- def prepare_run(self, **kwargs): Create resources needed by test scenarios.\n- def apply_blacklist(self, case_file_name, result_file_name): Apply blacklist.\n- def build_task_args(self, test_name): Build arguments for the Rally task.\n- def prepare_task(self, test_name): Prepare resources for test run.", "prompted_full_text": "Implement the Python class `RallyJobs` described below.\n\nClass description:\nRally OpenStack CI testcase implementation.\n\nMethod signatures and docstrings:\n- def __init__(self, **kwargs): Initialize RallyJobs object.\n- def prepare_run(self, **kwargs): Create resources needed by test scenarios.\n- def apply_blacklist(self, case_file_name, result_file_name): Apply blacklist.\n- def build_task_args(self, test_name): Build arguments for the Rally task.\n- def 
prepare_task(self, test_name): Prepare resources for test run.\n\n<|skeleton|>\nclass RallyJobs:\n \"\"\"Rally OpenStack CI testcase implementation.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialize RallyJobs object.\"\"\"\n <|body_0|>\n\n def prepare_run(self, **kwargs):\n \"\"\"Create resources needed by test scenarios.\"\"\"\n <|body_1|>\n\n def apply_blacklist(self, case_file_name, result_file_name):\n \"\"\"Apply blacklist.\"\"\"\n <|body_2|>\n\n def build_task_args(self, test_name):\n \"\"\"Build arguments for the Rally task.\"\"\"\n <|body_3|>\n\n def prepare_task(self, test_name):\n \"\"\"Prepare resources for test run.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if 'case_name' not in kwargs:\n kwargs['case_name'] = 'rally_jobs'\n super().__init__(**kwargs)\n self.task_file = os.path.join(self.rally_dir, 'rally_jobs.yaml')\n self.task_yaml = None\n<|end_body_0|>\n\n<|body_start_1|>\n super().prepare_run(**kwargs)\n with open(os.path.join(self.rally_dir, 'rally_jobs.yaml'), 'r', encoding='utf-8') as task_file:\n self.task_yaml = yaml.safe_load(task_file)\n for task in self.task_yaml:\n if task not in self.tests:\n raise Exception(f\"Test '{task}' not in '{self.tests}'\")\n<|end_body_1|>\n\n<|body_start_2|>\n LOGGER.debug('Applying blacklist...')\n black_tests = list(set(self.excl_func() + self.excl_scenario()))\n if black_tests:\n LOGGER.debug('Blacklisted tests: %s', str(black_tests))\n template = YAML(typ='jinja2')\n with open(case_file_name, 'r', encoding='utf-8') as fname:\n cases = template.load(fname)\n if cases.get('version', 1) == 1:\n for name in cases.keys():\n if self.in_iterable_re(name, black_tests):\n cases.pop(name)\n else:\n for sind, subtask in reversed(list(enumerate(cases.get('subtasks', [])))):\n for wind, workload in reversed(list(enumerate(subtask.get('workloads', [])))):\n scenario = workload.get('scenario', {})\n for name in scenario.keys():\n if self.in_iterable_re(name, black_tests):\n cases['subtasks'][sind]['workloads'].pop(wind)\n break\n if 'workloads' in cases['subtasks'][sind]:\n if not cases['subtasks'][sind]['workloads']:\n cases['subtasks'].pop(sind)\n for sind, subtask in reversed(list(enumerate(cases.get('subtasks', [])))):\n scenario = subtask.get('scenario', {})\n for name in scenario.keys():\n if self.in_iterable_re(name, black_tests):\n cases['subtasks'].pop(sind)\n break\n with open(result_file_name, 'w', encoding='utf-8') as fname:\n template.dump(cases, fname)\n<|end_body_2|>\n\n<|body_start_3|>\n task_args = {}\n if self.ext_net:\n task_args['floating_network'] = str(self.ext_net.name)\n else:\n task_args['floating_network'] = ''\n task_args['image_name'] = str(self.image.name)\n task_args['flavor_name'] = str(self.flavor.name)\n return task_args\n<|end_body_3|>\n\n<|body_start_4|>\n jobs_dir = os.path.join(getattr(config.CONF, 'dir_rally_data'), test_name, 'rally-jobs')\n task_name = self.task_yaml.get(test_name).get('task')\n task = os.path.join(jobs_dir, task_name)\n if not os.path.exists(task):\n raise Exception(f\"The scenario '{task}' does not exist.\")\n LOGGER.debug('Scenario fetched from : %s', task)\n if not os.path.exists(self.temp_dir):\n os.makedirs(self.temp_dir)\n task_file_name = os.path.join(self.temp_dir, task_name)\n self.apply_blacklist(task, task_file_name)\n self.run_cmd = ['rally', 'task', 'start', '--tag', test_name, '--task', task_file_name, '--task-args', str(self.build_task_args(test_name))]\n return True\n<|end_body_4|>\n", "revision_id": "27107d1f871dd7eb9eeab5f7c51086f3ef7e2ebe", 
"skeleton": "<|skeleton|>\nclass RallyJobs:\n \"\"\"Rally OpenStack CI testcase implementation.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialize RallyJobs object.\"\"\"\n <|body_0|>\n\n def prepare_run(self, **kwargs):\n \"\"\"Create resources needed by test scenarios.\"\"\"\n <|body_1|>\n\n def apply_blacklist(self, case_file_name, result_file_name):\n \"\"\"Apply blacklist.\"\"\"\n <|body_2|>\n\n def build_task_args(self, test_name):\n \"\"\"Build arguments for the Rally task.\"\"\"\n <|body_3|>\n\n def prepare_task(self, test_name):\n \"\"\"Prepare resources for test run.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RallyJobs:\n \"\"\"Rally OpenStack CI testcase implementation.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialize RallyJobs object.\"\"\"\n if 'case_name' not in kwargs:\n kwargs['case_name'] = 'rally_jobs'\n super().__init__(**kwargs)\n self.task_file = os.path.join(self.rally_dir, 'rally_jobs.yaml')\n self.task_yaml = None\n\n def prepare_run(self, **kwargs):\n \"\"\"Create resources needed by test scenarios.\"\"\"\n super().prepare_run(**kwargs)\n with open(os.path.join(self.rally_dir, 'rally_jobs.yaml'), 'r', encoding='utf-8') as task_file:\n self.task_yaml = yaml.safe_load(task_file)\n for task in self.task_yaml:\n if task not in self.tests:\n raise Exception(f\"Test '{task}' not in '{self.tests}'\")\n\n def apply_blacklist(self, case_file_name, result_file_name):\n \"\"\"Apply blacklist.\"\"\"\n LOGGER.debug('Applying blacklist...')\n black_tests = list(set(self.excl_func() + self.excl_scenario()))\n if black_tests:\n LOGGER.debug('Blacklisted tests: %s', str(black_tests))\n template = YAML(typ='jinja2')\n with open(case_file_name, 'r', encoding='utf-8') as fname:\n cases = template.load(fname)\n if cases.get('version', 1) == 1:\n for name in cases.keys():\n if self.in_iterable_re(name, black_tests):\n cases.pop(name)\n else:\n for sind, subtask in reversed(list(enumerate(cases.get('subtasks', [])))):\n for wind, workload in reversed(list(enumerate(subtask.get('workloads', [])))):\n scenario = workload.get('scenario', {})\n for name in scenario.keys():\n if self.in_iterable_re(name, black_tests):\n cases['subtasks'][sind]['workloads'].pop(wind)\n break\n if 'workloads' in cases['subtasks'][sind]:\n if not cases['subtasks'][sind]['workloads']:\n cases['subtasks'].pop(sind)\n for sind, subtask in reversed(list(enumerate(cases.get('subtasks', [])))):\n scenario = subtask.get('scenario', {})\n for name in scenario.keys():\n if self.in_iterable_re(name, black_tests):\n cases['subtasks'].pop(sind)\n break\n with open(result_file_name, 'w', encoding='utf-8') as fname:\n template.dump(cases, fname)\n\n def build_task_args(self, test_name):\n \"\"\"Build arguments for the Rally task.\"\"\"\n task_args = {}\n if self.ext_net:\n task_args['floating_network'] = str(self.ext_net.name)\n else:\n task_args['floating_network'] = ''\n task_args['image_name'] = str(self.image.name)\n task_args['flavor_name'] = str(self.flavor.name)\n return task_args\n\n def prepare_task(self, test_name):\n \"\"\"Prepare resources for test run.\"\"\"\n jobs_dir = os.path.join(getattr(config.CONF, 'dir_rally_data'), test_name, 'rally-jobs')\n task_name = self.task_yaml.get(test_name).get('task')\n task = os.path.join(jobs_dir, task_name)\n if not os.path.exists(task):\n raise Exception(f\"The scenario '{task}' does not exist.\")\n LOGGER.debug('Scenario fetched from 
: %s', task)\n if not os.path.exists(self.temp_dir):\n os.makedirs(self.temp_dir)\n task_file_name = os.path.join(self.temp_dir, task_name)\n self.apply_blacklist(task, task_file_name)\n self.run_cmd = ['rally', 'task', 'start', '--tag', test_name, '--task', task_file_name, '--task-args', str(self.build_task_args(test_name))]\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "functest/opnfv_tests/openstack/rally/rally.py", "source_repo": "opnfv/functest", "split": "val", "star_events_count": 23} {"blob_id": "d0dbe20e7be354ff1c9a605d07b30f41a4ad3bd4", "bodies": ["\"\"\"\n #Extract data for existing products\n data_to_update = []\n for index, product in enumerate(validated_data):\n if \"id\" in product:\n data_to_update.append(validated_data.pop(index))\n\n #Update existing products with extracted data\n self.update(Product.objects.filter(supply=self.context['supply']), data_to_update)\n\n #Create new products for data without ids\n return super(ProductListSerializer, self).create(validated_data)\n \"\"\"\nlogger.debug(validated_data)\nreturn self.update(Product.objects.filter(supply=self.context['supply']), validated_data)", "\"\"\"\n try:\n product_mapping = {product.id: product for product in instance}\n data_mapping = {int(item.get('id', 0)): item for item in validated_data}\n except SyntaxError as e:\n \"\"\"\nproduct_mapping = {}\nfor product in instance:\n product_mapping[product.supplier.id] = product\ndata_mapping = {}\nfor item in validated_data:\n try:\n logger.debug(item)\n data_mapping[int(item['supplier'].id)] = item\n except (KeyError, AttributeError) as e:\n pass\nret = []\nfor product_id, data in data_mapping.items():\n product = product_mapping.get(product_id, None)\n if product is None:\n ret.append(self.child.create(data))\n else:\n ret.append(self.child.update(product, data))\n'\\n for product_id, product in product_mapping.items():\\n if product_id not in data_mapping:\\n product.delete()\\n '\nreturn ret"], "bodies_text": "<|body_start_0|>\n \"\"\"\n #Extract data for existing products\n data_to_update = []\n for index, product in enumerate(validated_data):\n if \"id\" in product:\n data_to_update.append(validated_data.pop(index))\n\n #Update existing products with extracted data\n self.update(Product.objects.filter(supply=self.context['supply']), data_to_update)\n\n #Create new products for data without ids\n return super(ProductListSerializer, self).create(validated_data)\n \"\"\"\n logger.debug(validated_data)\n return self.update(Product.objects.filter(supply=self.context['supply']), validated_data)\n<|end_body_0|>\n\n<|body_start_1|>\n \"\"\"\n try:\n product_mapping = {product.id: product for product in instance}\n data_mapping = {int(item.get('id', 0)): item for item in validated_data}\n except SyntaxError as e:\n \"\"\"\n product_mapping = {}\n for product in instance:\n product_mapping[product.supplier.id] = product\n data_mapping = {}\n for item in validated_data:\n try:\n logger.debug(item)\n data_mapping[int(item['supplier'].id)] = item\n except (KeyError, AttributeError) as e:\n pass\n ret = []\n for product_id, data in data_mapping.items():\n product = product_mapping.get(product_id, None)\n if product is None:\n ret.append(self.child.create(data))\n else:\n ret.append(self.child.update(product, data))\n '\\n for product_id, product in product_mapping.items():\\n if product_id not in data_mapping:\\n product.delete()\\n '\n return ret\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ProductListSerializer", 
"detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProductListSerializer:\n\n def create(self, validated_data):\n \"\"\"Override the 'create' method We separate the data with ids, which will be used to update existing products. After the existing products are updated or deleted, then new products are created for data without ids\"\"\"\n <|body_0|>\n\n def update(self, instance, validated_data):\n \"\"\"Implement 'update' method This method will both create and update existing products, based on whether there is an id present in the data\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n \"\"\"\n #Extract data for existing products\n data_to_update = []\n for index, product in enumerate(validated_data):\n if \"id\" in product:\n data_to_update.append(validated_data.pop(index))\n\n #Update existing products with extracted data\n self.update(Product.objects.filter(supply=self.context['supply']), data_to_update)\n\n #Create new products for data without ids\n return super(ProductListSerializer, self).create(validated_data)\n \"\"\"\n logger.debug(validated_data)\n return self.update(Product.objects.filter(supply=self.context['supply']), validated_data)\n<|end_body_0|>\n\n<|body_start_1|>\n \"\"\"\n try:\n product_mapping = {product.id: product for product in instance}\n data_mapping = {int(item.get('id', 0)): item for item in validated_data}\n except SyntaxError as e:\n \"\"\"\n product_mapping = {}\n for product in instance:\n product_mapping[product.supplier.id] = product\n data_mapping = {}\n for item in validated_data:\n try:\n logger.debug(item)\n data_mapping[int(item['supplier'].id)] = item\n except (KeyError, AttributeError) as e:\n pass\n ret = []\n for product_id, data in data_mapping.items():\n product = product_mapping.get(product_id, None)\n if product is None:\n ret.append(self.child.create(data))\n else:\n ret.append(self.child.update(product, data))\n '\\n for product_id, product in product_mapping.items():\\n if product_id not in data_mapping:\\n product.delete()\\n '\n return ret\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000087", "length_bytes": 21355, "license_type": "no_license", "methods": [{"docstring": "Override the 'create' method We separate the data with ids, which will be used to update existing products. After the existing products are updated or deleted, then new products are created for data without ids", "name": "create", "signature": "def create(self, validated_data)"}, {"docstring": "Implement 'update' method This method will both create and update existing products, based on whether there is an id present in the data", "name": "update", "signature": "def update(self, instance, validated_data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007191", "prompt": "Implement the Python class `ProductListSerializer` described below.\n\nClass description:\nImplement the ProductListSerializer class.\n\nMethod signatures and docstrings:\n- def create(self, validated_data): Override the 'create' method We separate the data with ids, which will be used to update existing products. 
After the existing products are updated or deleted, then new products are created for data without ids\n- def update(self, instance, validated_data): Implement 'update' method This method will both create and update existing products, based on whether there is an id present in the data", "prompted_full_text": "Implement the Python class `ProductListSerializer` described below.\n\nClass description:\nImplement the ProductListSerializer class.\n\nMethod signatures and docstrings:\n- def create(self, validated_data): Override the 'create' method We separate the data with ids, which will be used to update existing products. After the existing products are updated or deleted, then new products are created for data without ids\n- def update(self, instance, validated_data): Implement 'update' method This method will both create and update existing products, based on whether there is an id present in the data\n\n<|skeleton|>\nclass ProductListSerializer:\n\n def create(self, validated_data):\n \"\"\"Override the 'create' method We separate the data with ids, which will be used to update existing products. After the existing products are updated or deleted, then new products are created for data without ids\"\"\"\n <|body_0|>\n\n def update(self, instance, validated_data):\n \"\"\"Implement 'update' method This method will both create and update existing products, based on whether there is an id present in the data\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n \"\"\"\n #Extract data for existing products\n data_to_update = []\n for index, product in enumerate(validated_data):\n if \"id\" in product:\n data_to_update.append(validated_data.pop(index))\n\n #Update existing products with extracted data\n self.update(Product.objects.filter(supply=self.context['supply']), data_to_update)\n\n #Create new products for data without ids\n return super(ProductListSerializer, self).create(validated_data)\n \"\"\"\n logger.debug(validated_data)\n return self.update(Product.objects.filter(supply=self.context['supply']), validated_data)\n<|end_body_0|>\n\n<|body_start_1|>\n \"\"\"\n try:\n product_mapping = {product.id: product for product in instance}\n data_mapping = {int(item.get('id', 0)): item for item in validated_data}\n except SyntaxError as e:\n \"\"\"\n product_mapping = {}\n for product in instance:\n product_mapping[product.supplier.id] = product\n data_mapping = {}\n for item in validated_data:\n try:\n logger.debug(item)\n data_mapping[int(item['supplier'].id)] = item\n except (KeyError, AttributeError) as e:\n pass\n ret = []\n for product_id, data in data_mapping.items():\n product = product_mapping.get(product_id, None)\n if product is None:\n ret.append(self.child.create(data))\n else:\n ret.append(self.child.update(product, data))\n '\\n for product_id, product in product_mapping.items():\\n if product_id not in data_mapping:\\n product.delete()\\n '\n return ret\n<|end_body_1|>\n", "revision_id": "bef520659a7316c861933f9609b6b9ca7d9f47ac", "skeleton": "<|skeleton|>\nclass ProductListSerializer:\n\n def create(self, validated_data):\n \"\"\"Override the 'create' method We separate the data with ids, which will be used to update existing products. 
After the existing products are updated or deleted, then new products are created for data without ids\"\"\"\n <|body_0|>\n\n def update(self, instance, validated_data):\n \"\"\"Implement 'update' method This method will both create and update existing products, based on whether there is an id present in the data\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ProductListSerializer:\n def create(self, validated_data):\n \"\"\"Override the 'create' method We separate the data with ids, which will be used to update existing products. After the existing products are updated or deleted, then new products are created for data without ids\"\"\"\n \"\"\"\n #Extract data for existing products\n data_to_update = []\n for index, product in enumerate(validated_data):\n if \"id\" in product:\n data_to_update.append(validated_data.pop(index))\n\n #Update existing products with extracted data\n self.update(Product.objects.filter(supply=self.context['supply']), data_to_update)\n\n #Create new products for data without ids\n return super(ProductListSerializer, self).create(validated_data)\n \"\"\"\n logger.debug(validated_data)\n return self.update(Product.objects.filter(supply=self.context['supply']), validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Implement 'update' method This method will both create and update existing products, based on whether there is an id present in the data\"\"\"\n \"\"\"\n try:\n product_mapping = {product.id: product for product in instance}\n data_mapping = {int(item.get('id', 0)): item for item in validated_data}\n except SyntaxError as e:\n \"\"\"\n product_mapping = {}\n for product in instance:\n product_mapping[product.supplier.id] = product\n data_mapping = {}\n for item in validated_data:\n try:\n logger.debug(item)\n data_mapping[int(item['supplier'].id)] = item\n except (KeyError, AttributeError) as e:\n pass\n ret = []\n for product_id, data in data_mapping.items():\n product = product_mapping.get(product_id, None)\n if product is None:\n ret.append(self.child.create(data))\n else:\n ret.append(self.child.update(product, data))\n '\\n for product_id, product in product_mapping.items():\\n if product_id not in data_mapping:\\n product.delete()\\n '\n return ret\n", "source": "the_stack_v2_python_sparse", "source_path": "supplies/serializers.py", "source_repo": "charliephairoj/backend", "split": "val", "star_events_count": 0} {"blob_id": "3c69d82bf70110373a00967ffdbbbe65837f441f", "bodies": ["serializer = serializers_anio.CreateAnioSerializer(data=request.data)\ndata = {}\nif serializer.is_valid(raise_exception=True):\n try:\n instance = serializer.create()\n except ValidationError as e:\n return Response(data={'detail': e.message}, status=status.HTTP_400_BAD_REQUEST)\n data = {'id': instance.id}\nelse:\n data = serializer.errors\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\nreturn Response(data=data, status=status.HTTP_201_CREATED)", "anio = get_object_or_404(self.get_queryset(), pk=pk)\nserializer = serializers_anio.EditAnioSerializer(data=request.data, partial=True)\ndata = {}\nif serializer.is_valid(raise_exception=True):\n try:\n serializer.update(anio, serializer.validated_data)\n except ValidationError as e:\n return Response(data={'detail': e.message}, status=status.HTTP_400_BAD_REQUEST)\n response_serializer = serializers_anio.AnioSerializer(anio)\n data = response_serializer.data\nelse:\n data = 
serializer.errors\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\nreturn Response(data=data, status=status.HTTP_200_OK)", "queryset = Anio.objects.all()\nanio = get_object_or_404(queryset, pk=pk)\nif Curso.objects.filter(anio=anio).count() != 0:\n data = {'detail': 'No se puede eliminar un año que ya contenga cursos!'}\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\nif Materia.objects.filter(anio=anio).count() != 0:\n data = {'detail': 'No se puede eliminar un año que ya contenga materias!'}\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\nanio.delete()\nreturn Response(status=status.HTTP_200_OK)", "get_object_or_404(Carrera.objects.filter(institucion_id=request.user.institucion.id), pk=carrera_id)\nqueryset = Anio.objects.filter(carrera_id=carrera_id, carrera__institucion_id=request.user.institucion.id)\nserializer = serializers_anio.AnioSerializer(queryset, many=True)\nreturn Response(serializer.data)", "queryset = Anio.objects.filter(carrera__institucion_id=request.user.institucion.id)\nanio = get_object_or_404(queryset, pk=pk)\nserializer = serializers_anio.AnioSerializer(anio)\nreturn Response(serializer.data)"], "bodies_text": "<|body_start_0|>\n serializer = serializers_anio.CreateAnioSerializer(data=request.data)\n data = {}\n if serializer.is_valid(raise_exception=True):\n try:\n instance = serializer.create()\n except ValidationError as e:\n return Response(data={'detail': e.message}, status=status.HTTP_400_BAD_REQUEST)\n data = {'id': instance.id}\n else:\n data = serializer.errors\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n return Response(data=data, status=status.HTTP_201_CREATED)\n<|end_body_0|>\n\n<|body_start_1|>\n anio = get_object_or_404(self.get_queryset(), pk=pk)\n serializer = serializers_anio.EditAnioSerializer(data=request.data, partial=True)\n data = {}\n if serializer.is_valid(raise_exception=True):\n try:\n serializer.update(anio, serializer.validated_data)\n except ValidationError as e:\n return Response(data={'detail': e.message}, status=status.HTTP_400_BAD_REQUEST)\n response_serializer = serializers_anio.AnioSerializer(anio)\n data = response_serializer.data\n else:\n data = serializer.errors\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n return Response(data=data, status=status.HTTP_200_OK)\n<|end_body_1|>\n\n<|body_start_2|>\n queryset = Anio.objects.all()\n anio = get_object_or_404(queryset, pk=pk)\n if Curso.objects.filter(anio=anio).count() != 0:\n data = {'detail': 'No se puede eliminar un año que ya contenga cursos!'}\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n if Materia.objects.filter(anio=anio).count() != 0:\n data = {'detail': 'No se puede eliminar un año que ya contenga materias!'}\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n anio.delete()\n return Response(status=status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n get_object_or_404(Carrera.objects.filter(institucion_id=request.user.institucion.id), pk=carrera_id)\n queryset = Anio.objects.filter(carrera_id=carrera_id, carrera__institucion_id=request.user.institucion.id)\n serializer = serializers_anio.AnioSerializer(queryset, many=True)\n return Response(serializer.data)\n<|end_body_3|>\n\n<|body_start_4|>\n queryset = Anio.objects.filter(carrera__institucion_id=request.user.institucion.id)\n anio = get_object_or_404(queryset, pk=pk)\n serializer = serializers_anio.AnioSerializer(anio)\n return Response(serializer.data)\n<|end_body_4|>\n", 
"class_docstring": "", "class_name": "AnioViewSet", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AnioViewSet:\n\n def create(self, request):\n \"\"\"Crear un anio con sus cursos (opcional)\"\"\"\n <|body_0|>\n\n def update(self, request, pk=None):\n \"\"\"Editar un anio, sin afectar sus cursos (se editan aparte)\"\"\"\n <|body_1|>\n\n def destroy(self, request, pk=None):\n \"\"\"Elimina un Anio\"\"\"\n <|body_2|>\n\n def list(self, request, carrera_id=None):\n \"\"\"Listar Anios\"\"\"\n <|body_3|>\n\n def get(self, request, pk=None):\n \"\"\"Ver un anio\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n serializer = serializers_anio.CreateAnioSerializer(data=request.data)\n data = {}\n if serializer.is_valid(raise_exception=True):\n try:\n instance = serializer.create()\n except ValidationError as e:\n return Response(data={'detail': e.message}, status=status.HTTP_400_BAD_REQUEST)\n data = {'id': instance.id}\n else:\n data = serializer.errors\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n return Response(data=data, status=status.HTTP_201_CREATED)\n<|end_body_0|>\n\n<|body_start_1|>\n anio = get_object_or_404(self.get_queryset(), pk=pk)\n serializer = serializers_anio.EditAnioSerializer(data=request.data, partial=True)\n data = {}\n if serializer.is_valid(raise_exception=True):\n try:\n serializer.update(anio, serializer.validated_data)\n except ValidationError as e:\n return Response(data={'detail': e.message}, status=status.HTTP_400_BAD_REQUEST)\n response_serializer = serializers_anio.AnioSerializer(anio)\n data = response_serializer.data\n else:\n data = serializer.errors\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n return Response(data=data, status=status.HTTP_200_OK)\n<|end_body_1|>\n\n<|body_start_2|>\n queryset = Anio.objects.all()\n anio = get_object_or_404(queryset, pk=pk)\n if Curso.objects.filter(anio=anio).count() != 0:\n data = {'detail': 'No se puede eliminar un año que ya contenga cursos!'}\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n if Materia.objects.filter(anio=anio).count() != 0:\n data = {'detail': 'No se puede eliminar un año que ya contenga materias!'}\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n anio.delete()\n return Response(status=status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n get_object_or_404(Carrera.objects.filter(institucion_id=request.user.institucion.id), pk=carrera_id)\n queryset = Anio.objects.filter(carrera_id=carrera_id, carrera__institucion_id=request.user.institucion.id)\n serializer = serializers_anio.AnioSerializer(queryset, many=True)\n return Response(serializer.data)\n<|end_body_3|>\n\n<|body_start_4|>\n queryset = Anio.objects.filter(carrera__institucion_id=request.user.institucion.id)\n anio = get_object_or_404(queryset, pk=pk)\n serializer = serializers_anio.AnioSerializer(anio)\n return Response(serializer.data)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000088", "length_bytes": 9416, "license_type": "no_license", "methods": [{"docstring": "Crear un anio con sus cursos (opcional)", "name": "create", "signature": "def create(self, request)"}, {"docstring": "Editar un anio, sin afectar sus cursos (se editan aparte)", "name": "update", "signature": "def update(self, request, pk=None)"}, {"docstring": "Elimina un Anio", "name": "destroy", "signature": "def destroy(self, request, pk=None)"}, {"docstring": "Listar Anios", "name": "list", "signature": "def 
list(self, request, carrera_id=None)"}, {"docstring": "Ver un anio", "name": "get", "signature": "def get(self, request, pk=None)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_006590", "prompt": "Implement the Python class `AnioViewSet` described below.\n\nClass description:\nImplement the AnioViewSet class.\n\nMethod signatures and docstrings:\n- def create(self, request): Crear un anio con sus cursos (opcional)\n- def update(self, request, pk=None): Editar un anio, sin afectar sus cursos (se editan aparte)\n- def destroy(self, request, pk=None): Elimina un Anio\n- def list(self, request, carrera_id=None): Listar Anios\n- def get(self, request, pk=None): Ver un anio", "prompted_full_text": "Implement the Python class `AnioViewSet` described below.\n\nClass description:\nImplement the AnioViewSet class.\n\nMethod signatures and docstrings:\n- def create(self, request): Crear un anio con sus cursos (opcional)\n- def update(self, request, pk=None): Editar un anio, sin afectar sus cursos (se editan aparte)\n- def destroy(self, request, pk=None): Elimina un Anio\n- def list(self, request, carrera_id=None): Listar Anios\n- def get(self, request, pk=None): Ver un anio\n\n<|skeleton|>\nclass AnioViewSet:\n\n def create(self, request):\n \"\"\"Crear un anio con sus cursos (opcional)\"\"\"\n <|body_0|>\n\n def update(self, request, pk=None):\n \"\"\"Editar un anio, sin afectar sus cursos (se editan aparte)\"\"\"\n <|body_1|>\n\n def destroy(self, request, pk=None):\n \"\"\"Elimina un Anio\"\"\"\n <|body_2|>\n\n def list(self, request, carrera_id=None):\n \"\"\"Listar Anios\"\"\"\n <|body_3|>\n\n def get(self, request, pk=None):\n \"\"\"Ver un anio\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n serializer = serializers_anio.CreateAnioSerializer(data=request.data)\n data = {}\n if serializer.is_valid(raise_exception=True):\n try:\n instance = serializer.create()\n except ValidationError as e:\n return Response(data={'detail': e.message}, status=status.HTTP_400_BAD_REQUEST)\n data = {'id': instance.id}\n else:\n data = serializer.errors\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n return Response(data=data, status=status.HTTP_201_CREATED)\n<|end_body_0|>\n\n<|body_start_1|>\n anio = get_object_or_404(self.get_queryset(), pk=pk)\n serializer = serializers_anio.EditAnioSerializer(data=request.data, partial=True)\n data = {}\n if serializer.is_valid(raise_exception=True):\n try:\n serializer.update(anio, serializer.validated_data)\n except ValidationError as e:\n return Response(data={'detail': e.message}, status=status.HTTP_400_BAD_REQUEST)\n response_serializer = serializers_anio.AnioSerializer(anio)\n data = response_serializer.data\n else:\n data = serializer.errors\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n return Response(data=data, status=status.HTTP_200_OK)\n<|end_body_1|>\n\n<|body_start_2|>\n queryset = Anio.objects.all()\n anio = get_object_or_404(queryset, pk=pk)\n if Curso.objects.filter(anio=anio).count() != 0:\n data = {'detail': 'No se puede eliminar un año que ya contenga cursos!'}\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n if Materia.objects.filter(anio=anio).count() != 0:\n data = {'detail': 'No se puede eliminar un año que ya contenga materias!'}\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n anio.delete()\n return Response(status=status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n 
get_object_or_404(Carrera.objects.filter(institucion_id=request.user.institucion.id), pk=carrera_id)\n queryset = Anio.objects.filter(carrera_id=carrera_id, carrera__institucion_id=request.user.institucion.id)\n serializer = serializers_anio.AnioSerializer(queryset, many=True)\n return Response(serializer.data)\n<|end_body_3|>\n\n<|body_start_4|>\n queryset = Anio.objects.filter(carrera__institucion_id=request.user.institucion.id)\n anio = get_object_or_404(queryset, pk=pk)\n serializer = serializers_anio.AnioSerializer(anio)\n return Response(serializer.data)\n<|end_body_4|>\n", "revision_id": "be80b2d15f84a8eeba898e753efee348de6ce998", "skeleton": "<|skeleton|>\nclass AnioViewSet:\n\n def create(self, request):\n \"\"\"Crear un anio con sus cursos (opcional)\"\"\"\n <|body_0|>\n\n def update(self, request, pk=None):\n \"\"\"Editar un anio, sin afectar sus cursos (se editan aparte)\"\"\"\n <|body_1|>\n\n def destroy(self, request, pk=None):\n \"\"\"Elimina un Anio\"\"\"\n <|body_2|>\n\n def list(self, request, carrera_id=None):\n \"\"\"Listar Anios\"\"\"\n <|body_3|>\n\n def get(self, request, pk=None):\n \"\"\"Ver un anio\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AnioViewSet:\n def create(self, request):\n \"\"\"Crear un anio con sus cursos (opcional)\"\"\"\n serializer = serializers_anio.CreateAnioSerializer(data=request.data)\n data = {}\n if serializer.is_valid(raise_exception=True):\n try:\n instance = serializer.create()\n except ValidationError as e:\n return Response(data={'detail': e.message}, status=status.HTTP_400_BAD_REQUEST)\n data = {'id': instance.id}\n else:\n data = serializer.errors\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n return Response(data=data, status=status.HTTP_201_CREATED)\n\n def update(self, request, pk=None):\n \"\"\"Editar un anio, sin afectar sus cursos (se editan aparte)\"\"\"\n anio = get_object_or_404(self.get_queryset(), pk=pk)\n serializer = serializers_anio.EditAnioSerializer(data=request.data, partial=True)\n data = {}\n if serializer.is_valid(raise_exception=True):\n try:\n serializer.update(anio, serializer.validated_data)\n except ValidationError as e:\n return Response(data={'detail': e.message}, status=status.HTTP_400_BAD_REQUEST)\n response_serializer = serializers_anio.AnioSerializer(anio)\n data = response_serializer.data\n else:\n data = serializer.errors\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n return Response(data=data, status=status.HTTP_200_OK)\n\n def destroy(self, request, pk=None):\n \"\"\"Elimina un Anio\"\"\"\n queryset = Anio.objects.all()\n anio = get_object_or_404(queryset, pk=pk)\n if Curso.objects.filter(anio=anio).count() != 0:\n data = {'detail': 'No se puede eliminar un año que ya contenga cursos!'}\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n if Materia.objects.filter(anio=anio).count() != 0:\n data = {'detail': 'No se puede eliminar un año que ya contenga materias!'}\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n anio.delete()\n return Response(status=status.HTTP_200_OK)\n\n def list(self, request, carrera_id=None):\n \"\"\"Listar Anios\"\"\"\n get_object_or_404(Carrera.objects.filter(institucion_id=request.user.institucion.id), pk=carrera_id)\n queryset = Anio.objects.filter(carrera_id=carrera_id, carrera__institucion_id=request.user.institucion.id)\n serializer = serializers_anio.AnioSerializer(queryset, 
many=True)\n return Response(serializer.data)\n\n def get(self, request, pk=None):\n \"\"\"Ver un anio\"\"\"\n queryset = Anio.objects.filter(carrera__institucion_id=request.user.institucion.id)\n anio = get_object_or_404(queryset, pk=pk)\n serializer = serializers_anio.AnioSerializer(anio)\n return Response(serializer.data)\n", "source": "the_stack_v2_python_sparse", "source_path": "curricula/api/views/anio.py", "source_repo": "Clear-Education/ontrack_backend", "split": "val", "star_events_count": 1} {"blob_id": "8e1e4ebaaffdcc24bcf5eda616ab59b18b1a7d12", "bodies": ["super(MixAuxiliaryLoss, self).__init__()\nself.aux_weight = aux_weight\nloss_base_cp = loss_base.copy()\nloss_base_name = loss_base_cp.pop('type')\nself.loss_fn = ClassFactory.get_cls('trainer.loss', loss_base_name)(**loss_base_cp['params'])", "if len(outputs) != 2:\n raise Exception('outputs length must be 2')\nloss0 = self.loss_fn(outputs[0], targets)\nloss1 = self.loss_fn(outputs[1], targets)\nreturn loss0 + self.aux_weight * loss1"], "bodies_text": "<|body_start_0|>\n super(MixAuxiliaryLoss, self).__init__()\n self.aux_weight = aux_weight\n loss_base_cp = loss_base.copy()\n loss_base_name = loss_base_cp.pop('type')\n self.loss_fn = ClassFactory.get_cls('trainer.loss', loss_base_name)(**loss_base_cp['params'])\n<|end_body_0|>\n\n<|body_start_1|>\n if len(outputs) != 2:\n raise Exception('outputs length must be 2')\n loss0 = self.loss_fn(outputs[0], targets)\n loss1 = self.loss_fn(outputs[1], targets)\n return loss0 + self.aux_weight * loss1\n<|end_body_1|>\n", "class_docstring": "Class of Mix Auxiliary Loss. :param aux_weight: auxiliary loss weight :type aux_weight: float :loss_base: base loss function :loss_base: str", "class_name": "MixAuxiliaryLoss", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MixAuxiliaryLoss:\n \"\"\"Class of Mix Auxiliary Loss. :param aux_weight: auxiliary loss weight :type aux_weight: float :loss_base: base loss function :loss_base: str\"\"\"\n\n def __init__(self, aux_weight, loss_base):\n \"\"\"Init MixAuxiliaryLoss.\"\"\"\n <|body_0|>\n\n def forward(self, outputs, targets):\n \"\"\"Loss forward function.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MixAuxiliaryLoss, self).__init__()\n self.aux_weight = aux_weight\n loss_base_cp = loss_base.copy()\n loss_base_name = loss_base_cp.pop('type')\n self.loss_fn = ClassFactory.get_cls('trainer.loss', loss_base_name)(**loss_base_cp['params'])\n<|end_body_0|>\n\n<|body_start_1|>\n if len(outputs) != 2:\n raise Exception('outputs length must be 2')\n loss0 = self.loss_fn(outputs[0], targets)\n loss1 = self.loss_fn(outputs[1], targets)\n return loss0 + self.aux_weight * loss1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000089", "length_bytes": 1457, "license_type": "permissive", "methods": [{"docstring": "Init MixAuxiliaryLoss.", "name": "__init__", "signature": "def __init__(self, aux_weight, loss_base)"}, {"docstring": "Loss forward function.", "name": "forward", "signature": "def forward(self, outputs, targets)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000724", "prompt": "Implement the Python class `MixAuxiliaryLoss` described below.\n\nClass description:\nClass of Mix Auxiliary Loss. 
:param aux_weight: auxiliary loss weight :type aux_weight: float :loss_base: base loss function :loss_base: str\n\nMethod signatures and docstrings:\n- def __init__(self, aux_weight, loss_base): Init MixAuxiliaryLoss.\n- def forward(self, outputs, targets): Loss forward function.", "prompted_full_text": "Implement the Python class `MixAuxiliaryLoss` described below.\n\nClass description:\nClass of Mix Auxiliary Loss. :param aux_weight: auxiliary loss weight :type aux_weight: float :loss_base: base loss function :loss_base: str\n\nMethod signatures and docstrings:\n- def __init__(self, aux_weight, loss_base): Init MixAuxiliaryLoss.\n- def forward(self, outputs, targets): Loss forward function.\n\n<|skeleton|>\nclass MixAuxiliaryLoss:\n \"\"\"Class of Mix Auxiliary Loss. :param aux_weight: auxiliary loss weight :type aux_weight: float :loss_base: base loss function :loss_base: str\"\"\"\n\n def __init__(self, aux_weight, loss_base):\n \"\"\"Init MixAuxiliaryLoss.\"\"\"\n <|body_0|>\n\n def forward(self, outputs, targets):\n \"\"\"Loss forward function.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MixAuxiliaryLoss, self).__init__()\n self.aux_weight = aux_weight\n loss_base_cp = loss_base.copy()\n loss_base_name = loss_base_cp.pop('type')\n self.loss_fn = ClassFactory.get_cls('trainer.loss', loss_base_name)(**loss_base_cp['params'])\n<|end_body_0|>\n\n<|body_start_1|>\n if len(outputs) != 2:\n raise Exception('outputs length must be 2')\n loss0 = self.loss_fn(outputs[0], targets)\n loss1 = self.loss_fn(outputs[1], targets)\n return loss0 + self.aux_weight * loss1\n<|end_body_1|>\n", "revision_id": "e4ef3a1c92d19d1d08c3ef0e2156b6fecefdbe04", "skeleton": "<|skeleton|>\nclass MixAuxiliaryLoss:\n \"\"\"Class of Mix Auxiliary Loss. :param aux_weight: auxiliary loss weight :type aux_weight: float :loss_base: base loss function :loss_base: str\"\"\"\n\n def __init__(self, aux_weight, loss_base):\n \"\"\"Init MixAuxiliaryLoss.\"\"\"\n <|body_0|>\n\n def forward(self, outputs, targets):\n \"\"\"Loss forward function.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MixAuxiliaryLoss:\n \"\"\"Class of Mix Auxiliary Loss. 
:param aux_weight: auxiliary loss weight :type aux_weight: float :loss_base: base loss function :loss_base: str\"\"\"\n\n def __init__(self, aux_weight, loss_base):\n \"\"\"Init MixAuxiliaryLoss.\"\"\"\n super(MixAuxiliaryLoss, self).__init__()\n self.aux_weight = aux_weight\n loss_base_cp = loss_base.copy()\n loss_base_name = loss_base_cp.pop('type')\n self.loss_fn = ClassFactory.get_cls('trainer.loss', loss_base_name)(**loss_base_cp['params'])\n\n def forward(self, outputs, targets):\n \"\"\"Loss forward function.\"\"\"\n if len(outputs) != 2:\n raise Exception('outputs length must be 2')\n loss0 = self.loss_fn(outputs[0], targets)\n loss1 = self.loss_fn(outputs[1], targets)\n return loss0 + self.aux_weight * loss1\n", "source": "the_stack_v2_python_sparse", "source_path": "zeus/networks/pytorch/losses/mix_auxiliary_loss.py", "source_repo": "huawei-noah/xingtian", "split": "val", "star_events_count": 308} {"blob_id": "23191e1c4686794977bef71b4803dc9a4f457fe8", "bodies": ["result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\nexcept_result = 0.5\nmsg = '测试两正数相除'\ntry:\n result = MathNum(5, 10).two_division()\n self.assertEqual(except_result, result, msg=msg)\nexcept AssertionError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(result_address, a))\nelse:\n file.write('\\n{},Pass\\n'.format(result_address + msg))\n self.assertAlmostEqual()", "result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\nexcept_result = 0.5\nmsg = '测试两正数相除'\ntry:\n result = MathNum(-5, -10).two_division()\n self.assertEqual(except_result, result, msg=msg)\nexcept AssertionError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(msg, a))\nelse:\n file.write('\\n{},Pass\\n'.format(result_address + msg))", "result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\nexcept_result = -0.5\nmsg = '测试一正一负相除'\ntry:\n result = MathNum(5, -10).two_division()\n self.assertEqual(except_result, result, msg=msg)\nexcept AssertionError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(msg, a))\nelse:\n file.write('\\n{},Pass\\n'.format(result_address + msg))", "result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\ntry:\n MathNum(5, 'r').two_division()\nexcept TypeError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(result_address, a))\n raise a"], "bodies_text": "<|body_start_0|>\n result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\n except_result = 0.5\n msg = '测试两正数相除'\n try:\n result = MathNum(5, 10).two_division()\n self.assertEqual(except_result, result, msg=msg)\n except AssertionError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(result_address, a))\n else:\n file.write('\\n{},Pass\\n'.format(result_address + msg))\n self.assertAlmostEqual()\n<|end_body_0|>\n\n<|body_start_1|>\n result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\n except_result = 0.5\n msg = '测试两正数相除'\n try:\n result = MathNum(-5, -10).two_division()\n self.assertEqual(except_result, result, msg=msg)\n except AssertionError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(msg, a))\n else:\n file.write('\\n{},Pass\\n'.format(result_address + msg))\n<|end_body_1|>\n\n<|body_start_2|>\n result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\n except_result = -0.5\n msg = '测试一正一负相除'\n try:\n result = MathNum(5, -10).two_division()\n self.assertEqual(except_result, result, msg=msg)\n except AssertionError as 
a:\n file.write('\\n{}不通过,原因是{}\\n'.format(msg, a))\n else:\n file.write('\\n{},Pass\\n'.format(result_address + msg))\n<|end_body_2|>\n\n<|body_start_3|>\n result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\n try:\n MathNum(5, 'r').two_division()\n except TypeError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(result_address, a))\n raise a\n<|end_body_3|>\n", "class_docstring": "测试两数相除的类", "class_name": "TestDivision", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestDivision:\n \"\"\"测试两数相除的类\"\"\"\n\n def test_two_positive_division(self):\n \"\"\"两正数相除 :return: 当前测试用例的名称及测试结果\"\"\"\n <|body_0|>\n\n def test_two_negative(self):\n \"\"\"两负数相除 :return: 当前测试用例的名称及测试结果\"\"\"\n <|body_1|>\n\n def test_positive_negative(self):\n \"\"\"一正数一负数相除 :return: 当前测试用例的名称及测试结果\"\"\"\n <|body_2|>\n\n def test_num_str(self):\n \"\"\"一数与字符串相除 :return: 当前测试用例的名称及测试结果\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\n except_result = 0.5\n msg = '测试两正数相除'\n try:\n result = MathNum(5, 10).two_division()\n self.assertEqual(except_result, result, msg=msg)\n except AssertionError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(result_address, a))\n else:\n file.write('\\n{},Pass\\n'.format(result_address + msg))\n self.assertAlmostEqual()\n<|end_body_0|>\n\n<|body_start_1|>\n result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\n except_result = 0.5\n msg = '测试两正数相除'\n try:\n result = MathNum(-5, -10).two_division()\n self.assertEqual(except_result, result, msg=msg)\n except AssertionError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(msg, a))\n else:\n file.write('\\n{},Pass\\n'.format(result_address + msg))\n<|end_body_1|>\n\n<|body_start_2|>\n result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\n except_result = -0.5\n msg = '测试一正一负相除'\n try:\n result = MathNum(5, -10).two_division()\n self.assertEqual(except_result, result, msg=msg)\n except AssertionError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(msg, a))\n else:\n file.write('\\n{},Pass\\n'.format(result_address + msg))\n<|end_body_2|>\n\n<|body_start_3|>\n result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\n try:\n MathNum(5, 'r').two_division()\n except TypeError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(result_address, a))\n raise a\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000090", "length_bytes": 7448, "license_type": "no_license", "methods": [{"docstring": "两正数相除 :return: 当前测试用例的名称及测试结果", "name": "test_two_positive_division", "signature": "def test_two_positive_division(self)"}, {"docstring": "两负数相除 :return: 当前测试用例的名称及测试结果", "name": "test_two_negative", "signature": "def test_two_negative(self)"}, {"docstring": "一正数一负数相除 :return: 当前测试用例的名称及测试结果", "name": "test_positive_negative", "signature": "def test_positive_negative(self)"}, {"docstring": "一数与字符串相除 :return: 当前测试用例的名称及测试结果", "name": "test_num_str", "signature": "def test_num_str(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_004009", "prompt": "Implement the Python class `TestDivision` described below.\n\nClass description:\n测试两数相除的类\n\nMethod signatures and docstrings:\n- def test_two_positive_division(self): 两正数相除 :return: 当前测试用例的名称及测试结果\n- def test_two_negative(self): 两负数相除 :return: 当前测试用例的名称及测试结果\n- def 
test_positive_negative(self): 一正数一负数相除 :return: 当前测试用例的名称及测试结果\n- def test_num_str(self): 一数与字符串相除 :return: 当前测试用例的名称及测试结果", "prompted_full_text": "Implement the Python class `TestDivision` described below.\n\nClass description:\n测试两数相除的类\n\nMethod signatures and docstrings:\n- def test_two_positive_division(self): 两正数相除 :return: 当前测试用例的名称及测试结果\n- def test_two_negative(self): 两负数相除 :return: 当前测试用例的名称及测试结果\n- def test_positive_negative(self): 一正数一负数相除 :return: 当前测试用例的名称及测试结果\n- def test_num_str(self): 一数与字符串相除 :return: 当前测试用例的名称及测试结果\n\n<|skeleton|>\nclass TestDivision:\n \"\"\"测试两数相除的类\"\"\"\n\n def test_two_positive_division(self):\n \"\"\"两正数相除 :return: 当前测试用例的名称及测试结果\"\"\"\n <|body_0|>\n\n def test_two_negative(self):\n \"\"\"两负数相除 :return: 当前测试用例的名称及测试结果\"\"\"\n <|body_1|>\n\n def test_positive_negative(self):\n \"\"\"一正数一负数相除 :return: 当前测试用例的名称及测试结果\"\"\"\n <|body_2|>\n\n def test_num_str(self):\n \"\"\"一数与字符串相除 :return: 当前测试用例的名称及测试结果\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\n except_result = 0.5\n msg = '测试两正数相除'\n try:\n result = MathNum(5, 10).two_division()\n self.assertEqual(except_result, result, msg=msg)\n except AssertionError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(result_address, a))\n else:\n file.write('\\n{},Pass\\n'.format(result_address + msg))\n self.assertAlmostEqual()\n<|end_body_0|>\n\n<|body_start_1|>\n result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\n except_result = 0.5\n msg = '测试两正数相除'\n try:\n result = MathNum(-5, -10).two_division()\n self.assertEqual(except_result, result, msg=msg)\n except AssertionError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(msg, a))\n else:\n file.write('\\n{},Pass\\n'.format(result_address + msg))\n<|end_body_1|>\n\n<|body_start_2|>\n result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\n except_result = -0.5\n msg = '测试一正一负相除'\n try:\n result = MathNum(5, -10).two_division()\n self.assertEqual(except_result, result, msg=msg)\n except AssertionError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(msg, a))\n else:\n file.write('\\n{},Pass\\n'.format(result_address + msg))\n<|end_body_2|>\n\n<|body_start_3|>\n result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\n try:\n MathNum(5, 'r').two_division()\n except TypeError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(result_address, a))\n raise a\n<|end_body_3|>\n", "revision_id": "79408b1eb1599349d6b23ddc4307bb5780f9669c", "skeleton": "<|skeleton|>\nclass TestDivision:\n \"\"\"测试两数相除的类\"\"\"\n\n def test_two_positive_division(self):\n \"\"\"两正数相除 :return: 当前测试用例的名称及测试结果\"\"\"\n <|body_0|>\n\n def test_two_negative(self):\n \"\"\"两负数相除 :return: 当前测试用例的名称及测试结果\"\"\"\n <|body_1|>\n\n def test_positive_negative(self):\n \"\"\"一正数一负数相除 :return: 当前测试用例的名称及测试结果\"\"\"\n <|body_2|>\n\n def test_num_str(self):\n \"\"\"一数与字符串相除 :return: 当前测试用例的名称及测试结果\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestDivision:\n \"\"\"测试两数相除的类\"\"\"\n\n def test_two_positive_division(self):\n \"\"\"两正数相除 :return: 当前测试用例的名称及测试结果\"\"\"\n result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\n except_result = 0.5\n msg = '测试两正数相除'\n try:\n result = MathNum(5, 10).two_division()\n self.assertEqual(except_result, result, 
msg=msg)\n except AssertionError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(result_address, a))\n else:\n file.write('\\n{},Pass\\n'.format(result_address + msg))\n self.assertAlmostEqual()\n\n def test_two_negative(self):\n \"\"\"两负数相除 :return: 当前测试用例的名称及测试结果\"\"\"\n result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\n except_result = 0.5\n msg = '测试两正数相除'\n try:\n result = MathNum(-5, -10).two_division()\n self.assertEqual(except_result, result, msg=msg)\n except AssertionError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(msg, a))\n else:\n file.write('\\n{},Pass\\n'.format(result_address + msg))\n\n def test_positive_negative(self):\n \"\"\"一正数一负数相除 :return: 当前测试用例的名称及测试结果\"\"\"\n result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\n except_result = -0.5\n msg = '测试一正一负相除'\n try:\n result = MathNum(5, -10).two_division()\n self.assertEqual(except_result, result, msg=msg)\n except AssertionError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(msg, a))\n else:\n file.write('\\n{},Pass\\n'.format(result_address + msg))\n\n def test_num_str(self):\n \"\"\"一数与字符串相除 :return: 当前测试用例的名称及测试结果\"\"\"\n result_address = '测试时间:{}测试用例名称是{},测试结果是: '.format(TestDivision.now, inspect.stack()[0][3])\n try:\n MathNum(5, 'r').two_division()\n except TypeError as a:\n file.write('\\n{}不通过,原因是{}\\n'.format(result_address, a))\n raise a\n", "source": "the_stack_v2_python_sparse", "source_path": "WebAPI/lesson_14_0426.py", "source_repo": "grassroadsZ/PythonStudy", "split": "val", "star_events_count": 0} {"blob_id": "b5c64fff565bb10820b5416cb5d790edb3019cb2", "bodies": ["if post_data is None:\n res = requests.get(url, headers=headers)\n html_tree = etree.HTML(res.text)\nelse:\n res = requests.post(url, headers=headers, post_data=post_data)\n html_tree = etree.HTML(res.text)\nreturn {'__VIEWSTATE': html_tree.xpath(view_state)[0], '__EVENTVALIDATION': html_tree.xpath(event_validation)[0]}", "url_encode = ''\nfor ch in url:\n if '一' <= ch <= '鿿':\n ch = parse.quote(ch, encoding=encode)\n url_encode = str(url_encode) + str(ch)\nreturn url_encode"], "bodies_text": "<|body_start_0|>\n if post_data is None:\n res = requests.get(url, headers=headers)\n html_tree = etree.HTML(res.text)\n else:\n res = requests.post(url, headers=headers, post_data=post_data)\n html_tree = etree.HTML(res.text)\n return {'__VIEWSTATE': html_tree.xpath(view_state)[0], '__EVENTVALIDATION': html_tree.xpath(event_validation)[0]}\n<|end_body_0|>\n\n<|body_start_1|>\n url_encode = ''\n for ch in url:\n if '一' <= ch <= '鿿':\n ch = parse.quote(ch, encoding=encode)\n url_encode = str(url_encode) + str(ch)\n return url_encode\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Tool", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Tool:\n\n def get_view_state(cls, url, view_state, event_validation, post_data=None, headers=None):\n \"\"\"传入view_state,event_validation的xpath :param url: http://www.jscsfc.com/NewHouse/ :param view_state: //*[@id=\"__VIEWSTATE\"]/@value :param event_validation: //*[@id=\"__EVENTVALIDATION\"]/@value :return: {'__VIEWSTATE': html_tree.xpath(view_state), '__EVENTVALIDATION': html_tree.xpath(event_validation)}\"\"\"\n <|body_0|>\n\n def url_quote(cls, url, encode=None):\n \"\"\"把url里面的中文转码 :param url: str :param encode: default utf-8 :return: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if post_data is None:\n res = requests.get(url, headers=headers)\n 
html_tree = etree.HTML(res.text)\n else:\n res = requests.post(url, headers=headers, post_data=post_data)\n html_tree = etree.HTML(res.text)\n return {'__VIEWSTATE': html_tree.xpath(view_state)[0], '__EVENTVALIDATION': html_tree.xpath(event_validation)[0]}\n<|end_body_0|>\n\n<|body_start_1|>\n url_encode = ''\n for ch in url:\n if '一' <= ch <= '鿿':\n ch = parse.quote(ch, encoding=encode)\n url_encode = str(url_encode) + str(ch)\n return url_encode\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000091", "length_bytes": 1777, "license_type": "no_license", "methods": [{"docstring": "传入view_state,event_validation的xpath :param url: http://www.jscsfc.com/NewHouse/ :param view_state: //*[@id=\"__VIEWSTATE\"]/@value :param event_validation: //*[@id=\"__EVENTVALIDATION\"]/@value :return: {'__VIEWSTATE': html_tree.xpath(view_state), '__EVENTVALIDATION': html_tree.xpath(event_validation)}", "name": "get_view_state", "signature": "def get_view_state(cls, url, view_state, event_validation, post_data=None, headers=None)"}, {"docstring": "把url里面的中文转码 :param url: str :param encode: default utf-8 :return: str", "name": "url_quote", "signature": "def url_quote(cls, url, encode=None)"}], "n_methods": 2, "prompt": "Implement the Python class `Tool` described below.\n\nClass description:\nImplement the Tool class.\n\nMethod signatures and docstrings:\n- def get_view_state(cls, url, view_state, event_validation, post_data=None, headers=None): 传入view_state,event_validation的xpath :param url: http://www.jscsfc.com/NewHouse/ :param view_state: //*[@id=\"__VIEWSTATE\"]/@value :param event_validation: //*[@id=\"__EVENTVALIDATION\"]/@value :return: {'__VIEWSTATE': html_tree.xpath(view_state), '__EVENTVALIDATION': html_tree.xpath(event_validation)}\n- def url_quote(cls, url, encode=None): 把url里面的中文转码 :param url: str :param encode: default utf-8 :return: str", "prompted_full_text": "Implement the Python class `Tool` described below.\n\nClass description:\nImplement the Tool class.\n\nMethod signatures and docstrings:\n- def get_view_state(cls, url, view_state, event_validation, post_data=None, headers=None): 传入view_state,event_validation的xpath :param url: http://www.jscsfc.com/NewHouse/ :param view_state: //*[@id=\"__VIEWSTATE\"]/@value :param event_validation: //*[@id=\"__EVENTVALIDATION\"]/@value :return: {'__VIEWSTATE': html_tree.xpath(view_state), '__EVENTVALIDATION': html_tree.xpath(event_validation)}\n- def url_quote(cls, url, encode=None): 把url里面的中文转码 :param url: str :param encode: default utf-8 :return: str\n\n<|skeleton|>\nclass Tool:\n\n def get_view_state(cls, url, view_state, event_validation, post_data=None, headers=None):\n \"\"\"传入view_state,event_validation的xpath :param url: http://www.jscsfc.com/NewHouse/ :param view_state: //*[@id=\"__VIEWSTATE\"]/@value :param event_validation: //*[@id=\"__EVENTVALIDATION\"]/@value :return: {'__VIEWSTATE': html_tree.xpath(view_state), '__EVENTVALIDATION': html_tree.xpath(event_validation)}\"\"\"\n <|body_0|>\n\n def url_quote(cls, url, encode=None):\n \"\"\"把url里面的中文转码 :param url: str :param encode: default utf-8 :return: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if post_data is None:\n res = requests.get(url, headers=headers)\n html_tree = etree.HTML(res.text)\n else:\n res = requests.post(url, headers=headers, post_data=post_data)\n html_tree = etree.HTML(res.text)\n return {'__VIEWSTATE': html_tree.xpath(view_state)[0], '__EVENTVALIDATION': html_tree.xpath(event_validation)[0]}\n<|end_body_0|>\n\n<|body_start_1|>\n url_encode = 
''\n for ch in url:\n if '一' <= ch <= '鿿':\n ch = parse.quote(ch, encoding=encode)\n url_encode = str(url_encode) + str(ch)\n return url_encode\n<|end_body_1|>\n", "revision_id": "808cb78fc3887f35bf838d77d62308fce9e6aa5d", "skeleton": "<|skeleton|>\nclass Tool:\n\n def get_view_state(cls, url, view_state, event_validation, post_data=None, headers=None):\n \"\"\"传入view_state,event_validation的xpath :param url: http://www.jscsfc.com/NewHouse/ :param view_state: //*[@id=\"__VIEWSTATE\"]/@value :param event_validation: //*[@id=\"__EVENTVALIDATION\"]/@value :return: {'__VIEWSTATE': html_tree.xpath(view_state), '__EVENTVALIDATION': html_tree.xpath(event_validation)}\"\"\"\n <|body_0|>\n\n def url_quote(cls, url, encode=None):\n \"\"\"把url里面的中文转码 :param url: str :param encode: default utf-8 :return: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Tool:\n def get_view_state(cls, url, view_state, event_validation, post_data=None, headers=None):\n \"\"\"传入view_state,event_validation的xpath :param url: http://www.jscsfc.com/NewHouse/ :param view_state: //*[@id=\"__VIEWSTATE\"]/@value :param event_validation: //*[@id=\"__EVENTVALIDATION\"]/@value :return: {'__VIEWSTATE': html_tree.xpath(view_state), '__EVENTVALIDATION': html_tree.xpath(event_validation)}\"\"\"\n if post_data is None:\n res = requests.get(url, headers=headers)\n html_tree = etree.HTML(res.text)\n else:\n res = requests.post(url, headers=headers, post_data=post_data)\n html_tree = etree.HTML(res.text)\n return {'__VIEWSTATE': html_tree.xpath(view_state)[0], '__EVENTVALIDATION': html_tree.xpath(event_validation)[0]}\n\n def url_quote(cls, url, encode=None):\n \"\"\"把url里面的中文转码 :param url: str :param encode: default utf-8 :return: str\"\"\"\n url_encode = ''\n for ch in url:\n if '一' <= ch <= '鿿':\n ch = parse.quote(ch, encoding=encode)\n url_encode = str(url_encode) + str(ch)\n return url_encode\n", "source": "the_stack_v2_python_sparse", "source_path": "hilder_gv/backup/tool.py", "source_repo": "pjkui/githubproject", "split": "val", "star_events_count": 0} {"blob_id": "b01617f019d054e432b5e5dc658970142c446ebc", "bodies": ["m, n = (len(word1), len(word2))\ndp = [[-1] * (n + 1) for _ in range(m + 1)]\nfor i in range(m + 1):\n dp[i][0] = i\nfor j in range(n + 1):\n dp[0][j] = j\nfor i in range(1, m + 1):\n for j in range(1, n + 1):\n if word1[i - 1] == word2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n dp[i][j] = 1 + min(dp[i][j - 1], dp[i - 1][j], dp[i - 1][j - 1])\nreturn dp[-1][-1]", "def search(w1, w2):\n if len(w2) == 0:\n return len(w1)\n elif len(w1) < len(w2):\n return search(w2, w1)\n elif len(w2) == 1:\n count = len(w1) - 1 if w2 in w1 else len(w1)\n dp[w1, w2] = count\n return count\n count = float('inf')\n if w1[0] == w2[0]:\n count = search(w1[1:], w2[1:])\n else:\n count = min(1 + search(w1[1:], w2), 1 + search(w1[1:], w2[1:]))\n dp[w1, w2] = count\n return count\ndp = {}\nword1, word2 = (word1, word2) if len(word1) >= len(word2) else (word2, word1)\nreturn search(word1, word2)"], "bodies_text": "<|body_start_0|>\n m, n = (len(word1), len(word2))\n dp = [[-1] * (n + 1) for _ in range(m + 1)]\n for i in range(m + 1):\n dp[i][0] = i\n for j in range(n + 1):\n dp[0][j] = j\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if word1[i - 1] == word2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n dp[i][j] = 1 + min(dp[i][j - 1], dp[i - 1][j], dp[i - 1][j - 1])\n return 
dp[-1][-1]\n<|end_body_0|>\n\n<|body_start_1|>\n def search(w1, w2):\n if len(w2) == 0:\n return len(w1)\n elif len(w1) < len(w2):\n return search(w2, w1)\n elif len(w2) == 1:\n count = len(w1) - 1 if w2 in w1 else len(w1)\n dp[w1, w2] = count\n return count\n count = float('inf')\n if w1[0] == w2[0]:\n count = search(w1[1:], w2[1:])\n else:\n count = min(1 + search(w1[1:], w2), 1 + search(w1[1:], w2[1:]))\n dp[w1, w2] = count\n return count\n dp = {}\n word1, word2 = (word1, word2) if len(word1) >= len(word2) else (word2, word1)\n return search(word1, word2)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def minDistance(self, word1, word2):\n \"\"\":type word1: str :type word2: str :rtype: int\"\"\"\n <|body_0|>\n\n def minDistance_TLE(self, word1, word2):\n \"\"\":type word1: str :type word2: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m, n = (len(word1), len(word2))\n dp = [[-1] * (n + 1) for _ in range(m + 1)]\n for i in range(m + 1):\n dp[i][0] = i\n for j in range(n + 1):\n dp[0][j] = j\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if word1[i - 1] == word2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n dp[i][j] = 1 + min(dp[i][j - 1], dp[i - 1][j], dp[i - 1][j - 1])\n return dp[-1][-1]\n<|end_body_0|>\n\n<|body_start_1|>\n def search(w1, w2):\n if len(w2) == 0:\n return len(w1)\n elif len(w1) < len(w2):\n return search(w2, w1)\n elif len(w2) == 1:\n count = len(w1) - 1 if w2 in w1 else len(w1)\n dp[w1, w2] = count\n return count\n count = float('inf')\n if w1[0] == w2[0]:\n count = search(w1[1:], w2[1:])\n else:\n count = min(1 + search(w1[1:], w2), 1 + search(w1[1:], w2[1:]))\n dp[w1, w2] = count\n return count\n dp = {}\n word1, word2 = (word1, word2) if len(word1) >= len(word2) else (word2, word1)\n return search(word1, word2)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000092", "length_bytes": 2975, "license_type": "no_license", "methods": [{"docstring": ":type word1: str :type word2: str :rtype: int", "name": "minDistance", "signature": "def minDistance(self, word1, word2)"}, {"docstring": ":type word1: str :type word2: str :rtype: int", "name": "minDistance_TLE", "signature": "def minDistance_TLE(self, word1, word2)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007314", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minDistance(self, word1, word2): :type word1: str :type word2: str :rtype: int\n- def minDistance_TLE(self, word1, word2): :type word1: str :type word2: str :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minDistance(self, word1, word2): :type word1: str :type word2: str :rtype: int\n- def minDistance_TLE(self, word1, word2): :type word1: str :type word2: str :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def minDistance(self, word1, word2):\n \"\"\":type word1: str :type word2: str :rtype: int\"\"\"\n <|body_0|>\n\n def minDistance_TLE(self, word1, word2):\n \"\"\":type word1: str :type word2: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m, n = (len(word1), len(word2))\n dp = [[-1] * (n + 1) for _ in range(m + 1)]\n for i in range(m + 1):\n dp[i][0] 
= i\n for j in range(n + 1):\n dp[0][j] = j\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if word1[i - 1] == word2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n dp[i][j] = 1 + min(dp[i][j - 1], dp[i - 1][j], dp[i - 1][j - 1])\n return dp[-1][-1]\n<|end_body_0|>\n\n<|body_start_1|>\n def search(w1, w2):\n if len(w2) == 0:\n return len(w1)\n elif len(w1) < len(w2):\n return search(w2, w1)\n elif len(w2) == 1:\n count = len(w1) - 1 if w2 in w1 else len(w1)\n dp[w1, w2] = count\n return count\n count = float('inf')\n if w1[0] == w2[0]:\n count = search(w1[1:], w2[1:])\n else:\n count = min(1 + search(w1[1:], w2), 1 + search(w1[1:], w2[1:]))\n dp[w1, w2] = count\n return count\n dp = {}\n word1, word2 = (word1, word2) if len(word1) >= len(word2) else (word2, word1)\n return search(word1, word2)\n<|end_body_1|>\n", "revision_id": "e60ba45fe2f2e5e3b3abfecec3db76f5ce1fde59", "skeleton": "<|skeleton|>\nclass Solution:\n\n def minDistance(self, word1, word2):\n \"\"\":type word1: str :type word2: str :rtype: int\"\"\"\n <|body_0|>\n\n def minDistance_TLE(self, word1, word2):\n \"\"\":type word1: str :type word2: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def minDistance(self, word1, word2):\n \"\"\":type word1: str :type word2: str :rtype: int\"\"\"\n m, n = (len(word1), len(word2))\n dp = [[-1] * (n + 1) for _ in range(m + 1)]\n for i in range(m + 1):\n dp[i][0] = i\n for j in range(n + 1):\n dp[0][j] = j\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if word1[i - 1] == word2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n dp[i][j] = 1 + min(dp[i][j - 1], dp[i - 1][j], dp[i - 1][j - 1])\n return dp[-1][-1]\n\n def minDistance_TLE(self, word1, word2):\n \"\"\":type word1: str :type word2: str :rtype: int\"\"\"\n def search(w1, w2):\n if len(w2) == 0:\n return len(w1)\n elif len(w1) < len(w2):\n return search(w2, w1)\n elif len(w2) == 1:\n count = len(w1) - 1 if w2 in w1 else len(w1)\n dp[w1, w2] = count\n return count\n count = float('inf')\n if w1[0] == w2[0]:\n count = search(w1[1:], w2[1:])\n else:\n count = min(1 + search(w1[1:], w2), 1 + search(w1[1:], w2[1:]))\n dp[w1, w2] = count\n return count\n dp = {}\n word1, word2 = (word1, word2) if len(word1) >= len(word2) else (word2, word1)\n return search(word1, word2)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/lt_72.py", "source_repo": "oxhead/CodingYourWay", "split": "val", "star_events_count": 0} {"blob_id": "1ba6907acc4472f1a052f2d866b46f94431b86d4", "bodies": ["polling_time = 0\nresult = cls.get_task_result(job_instance_id)\nwhile not result['is_finished']:\n if polling_time > POLLING_TIMEOUT:\n logger.error('user->[{}] called api->[get_task_result] but got JobExecuteTimeout.'.format(settings.BACKEND_JOB_OPERATOR))\n raise JobPollTimeout({'job_instance_id': job_instance_id})\n polling_time += POLLING_INTERVAL\n time.sleep(POLLING_INTERVAL)\n result = cls.get_task_result(job_instance_id)\nreturn result", "params = {'job_instance_id': job_instance_id, 'bk_biz_id': settings.BLUEKING_BIZ_ID, 'bk_scope_type': constants.BkJobScopeType.BIZ_SET.value, 'bk_scope_id': settings.BLUEKING_BIZ_ID, 'bk_username': settings.BACKEND_JOB_OPERATOR, 'return_ip_result': True}\njob_status = JobApi.get_job_instance_status(params)\nis_finished = job_status['finished']\nhost_infos__gby_job_status = defaultdict(list)\nstep_instance_id = 
job_status['step_instance_list'][0]['step_instance_id']\nfor instance in job_status['step_instance_list'][0]['step_ip_result_list']:\n if settings.BKAPP_ENABLE_DHCP:\n host_info = {'ip': instance['ip'], 'bk_cloud_id': instance['bk_cloud_id']}\n else:\n host_info = {'bk_host_id': instance['bk_host_id']}\n host_infos__gby_job_status[instance['status']].append(host_info)\nlogger.info('user->[{}] called api->[{}] and got response->[{}].'.format(settings.BACKEND_JOB_OPERATOR, job_instance_id, json.dumps(job_status)))\ntask_result = {'success': [], 'pending': [], 'failed': []}\nfor status, hosts in host_infos__gby_job_status.items():\n if status == JobIPStatus.SUCCESS:\n key = 'success'\n elif status in (JobIPStatus.WAITING_FOR_EXEC, JobIPStatus.RUNNING):\n key = 'pending'\n else:\n key = 'failed'\n for host in hosts:\n base_log_params = {'job_instance_id': job_instance_id, 'bk_biz_id': settings.BLUEKING_BIZ_ID, 'bk_scope_type': constants.BkJobScopeType.BIZ_SET.value, 'bk_scope_id': settings.BLUEKING_BIZ_ID, 'bk_username': settings.BACKEND_JOB_OPERATOR, 'step_instance_id': step_instance_id}\n host_interaction_params: Dict[str, Union[str, int]] = {'bk_host_id': host['bk_host_id']} if settings.BKAPP_ENABLE_DHCP else {'ip': host['ip'], 'bk_cloud_id': host['bk_cloud_id']}\n log_result = JobApi.get_job_instance_ip_log({**base_log_params, **host_interaction_params})\n if settings.BKAPP_ENABLE_DHCP:\n task_result[key].append({'ip': host['ip'], 'bk_cloud_id': host['bk_cloud_id'], 'log_content': log_result['log_content']})\n else:\n task_result[key].append({'bk_host_id': host['bk_host_id'], 'log_content': log_result['log_content']})\nreturn {'is_finished': is_finished, 'task_result': task_result}"], "bodies_text": "<|body_start_0|>\n polling_time = 0\n result = cls.get_task_result(job_instance_id)\n while not result['is_finished']:\n if polling_time > POLLING_TIMEOUT:\n logger.error('user->[{}] called api->[get_task_result] but got JobExecuteTimeout.'.format(settings.BACKEND_JOB_OPERATOR))\n raise JobPollTimeout({'job_instance_id': job_instance_id})\n polling_time += POLLING_INTERVAL\n time.sleep(POLLING_INTERVAL)\n result = cls.get_task_result(job_instance_id)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n params = {'job_instance_id': job_instance_id, 'bk_biz_id': settings.BLUEKING_BIZ_ID, 'bk_scope_type': constants.BkJobScopeType.BIZ_SET.value, 'bk_scope_id': settings.BLUEKING_BIZ_ID, 'bk_username': settings.BACKEND_JOB_OPERATOR, 'return_ip_result': True}\n job_status = JobApi.get_job_instance_status(params)\n is_finished = job_status['finished']\n host_infos__gby_job_status = defaultdict(list)\n step_instance_id = job_status['step_instance_list'][0]['step_instance_id']\n for instance in job_status['step_instance_list'][0]['step_ip_result_list']:\n if settings.BKAPP_ENABLE_DHCP:\n host_info = {'ip': instance['ip'], 'bk_cloud_id': instance['bk_cloud_id']}\n else:\n host_info = {'bk_host_id': instance['bk_host_id']}\n host_infos__gby_job_status[instance['status']].append(host_info)\n logger.info('user->[{}] called api->[{}] and got response->[{}].'.format(settings.BACKEND_JOB_OPERATOR, job_instance_id, json.dumps(job_status)))\n task_result = {'success': [], 'pending': [], 'failed': []}\n for status, hosts in host_infos__gby_job_status.items():\n if status == JobIPStatus.SUCCESS:\n key = 'success'\n elif status in (JobIPStatus.WAITING_FOR_EXEC, JobIPStatus.RUNNING):\n key = 'pending'\n else:\n key = 'failed'\n for host in hosts:\n base_log_params = {'job_instance_id': job_instance_id, 'bk_biz_id': 
settings.BLUEKING_BIZ_ID, 'bk_scope_type': constants.BkJobScopeType.BIZ_SET.value, 'bk_scope_id': settings.BLUEKING_BIZ_ID, 'bk_username': settings.BACKEND_JOB_OPERATOR, 'step_instance_id': step_instance_id}\n host_interaction_params: Dict[str, Union[str, int]] = {'bk_host_id': host['bk_host_id']} if settings.BKAPP_ENABLE_DHCP else {'ip': host['ip'], 'bk_cloud_id': host['bk_cloud_id']}\n log_result = JobApi.get_job_instance_ip_log({**base_log_params, **host_interaction_params})\n if settings.BKAPP_ENABLE_DHCP:\n task_result[key].append({'ip': host['ip'], 'bk_cloud_id': host['bk_cloud_id'], 'log_content': log_result['log_content']})\n else:\n task_result[key].append({'bk_host_id': host['bk_host_id'], 'log_content': log_result['log_content']})\n return {'is_finished': is_finished, 'task_result': task_result}\n<|end_body_1|>\n", "class_docstring": "", "class_name": "JobDemand", "detected_licenses": ["MIT", "LicenseRef-scancode-free-unknown"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass JobDemand:\n\n def poll_task_result(cls, job_instance_id: int):\n \"\"\"轮询直到任务完成 :param job_instance_id: job任务id :return: 与 get_task_result 同\"\"\"\n <|body_0|>\n\n def get_task_result(cls, job_instance_id: int):\n \"\"\"获取执行结果 :param job_instance_id: job任务id :return: example: { \"success\": [ { 'ip': 127.0.0.1, 'bk_cloud_id': 0, 'host_id': 1, 'log_content': 'xx', } ], \"pending\": [], \"failed\": [] }\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n polling_time = 0\n result = cls.get_task_result(job_instance_id)\n while not result['is_finished']:\n if polling_time > POLLING_TIMEOUT:\n logger.error('user->[{}] called api->[get_task_result] but got JobExecuteTimeout.'.format(settings.BACKEND_JOB_OPERATOR))\n raise JobPollTimeout({'job_instance_id': job_instance_id})\n polling_time += POLLING_INTERVAL\n time.sleep(POLLING_INTERVAL)\n result = cls.get_task_result(job_instance_id)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n params = {'job_instance_id': job_instance_id, 'bk_biz_id': settings.BLUEKING_BIZ_ID, 'bk_scope_type': constants.BkJobScopeType.BIZ_SET.value, 'bk_scope_id': settings.BLUEKING_BIZ_ID, 'bk_username': settings.BACKEND_JOB_OPERATOR, 'return_ip_result': True}\n job_status = JobApi.get_job_instance_status(params)\n is_finished = job_status['finished']\n host_infos__gby_job_status = defaultdict(list)\n step_instance_id = job_status['step_instance_list'][0]['step_instance_id']\n for instance in job_status['step_instance_list'][0]['step_ip_result_list']:\n if settings.BKAPP_ENABLE_DHCP:\n host_info = {'ip': instance['ip'], 'bk_cloud_id': instance['bk_cloud_id']}\n else:\n host_info = {'bk_host_id': instance['bk_host_id']}\n host_infos__gby_job_status[instance['status']].append(host_info)\n logger.info('user->[{}] called api->[{}] and got response->[{}].'.format(settings.BACKEND_JOB_OPERATOR, job_instance_id, json.dumps(job_status)))\n task_result = {'success': [], 'pending': [], 'failed': []}\n for status, hosts in host_infos__gby_job_status.items():\n if status == JobIPStatus.SUCCESS:\n key = 'success'\n elif status in (JobIPStatus.WAITING_FOR_EXEC, JobIPStatus.RUNNING):\n key = 'pending'\n else:\n key = 'failed'\n for host in hosts:\n base_log_params = {'job_instance_id': job_instance_id, 'bk_biz_id': settings.BLUEKING_BIZ_ID, 'bk_scope_type': constants.BkJobScopeType.BIZ_SET.value, 'bk_scope_id': settings.BLUEKING_BIZ_ID, 'bk_username': settings.BACKEND_JOB_OPERATOR, 'step_instance_id': step_instance_id}\n host_interaction_params: 
Dict[str, Union[str, int]] = {'bk_host_id': host['bk_host_id']} if settings.BKAPP_ENABLE_DHCP else {'ip': host['ip'], 'bk_cloud_id': host['bk_cloud_id']}\n log_result = JobApi.get_job_instance_ip_log({**base_log_params, **host_interaction_params})\n if settings.BKAPP_ENABLE_DHCP:\n task_result[key].append({'ip': host['ip'], 'bk_cloud_id': host['bk_cloud_id'], 'log_content': log_result['log_content']})\n else:\n task_result[key].append({'bk_host_id': host['bk_host_id'], 'log_content': log_result['log_content']})\n return {'is_finished': is_finished, 'task_result': task_result}\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000093", "length_bytes": 6436, "license_type": "permissive", "methods": [{"docstring": "轮询直到任务完成 :param job_instance_id: job任务id :return: 与 get_task_result 同", "name": "poll_task_result", "signature": "def poll_task_result(cls, job_instance_id: int)"}, {"docstring": "获取执行结果 :param job_instance_id: job任务id :return: example: { \"success\": [ { 'ip': 127.0.0.1, 'bk_cloud_id': 0, 'host_id': 1, 'log_content': 'xx', } ], \"pending\": [], \"failed\": [] }", "name": "get_task_result", "signature": "def get_task_result(cls, job_instance_id: int)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000550", "prompt": "Implement the Python class `JobDemand` described below.\n\nClass description:\nImplement the JobDemand class.\n\nMethod signatures and docstrings:\n- def poll_task_result(cls, job_instance_id: int): 轮询直到任务完成 :param job_instance_id: job任务id :return: 与 get_task_result 同\n- def get_task_result(cls, job_instance_id: int): 获取执行结果 :param job_instance_id: job任务id :return: example: { \"success\": [ { 'ip': 127.0.0.1, 'bk_cloud_id': 0, 'host_id': 1, 'log_content': 'xx', } ], \"pending\": [], \"failed\": [] }", "prompted_full_text": "Implement the Python class `JobDemand` described below.\n\nClass description:\nImplement the JobDemand class.\n\nMethod signatures and docstrings:\n- def poll_task_result(cls, job_instance_id: int): 轮询直到任务完成 :param job_instance_id: job任务id :return: 与 get_task_result 同\n- def get_task_result(cls, job_instance_id: int): 获取执行结果 :param job_instance_id: job任务id :return: example: { \"success\": [ { 'ip': 127.0.0.1, 'bk_cloud_id': 0, 'host_id': 1, 'log_content': 'xx', } ], \"pending\": [], \"failed\": [] }\n\n<|skeleton|>\nclass JobDemand:\n\n def poll_task_result(cls, job_instance_id: int):\n \"\"\"轮询直到任务完成 :param job_instance_id: job任务id :return: 与 get_task_result 同\"\"\"\n <|body_0|>\n\n def get_task_result(cls, job_instance_id: int):\n \"\"\"获取执行结果 :param job_instance_id: job任务id :return: example: { \"success\": [ { 'ip': 127.0.0.1, 'bk_cloud_id': 0, 'host_id': 1, 'log_content': 'xx', } ], \"pending\": [], \"failed\": [] }\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n polling_time = 0\n result = cls.get_task_result(job_instance_id)\n while not result['is_finished']:\n if polling_time > POLLING_TIMEOUT:\n logger.error('user->[{}] called api->[get_task_result] but got JobExecuteTimeout.'.format(settings.BACKEND_JOB_OPERATOR))\n raise JobPollTimeout({'job_instance_id': job_instance_id})\n polling_time += POLLING_INTERVAL\n time.sleep(POLLING_INTERVAL)\n result = cls.get_task_result(job_instance_id)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n params = {'job_instance_id': job_instance_id, 'bk_biz_id': settings.BLUEKING_BIZ_ID, 'bk_scope_type': constants.BkJobScopeType.BIZ_SET.value, 'bk_scope_id': settings.BLUEKING_BIZ_ID, 'bk_username': settings.BACKEND_JOB_OPERATOR, 'return_ip_result': True}\n 
job_status = JobApi.get_job_instance_status(params)\n is_finished = job_status['finished']\n host_infos__gby_job_status = defaultdict(list)\n step_instance_id = job_status['step_instance_list'][0]['step_instance_id']\n for instance in job_status['step_instance_list'][0]['step_ip_result_list']:\n if settings.BKAPP_ENABLE_DHCP:\n host_info = {'ip': instance['ip'], 'bk_cloud_id': instance['bk_cloud_id']}\n else:\n host_info = {'bk_host_id': instance['bk_host_id']}\n host_infos__gby_job_status[instance['status']].append(host_info)\n logger.info('user->[{}] called api->[{}] and got response->[{}].'.format(settings.BACKEND_JOB_OPERATOR, job_instance_id, json.dumps(job_status)))\n task_result = {'success': [], 'pending': [], 'failed': []}\n for status, hosts in host_infos__gby_job_status.items():\n if status == JobIPStatus.SUCCESS:\n key = 'success'\n elif status in (JobIPStatus.WAITING_FOR_EXEC, JobIPStatus.RUNNING):\n key = 'pending'\n else:\n key = 'failed'\n for host in hosts:\n base_log_params = {'job_instance_id': job_instance_id, 'bk_biz_id': settings.BLUEKING_BIZ_ID, 'bk_scope_type': constants.BkJobScopeType.BIZ_SET.value, 'bk_scope_id': settings.BLUEKING_BIZ_ID, 'bk_username': settings.BACKEND_JOB_OPERATOR, 'step_instance_id': step_instance_id}\n host_interaction_params: Dict[str, Union[str, int]] = {'bk_host_id': host['bk_host_id']} if settings.BKAPP_ENABLE_DHCP else {'ip': host['ip'], 'bk_cloud_id': host['bk_cloud_id']}\n log_result = JobApi.get_job_instance_ip_log({**base_log_params, **host_interaction_params})\n if settings.BKAPP_ENABLE_DHCP:\n task_result[key].append({'ip': host['ip'], 'bk_cloud_id': host['bk_cloud_id'], 'log_content': log_result['log_content']})\n else:\n task_result[key].append({'bk_host_id': host['bk_host_id'], 'log_content': log_result['log_content']})\n return {'is_finished': is_finished, 'task_result': task_result}\n<|end_body_1|>\n", "revision_id": "72d2104783443bff26c752c5bd934a013b302b6d", "skeleton": "<|skeleton|>\nclass JobDemand:\n\n def poll_task_result(cls, job_instance_id: int):\n \"\"\"轮询直到任务完成 :param job_instance_id: job任务id :return: 与 get_task_result 同\"\"\"\n <|body_0|>\n\n def get_task_result(cls, job_instance_id: int):\n \"\"\"获取执行结果 :param job_instance_id: job任务id :return: example: { \"success\": [ { 'ip': 127.0.0.1, 'bk_cloud_id': 0, 'host_id': 1, 'log_content': 'xx', } ], \"pending\": [], \"failed\": [] }\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class JobDemand:\n def poll_task_result(cls, job_instance_id: int):\n \"\"\"轮询直到任务完成 :param job_instance_id: job任务id :return: 与 get_task_result 同\"\"\"\n polling_time = 0\n result = cls.get_task_result(job_instance_id)\n while not result['is_finished']:\n if polling_time > POLLING_TIMEOUT:\n logger.error('user->[{}] called api->[get_task_result] but got JobExecuteTimeout.'.format(settings.BACKEND_JOB_OPERATOR))\n raise JobPollTimeout({'job_instance_id': job_instance_id})\n polling_time += POLLING_INTERVAL\n time.sleep(POLLING_INTERVAL)\n result = cls.get_task_result(job_instance_id)\n return result\n\n def get_task_result(cls, job_instance_id: int):\n \"\"\"获取执行结果 :param job_instance_id: job任务id :return: example: { \"success\": [ { 'ip': 127.0.0.1, 'bk_cloud_id': 0, 'host_id': 1, 'log_content': 'xx', } ], \"pending\": [], \"failed\": [] }\"\"\"\n params = {'job_instance_id': job_instance_id, 'bk_biz_id': settings.BLUEKING_BIZ_ID, 'bk_scope_type': 
constants.BkJobScopeType.BIZ_SET.value, 'bk_scope_id': settings.BLUEKING_BIZ_ID, 'bk_username': settings.BACKEND_JOB_OPERATOR, 'return_ip_result': True}\n job_status = JobApi.get_job_instance_status(params)\n is_finished = job_status['finished']\n host_infos__gby_job_status = defaultdict(list)\n step_instance_id = job_status['step_instance_list'][0]['step_instance_id']\n for instance in job_status['step_instance_list'][0]['step_ip_result_list']:\n if settings.BKAPP_ENABLE_DHCP:\n host_info = {'ip': instance['ip'], 'bk_cloud_id': instance['bk_cloud_id']}\n else:\n host_info = {'bk_host_id': instance['bk_host_id']}\n host_infos__gby_job_status[instance['status']].append(host_info)\n logger.info('user->[{}] called api->[{}] and got response->[{}].'.format(settings.BACKEND_JOB_OPERATOR, job_instance_id, json.dumps(job_status)))\n task_result = {'success': [], 'pending': [], 'failed': []}\n for status, hosts in host_infos__gby_job_status.items():\n if status == JobIPStatus.SUCCESS:\n key = 'success'\n elif status in (JobIPStatus.WAITING_FOR_EXEC, JobIPStatus.RUNNING):\n key = 'pending'\n else:\n key = 'failed'\n for host in hosts:\n base_log_params = {'job_instance_id': job_instance_id, 'bk_biz_id': settings.BLUEKING_BIZ_ID, 'bk_scope_type': constants.BkJobScopeType.BIZ_SET.value, 'bk_scope_id': settings.BLUEKING_BIZ_ID, 'bk_username': settings.BACKEND_JOB_OPERATOR, 'step_instance_id': step_instance_id}\n host_interaction_params: Dict[str, Union[str, int]] = {'bk_host_id': host['bk_host_id']} if settings.BKAPP_ENABLE_DHCP else {'ip': host['ip'], 'bk_cloud_id': host['bk_cloud_id']}\n log_result = JobApi.get_job_instance_ip_log({**base_log_params, **host_interaction_params})\n if settings.BKAPP_ENABLE_DHCP:\n task_result[key].append({'ip': host['ip'], 'bk_cloud_id': host['bk_cloud_id'], 'log_content': log_result['log_content']})\n else:\n task_result[key].append({'bk_host_id': host['bk_host_id'], 'log_content': log_result['log_content']})\n return {'is_finished': is_finished, 'task_result': task_result}\n", "source": "the_stack_v2_python_sparse", "source_path": "apps/node_man/periodic_tasks/utils.py", "source_repo": "TencentBlueKing/bk-nodeman", "split": "val", "star_events_count": 54} {"blob_id": "054eb20e692179306f89a49307baa54c1531b78f", "bodies": ["sc.logger.info('拍摄-拍摄页放弃')\ntime.sleep(1)\nsc.logger.info('点击创作中心主按钮')\nsc.driver.find_element_by_id('com.quvideo.xiaoying:id/img_creation').click()\nsc.logger.info('点击“拍摄”按钮')\nsc.driver.find_element_by_id('com.quvideo.xiaoying:id/icon2').click()\nel_capture = sc.driver.find_element_by_id('com.quvideo.xiaoying:id/btn_rec')\nsc.logger.info('点击录制按钮')\nel_capture.click()\ntime.sleep(5)\nsc.logger.info('拍摄5s后点击录制按钮停止拍摄')\nel_capture.click()\ntry:\n WebDriverWait(sc.driver, 60).until(lambda capture: capture.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel'))\nexcept Exception as e:\n sc.logger.error('拍摄完成但未找到返回按钮', e)\n return False\nsc.logger.info('点击左上角取消按钮')\nsc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel').click()\nsc.logger.info('点击“丢弃”按钮')\nsc.driver.find_element_by_android_uiautomator('text(\"丢弃\")').click()\nsc.logger.info('拍摄-拍摄页放弃测试完成')", "sc.logger.info('拍摄-拍摄页保存')\nsc.logger.info('点击创作中心“拍摄”按钮')\nsc.driver.find_element_by_id('com.quvideo.xiaoying:id/icon2').click()\ncapture = sc.driver.find_element_by_id('com.quvideo.xiaoying:id/btn_rec')\nsc.logger.info('点击录制按钮')\ncapture.click()\ntime.sleep(5)\nsc.logger.info('拍摄5s后点击录制按钮停止拍摄')\ncapture.click()\ntry:\n WebDriverWait(sc.driver, 60).until(lambda capture: 
capture.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel'))\nexcept Exception as e:\n sc.logger.error('拍摄完成但未找到返回按钮', e)\n return False\nsc.logger.info('点击左上角取消按钮')\nsc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel').click()\nsc.logger.info('点击“保存”按钮')\nsc.driver.find_element_by_android_uiautomator('text(\"保存\")').click()\nsc.logger.info('点击左上角返回按钮退回主页面')\nsc.driver.find_element_by_id('com.quvideo.xiaoying:id/xiaoying_com_btn_left').click()\nsc.logger.info('拍摄-拍摄页保存测试完成')", "sc.logger.info('拍摄-预览页放弃)')\nfun_name = 'test_cancel_preview'\nsc.logger.info('点击创作中心“拍摄”按钮')\nsc.driver.find_element_by_id('com.quvideo.xiaoying:id/icon2').click()\nel_capture = sc.driver.find_element_by_id('com.quvideo.xiaoying:id/btn_rec')\nsc.logger.info('点击录制按钮')\nel_capture.click()\ntime.sleep(5)\nsc.logger.info('拍摄5s后点击录制按钮停止拍摄')\nel_capture.click()\ntry:\n WebDriverWait(sc.driver, 60).until(lambda capture: capture.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_next'))\nexcept Exception as e:\n sc.logger.error('拍摄完成但未找到返回按钮', e)\n return False\nsc.logger.info('录制完成后点击确认按钮')\nsc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_next').click()\nsc.capture_screen(fun_name, self.img_path)\nsc.logger.info('点击左上角返回按钮')\nsc.driver.find_element_by_id('com.quvideo.xiaoying:id/xiaoying_com_btn_left').click()\nsc.logger.info('点击左上角取消按钮')\nsc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel').click()\nsc.logger.info('点击左上角返回按钮退回主页面')\nsc.driver.find_element_by_id('com.quvideo.xiaoying:id/xiaoying_com_btn_left').click()\nsc.logger.info('拍摄-预览页放弃测试完成')"], "bodies_text": "<|body_start_0|>\n sc.logger.info('拍摄-拍摄页放弃')\n time.sleep(1)\n sc.logger.info('点击创作中心主按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/img_creation').click()\n sc.logger.info('点击“拍摄”按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/icon2').click()\n el_capture = sc.driver.find_element_by_id('com.quvideo.xiaoying:id/btn_rec')\n sc.logger.info('点击录制按钮')\n el_capture.click()\n time.sleep(5)\n sc.logger.info('拍摄5s后点击录制按钮停止拍摄')\n el_capture.click()\n try:\n WebDriverWait(sc.driver, 60).until(lambda capture: capture.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel'))\n except Exception as e:\n sc.logger.error('拍摄完成但未找到返回按钮', e)\n return False\n sc.logger.info('点击左上角取消按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel').click()\n sc.logger.info('点击“丢弃”按钮')\n sc.driver.find_element_by_android_uiautomator('text(\"丢弃\")').click()\n sc.logger.info('拍摄-拍摄页放弃测试完成')\n<|end_body_0|>\n\n<|body_start_1|>\n sc.logger.info('拍摄-拍摄页保存')\n sc.logger.info('点击创作中心“拍摄”按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/icon2').click()\n capture = sc.driver.find_element_by_id('com.quvideo.xiaoying:id/btn_rec')\n sc.logger.info('点击录制按钮')\n capture.click()\n time.sleep(5)\n sc.logger.info('拍摄5s后点击录制按钮停止拍摄')\n capture.click()\n try:\n WebDriverWait(sc.driver, 60).until(lambda capture: capture.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel'))\n except Exception as e:\n sc.logger.error('拍摄完成但未找到返回按钮', e)\n return False\n sc.logger.info('点击左上角取消按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel').click()\n sc.logger.info('点击“保存”按钮')\n sc.driver.find_element_by_android_uiautomator('text(\"保存\")').click()\n sc.logger.info('点击左上角返回按钮退回主页面')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/xiaoying_com_btn_left').click()\n sc.logger.info('拍摄-拍摄页保存测试完成')\n<|end_body_1|>\n\n<|body_start_2|>\n sc.logger.info('拍摄-预览页放弃)')\n 
fun_name = 'test_cancel_preview'\n sc.logger.info('点击创作中心“拍摄”按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/icon2').click()\n el_capture = sc.driver.find_element_by_id('com.quvideo.xiaoying:id/btn_rec')\n sc.logger.info('点击录制按钮')\n el_capture.click()\n time.sleep(5)\n sc.logger.info('拍摄5s后点击录制按钮停止拍摄')\n el_capture.click()\n try:\n WebDriverWait(sc.driver, 60).until(lambda capture: capture.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_next'))\n except Exception as e:\n sc.logger.error('拍摄完成但未找到返回按钮', e)\n return False\n sc.logger.info('录制完成后点击确认按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_next').click()\n sc.capture_screen(fun_name, self.img_path)\n sc.logger.info('点击左上角返回按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/xiaoying_com_btn_left').click()\n sc.logger.info('点击左上角取消按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel').click()\n sc.logger.info('点击左上角返回按钮退回主页面')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/xiaoying_com_btn_left').click()\n sc.logger.info('拍摄-预览页放弃测试完成')\n<|end_body_2|>\n", "class_docstring": "camera取消操作相关的测试类.", "class_name": "TestCameraCancel", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestCameraCancel:\n \"\"\"camera取消操作相关的测试类.\"\"\"\n\n def test_cancel_shot(self):\n \"\"\"拍摄-拍摄页放弃.\"\"\"\n <|body_0|>\n\n def test_cancel_save(self):\n \"\"\"拍摄-拍摄页保存.\"\"\"\n <|body_1|>\n\n def test_cancel_preview(self):\n \"\"\"拍摄-预览页放弃.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sc.logger.info('拍摄-拍摄页放弃')\n time.sleep(1)\n sc.logger.info('点击创作中心主按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/img_creation').click()\n sc.logger.info('点击“拍摄”按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/icon2').click()\n el_capture = sc.driver.find_element_by_id('com.quvideo.xiaoying:id/btn_rec')\n sc.logger.info('点击录制按钮')\n el_capture.click()\n time.sleep(5)\n sc.logger.info('拍摄5s后点击录制按钮停止拍摄')\n el_capture.click()\n try:\n WebDriverWait(sc.driver, 60).until(lambda capture: capture.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel'))\n except Exception as e:\n sc.logger.error('拍摄完成但未找到返回按钮', e)\n return False\n sc.logger.info('点击左上角取消按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel').click()\n sc.logger.info('点击“丢弃”按钮')\n sc.driver.find_element_by_android_uiautomator('text(\"丢弃\")').click()\n sc.logger.info('拍摄-拍摄页放弃测试完成')\n<|end_body_0|>\n\n<|body_start_1|>\n sc.logger.info('拍摄-拍摄页保存')\n sc.logger.info('点击创作中心“拍摄”按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/icon2').click()\n capture = sc.driver.find_element_by_id('com.quvideo.xiaoying:id/btn_rec')\n sc.logger.info('点击录制按钮')\n capture.click()\n time.sleep(5)\n sc.logger.info('拍摄5s后点击录制按钮停止拍摄')\n capture.click()\n try:\n WebDriverWait(sc.driver, 60).until(lambda capture: capture.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel'))\n except Exception as e:\n sc.logger.error('拍摄完成但未找到返回按钮', e)\n return False\n sc.logger.info('点击左上角取消按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel').click()\n sc.logger.info('点击“保存”按钮')\n sc.driver.find_element_by_android_uiautomator('text(\"保存\")').click()\n sc.logger.info('点击左上角返回按钮退回主页面')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/xiaoying_com_btn_left').click()\n sc.logger.info('拍摄-拍摄页保存测试完成')\n<|end_body_1|>\n\n<|body_start_2|>\n sc.logger.info('拍摄-预览页放弃)')\n fun_name = 'test_cancel_preview'\n 
sc.logger.info('点击创作中心“拍摄”按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/icon2').click()\n el_capture = sc.driver.find_element_by_id('com.quvideo.xiaoying:id/btn_rec')\n sc.logger.info('点击录制按钮')\n el_capture.click()\n time.sleep(5)\n sc.logger.info('拍摄5s后点击录制按钮停止拍摄')\n el_capture.click()\n try:\n WebDriverWait(sc.driver, 60).until(lambda capture: capture.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_next'))\n except Exception as e:\n sc.logger.error('拍摄完成但未找到返回按钮', e)\n return False\n sc.logger.info('录制完成后点击确认按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_next').click()\n sc.capture_screen(fun_name, self.img_path)\n sc.logger.info('点击左上角返回按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/xiaoying_com_btn_left').click()\n sc.logger.info('点击左上角取消按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel').click()\n sc.logger.info('点击左上角返回按钮退回主页面')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/xiaoying_com_btn_left').click()\n sc.logger.info('拍摄-预览页放弃测试完成')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000094", "length_bytes": 4622, "license_type": "no_license", "methods": [{"docstring": "拍摄-拍摄页放弃.", "name": "test_cancel_shot", "signature": "def test_cancel_shot(self)"}, {"docstring": "拍摄-拍摄页保存.", "name": "test_cancel_save", "signature": "def test_cancel_save(self)"}, {"docstring": "拍摄-预览页放弃.", "name": "test_cancel_preview", "signature": "def test_cancel_preview(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001653", "prompt": "Implement the Python class `TestCameraCancel` described below.\n\nClass description:\ncamera取消操作相关的测试类.\n\nMethod signatures and docstrings:\n- def test_cancel_shot(self): 拍摄-拍摄页放弃.\n- def test_cancel_save(self): 拍摄-拍摄页保存.\n- def test_cancel_preview(self): 拍摄-预览页放弃.", "prompted_full_text": "Implement the Python class `TestCameraCancel` described below.\n\nClass description:\ncamera取消操作相关的测试类.\n\nMethod signatures and docstrings:\n- def test_cancel_shot(self): 拍摄-拍摄页放弃.\n- def test_cancel_save(self): 拍摄-拍摄页保存.\n- def test_cancel_preview(self): 拍摄-预览页放弃.\n\n<|skeleton|>\nclass TestCameraCancel:\n \"\"\"camera取消操作相关的测试类.\"\"\"\n\n def test_cancel_shot(self):\n \"\"\"拍摄-拍摄页放弃.\"\"\"\n <|body_0|>\n\n def test_cancel_save(self):\n \"\"\"拍摄-拍摄页保存.\"\"\"\n <|body_1|>\n\n def test_cancel_preview(self):\n \"\"\"拍摄-预览页放弃.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sc.logger.info('拍摄-拍摄页放弃')\n time.sleep(1)\n sc.logger.info('点击创作中心主按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/img_creation').click()\n sc.logger.info('点击“拍摄”按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/icon2').click()\n el_capture = sc.driver.find_element_by_id('com.quvideo.xiaoying:id/btn_rec')\n sc.logger.info('点击录制按钮')\n el_capture.click()\n time.sleep(5)\n sc.logger.info('拍摄5s后点击录制按钮停止拍摄')\n el_capture.click()\n try:\n WebDriverWait(sc.driver, 60).until(lambda capture: capture.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel'))\n except Exception as e:\n sc.logger.error('拍摄完成但未找到返回按钮', e)\n return False\n sc.logger.info('点击左上角取消按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel').click()\n sc.logger.info('点击“丢弃”按钮')\n sc.driver.find_element_by_android_uiautomator('text(\"丢弃\")').click()\n sc.logger.info('拍摄-拍摄页放弃测试完成')\n<|end_body_0|>\n\n<|body_start_1|>\n sc.logger.info('拍摄-拍摄页保存')\n sc.logger.info('点击创作中心“拍摄”按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/icon2').click()\n capture = 
sc.driver.find_element_by_id('com.quvideo.xiaoying:id/btn_rec')\n sc.logger.info('点击录制按钮')\n capture.click()\n time.sleep(5)\n sc.logger.info('拍摄5s后点击录制按钮停止拍摄')\n capture.click()\n try:\n WebDriverWait(sc.driver, 60).until(lambda capture: capture.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel'))\n except Exception as e:\n sc.logger.error('拍摄完成但未找到返回按钮', e)\n return False\n sc.logger.info('点击左上角取消按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel').click()\n sc.logger.info('点击“保存”按钮')\n sc.driver.find_element_by_android_uiautomator('text(\"保存\")').click()\n sc.logger.info('点击左上角返回按钮退回主页面')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/xiaoying_com_btn_left').click()\n sc.logger.info('拍摄-拍摄页保存测试完成')\n<|end_body_1|>\n\n<|body_start_2|>\n sc.logger.info('拍摄-预览页放弃)')\n fun_name = 'test_cancel_preview'\n sc.logger.info('点击创作中心“拍摄”按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/icon2').click()\n el_capture = sc.driver.find_element_by_id('com.quvideo.xiaoying:id/btn_rec')\n sc.logger.info('点击录制按钮')\n el_capture.click()\n time.sleep(5)\n sc.logger.info('拍摄5s后点击录制按钮停止拍摄')\n el_capture.click()\n try:\n WebDriverWait(sc.driver, 60).until(lambda capture: capture.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_next'))\n except Exception as e:\n sc.logger.error('拍摄完成但未找到返回按钮', e)\n return False\n sc.logger.info('录制完成后点击确认按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_next').click()\n sc.capture_screen(fun_name, self.img_path)\n sc.logger.info('点击左上角返回按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/xiaoying_com_btn_left').click()\n sc.logger.info('点击左上角取消按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel').click()\n sc.logger.info('点击左上角返回按钮退回主页面')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/xiaoying_com_btn_left').click()\n sc.logger.info('拍摄-预览页放弃测试完成')\n<|end_body_2|>\n", "revision_id": "b1190e3df62fa85562c14625c06a9794b8ce29a0", "skeleton": "<|skeleton|>\nclass TestCameraCancel:\n \"\"\"camera取消操作相关的测试类.\"\"\"\n\n def test_cancel_shot(self):\n \"\"\"拍摄-拍摄页放弃.\"\"\"\n <|body_0|>\n\n def test_cancel_save(self):\n \"\"\"拍摄-拍摄页保存.\"\"\"\n <|body_1|>\n\n def test_cancel_preview(self):\n \"\"\"拍摄-预览页放弃.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestCameraCancel:\n \"\"\"camera取消操作相关的测试类.\"\"\"\n\n def test_cancel_shot(self):\n \"\"\"拍摄-拍摄页放弃.\"\"\"\n sc.logger.info('拍摄-拍摄页放弃')\n time.sleep(1)\n sc.logger.info('点击创作中心主按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/img_creation').click()\n sc.logger.info('点击“拍摄”按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/icon2').click()\n el_capture = sc.driver.find_element_by_id('com.quvideo.xiaoying:id/btn_rec')\n sc.logger.info('点击录制按钮')\n el_capture.click()\n time.sleep(5)\n sc.logger.info('拍摄5s后点击录制按钮停止拍摄')\n el_capture.click()\n try:\n WebDriverWait(sc.driver, 60).until(lambda capture: capture.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel'))\n except Exception as e:\n sc.logger.error('拍摄完成但未找到返回按钮', e)\n return False\n sc.logger.info('点击左上角取消按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel').click()\n sc.logger.info('点击“丢弃”按钮')\n sc.driver.find_element_by_android_uiautomator('text(\"丢弃\")').click()\n sc.logger.info('拍摄-拍摄页放弃测试完成')\n\n def test_cancel_save(self):\n \"\"\"拍摄-拍摄页保存.\"\"\"\n sc.logger.info('拍摄-拍摄页保存')\n 
sc.logger.info('点击创作中心“拍摄”按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/icon2').click()\n capture = sc.driver.find_element_by_id('com.quvideo.xiaoying:id/btn_rec')\n sc.logger.info('点击录制按钮')\n capture.click()\n time.sleep(5)\n sc.logger.info('拍摄5s后点击录制按钮停止拍摄')\n capture.click()\n try:\n WebDriverWait(sc.driver, 60).until(lambda capture: capture.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel'))\n except Exception as e:\n sc.logger.error('拍摄完成但未找到返回按钮', e)\n return False\n sc.logger.info('点击左上角取消按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel').click()\n sc.logger.info('点击“保存”按钮')\n sc.driver.find_element_by_android_uiautomator('text(\"保存\")').click()\n sc.logger.info('点击左上角返回按钮退回主页面')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/xiaoying_com_btn_left').click()\n sc.logger.info('拍摄-拍摄页保存测试完成')\n\n def test_cancel_preview(self):\n \"\"\"拍摄-预览页放弃.\"\"\"\n sc.logger.info('拍摄-预览页放弃)')\n fun_name = 'test_cancel_preview'\n sc.logger.info('点击创作中心“拍摄”按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/icon2').click()\n el_capture = sc.driver.find_element_by_id('com.quvideo.xiaoying:id/btn_rec')\n sc.logger.info('点击录制按钮')\n el_capture.click()\n time.sleep(5)\n sc.logger.info('拍摄5s后点击录制按钮停止拍摄')\n el_capture.click()\n try:\n WebDriverWait(sc.driver, 60).until(lambda capture: capture.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_next'))\n except Exception as e:\n sc.logger.error('拍摄完成但未找到返回按钮', e)\n return False\n sc.logger.info('录制完成后点击确认按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_next').click()\n sc.capture_screen(fun_name, self.img_path)\n sc.logger.info('点击左上角返回按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/xiaoying_com_btn_left').click()\n sc.logger.info('点击左上角取消按钮')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/cam_btn_cancel').click()\n sc.logger.info('点击左上角返回按钮退回主页面')\n sc.driver.find_element_by_id('com.quvideo.xiaoying:id/xiaoying_com_btn_left').click()\n sc.logger.info('拍摄-预览页放弃测试完成')\n", "source": "the_stack_v2_python_sparse", "source_path": "Android/VivaVideo/test_creations/test_camera/test_cancel.py", "source_repo": "hicheng/UItest", "split": "val", "star_events_count": 0} {"blob_id": "fb6c99e65a41e15630519bfc7f0e9c1177af1eed", "bodies": ["super(Decoder, self).__init__()\nself.attention = LuongAttention(rnn_size, attention_func)\nself.rnn_size = rnn_size\nself.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\nself.lstm = tf.keras.layers.LSTM(rnn_size, return_sequences=True, return_state=True)\nself.wc = tf.keras.layers.Dense(rnn_size, activation='tanh')\nself.ws = tf.keras.layers.Dense(vocab_size)", "x = self.embedding(x)\noutput, state_h, state_c = self.lstm(x, initial_state=hidden, training=training)\ncontext_vector, alignment = self.attention(output, enc_output)\noutput = tf.concat([tf.squeeze(context_vector, 1), tf.squeeze(output, 1)], 1)\noutput = self.wc(output)\nlogits = self.ws(output)\nreturn (logits, state_h, state_c, alignment)"], "bodies_text": "<|body_start_0|>\n super(Decoder, self).__init__()\n self.attention = LuongAttention(rnn_size, attention_func)\n self.rnn_size = rnn_size\n self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n self.lstm = tf.keras.layers.LSTM(rnn_size, return_sequences=True, return_state=True)\n self.wc = tf.keras.layers.Dense(rnn_size, activation='tanh')\n self.ws = tf.keras.layers.Dense(vocab_size)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.embedding(x)\n output, state_h, state_c = 
self.lstm(x, initial_state=hidden, training=training)\n context_vector, alignment = self.attention(output, enc_output)\n output = tf.concat([tf.squeeze(context_vector, 1), tf.squeeze(output, 1)], 1)\n output = self.wc(output)\n logits = self.ws(output)\n return (logits, state_h, state_c, alignment)\n<|end_body_1|>\n", "class_docstring": "Decoder of the gru with attention model.", "class_name": "Decoder", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Decoder:\n \"\"\"Decoder of the gru with attention model.\"\"\"\n\n def __init__(self, vocab_size, embedding_dim, rnn_size, attention_func='dot'):\n \"\"\"Create the decoder.\"\"\"\n <|body_0|>\n\n def call(self, x, hidden, enc_output, training):\n \"\"\"Call the foward past. Note that the call must be for one caracter/word at a time.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Decoder, self).__init__()\n self.attention = LuongAttention(rnn_size, attention_func)\n self.rnn_size = rnn_size\n self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n self.lstm = tf.keras.layers.LSTM(rnn_size, return_sequences=True, return_state=True)\n self.wc = tf.keras.layers.Dense(rnn_size, activation='tanh')\n self.ws = tf.keras.layers.Dense(vocab_size)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.embedding(x)\n output, state_h, state_c = self.lstm(x, initial_state=hidden, training=training)\n context_vector, alignment = self.attention(output, enc_output)\n output = tf.concat([tf.squeeze(context_vector, 1), tf.squeeze(output, 1)], 1)\n output = self.wc(output)\n logits = self.ws(output)\n return (logits, state_h, state_c, alignment)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000095", "length_bytes": 8912, "license_type": "no_license", "methods": [{"docstring": "Create the decoder.", "name": "__init__", "signature": "def __init__(self, vocab_size, embedding_dim, rnn_size, attention_func='dot')"}, {"docstring": "Call the foward past. Note that the call must be for one caracter/word at a time.", "name": "call", "signature": "def call(self, x, hidden, enc_output, training)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001102", "prompt": "Implement the Python class `Decoder` described below.\n\nClass description:\nDecoder of the gru with attention model.\n\nMethod signatures and docstrings:\n- def __init__(self, vocab_size, embedding_dim, rnn_size, attention_func='dot'): Create the decoder.\n- def call(self, x, hidden, enc_output, training): Call the foward past. Note that the call must be for one caracter/word at a time.", "prompted_full_text": "Implement the Python class `Decoder` described below.\n\nClass description:\nDecoder of the gru with attention model.\n\nMethod signatures and docstrings:\n- def __init__(self, vocab_size, embedding_dim, rnn_size, attention_func='dot'): Create the decoder.\n- def call(self, x, hidden, enc_output, training): Call the foward past. Note that the call must be for one caracter/word at a time.\n\n<|skeleton|>\nclass Decoder:\n \"\"\"Decoder of the gru with attention model.\"\"\"\n\n def __init__(self, vocab_size, embedding_dim, rnn_size, attention_func='dot'):\n \"\"\"Create the decoder.\"\"\"\n <|body_0|>\n\n def call(self, x, hidden, enc_output, training):\n \"\"\"Call the foward past. 
Note that the call must be for one caracter/word at a time.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Decoder, self).__init__()\n self.attention = LuongAttention(rnn_size, attention_func)\n self.rnn_size = rnn_size\n self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n self.lstm = tf.keras.layers.LSTM(rnn_size, return_sequences=True, return_state=True)\n self.wc = tf.keras.layers.Dense(rnn_size, activation='tanh')\n self.ws = tf.keras.layers.Dense(vocab_size)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.embedding(x)\n output, state_h, state_c = self.lstm(x, initial_state=hidden, training=training)\n context_vector, alignment = self.attention(output, enc_output)\n output = tf.concat([tf.squeeze(context_vector, 1), tf.squeeze(output, 1)], 1)\n output = self.wc(output)\n logits = self.ws(output)\n return (logits, state_h, state_c, alignment)\n<|end_body_1|>\n", "revision_id": "4502d9e7461520664e72165a91bedd8e65464bae", "skeleton": "<|skeleton|>\nclass Decoder:\n \"\"\"Decoder of the gru with attention model.\"\"\"\n\n def __init__(self, vocab_size, embedding_dim, rnn_size, attention_func='dot'):\n \"\"\"Create the decoder.\"\"\"\n <|body_0|>\n\n def call(self, x, hidden, enc_output, training):\n \"\"\"Call the foward past. Note that the call must be for one caracter/word at a time.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Decoder:\n \"\"\"Decoder of the gru with attention model.\"\"\"\n\n def __init__(self, vocab_size, embedding_dim, rnn_size, attention_func='dot'):\n \"\"\"Create the decoder.\"\"\"\n super(Decoder, self).__init__()\n self.attention = LuongAttention(rnn_size, attention_func)\n self.rnn_size = rnn_size\n self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n self.lstm = tf.keras.layers.LSTM(rnn_size, return_sequences=True, return_state=True)\n self.wc = tf.keras.layers.Dense(rnn_size, activation='tanh')\n self.ws = tf.keras.layers.Dense(vocab_size)\n\n def call(self, x, hidden, enc_output, training):\n \"\"\"Call the foward past. Note that the call must be for one caracter/word at a time.\"\"\"\n x = self.embedding(x)\n output, state_h, state_c = self.lstm(x, initial_state=hidden, training=training)\n context_vector, alignment = self.attention(output, enc_output)\n output = tf.concat([tf.squeeze(context_vector, 1), tf.squeeze(output, 1)], 1)\n output = self.wc(output)\n logits = self.ws(output)\n return (logits, state_h, state_c, alignment)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/model/lstm_luong_attention.py", "source_repo": "nathanielsimard/Low-Resource-Machine-Translation", "split": "val", "star_events_count": 0} {"blob_id": "93175f0d589c8284b9a50e8c284dad4391f32619", "bodies": ["book = Book(title, author, reader)\ncls.book_list.append(book)\nprint('书本 %s 添加成功!' % book)", "target_book = None\nfor book in cls.book_list:\n if book.title == title and book.author == author:\n target_book = book\n break\nif target_book:\n cls.book_list.remove(target_book)\n print('书本 %s 删除成功!' % title)\nelse:\n print('书本 %s 不存在!' % title)", "titles = [b.title for b in cls.book_list]\nif title in titles:\n print('书本:%s 已找到!' % title)\n return 1\nelse:\n print('书本:%s 找不到!' % title)\n return -1", "reader = Patron(name)\ncls.reader_list.append(reader)\nprint('读者 %s 添加成功!' 
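The Decoder record above (its docstring's "Call the foward past" evidently means "call the forward pass") consumes a LuongAttention helper the record never defines: context_vector, alignment = self.attention(output, enc_output), one decoder step at a time. A sketch of a dot-score implementation consistent with that call site; this is standard Luong attention reconstructed from usage, not code recovered from the source repository:

import tensorflow as tf


class LuongAttention(tf.keras.Model):
    """Dot-score Luong attention for a single decoder step."""

    def __init__(self, rnn_size, attention_func='dot'):
        super().__init__()
        if attention_func != 'dot':
            raise ValueError('this sketch only implements the dot score')
        self.rnn_size = rnn_size

    def call(self, decoder_output, encoder_output):
        # decoder_output: [batch, 1, rnn_size]; encoder_output: [batch, src_len, rnn_size]
        score = tf.matmul(decoder_output, encoder_output, transpose_b=True)
        alignment = tf.nn.softmax(score, axis=-1)       # [batch, 1, src_len]
        context = tf.matmul(alignment, encoder_output)  # [batch, 1, rnn_size]
        return context, alignment

The decoder then squeezes axis 1 of both returned tensors before the tanh projection, which is why the sketch keeps the singleton step dimension.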
% reader)", "target_reader = None\nfor reader in cls.reader_list:\n if reader.name == name:\n target_reader = reader\n break\nif target_reader:\n cls.reader_list.remove(target_reader)\n print('读者 %s 删除成功!' % target_reader)\nelse:\n print('该读者不存在!')", "names = [r.name for r in cls.reader_list]\nif name in names:\n print('读者:%s 已找到!' % name)\n return 1\nelse:\n print('读者:%s 找不到!' % name)\n return -1"], "bodies_text": "<|body_start_0|>\n book = Book(title, author, reader)\n cls.book_list.append(book)\n print('书本 %s 添加成功!' % book)\n<|end_body_0|>\n\n<|body_start_1|>\n target_book = None\n for book in cls.book_list:\n if book.title == title and book.author == author:\n target_book = book\n break\n if target_book:\n cls.book_list.remove(target_book)\n print('书本 %s 删除成功!' % title)\n else:\n print('书本 %s 不存在!' % title)\n<|end_body_1|>\n\n<|body_start_2|>\n titles = [b.title for b in cls.book_list]\n if title in titles:\n print('书本:%s 已找到!' % title)\n return 1\n else:\n print('书本:%s 找不到!' % title)\n return -1\n<|end_body_2|>\n\n<|body_start_3|>\n reader = Patron(name)\n cls.reader_list.append(reader)\n print('读者 %s 添加成功!' % reader)\n<|end_body_3|>\n\n<|body_start_4|>\n target_reader = None\n for reader in cls.reader_list:\n if reader.name == name:\n target_reader = reader\n break\n if target_reader:\n cls.reader_list.remove(target_reader)\n print('读者 %s 删除成功!' % target_reader)\n else:\n print('该读者不存在!')\n<|end_body_4|>\n\n<|body_start_5|>\n names = [r.name for r in cls.reader_list]\n if name in names:\n print('读者:%s 已找到!' % name)\n return 1\n else:\n print('读者:%s 找不到!' % name)\n return -1\n<|end_body_5|>\n", "class_docstring": "", "class_name": "Library", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Library:\n\n def add_book(cls, title, author, reader=None):\n \"\"\"添加一本书\"\"\"\n <|body_0|>\n\n def del_book(cls, title, author):\n \"\"\"删除一本书\"\"\"\n <|body_1|>\n\n def find_book(cls, title, author=None):\n \"\"\"根据书名查找一本书 如果不存在则返回 -1,存在则返回 1\"\"\"\n <|body_2|>\n\n def add_reader(cls, name):\n \"\"\"添加一个读者\"\"\"\n <|body_3|>\n\n def del_reader(cls, name):\n \"\"\"删除一个读者\"\"\"\n <|body_4|>\n\n def find_reader(cls, name):\n \"\"\"根据读者名称查找一个读者 如果找到则返回 1,找不到则返回 -1\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n book = Book(title, author, reader)\n cls.book_list.append(book)\n print('书本 %s 添加成功!' % book)\n<|end_body_0|>\n\n<|body_start_1|>\n target_book = None\n for book in cls.book_list:\n if book.title == title and book.author == author:\n target_book = book\n break\n if target_book:\n cls.book_list.remove(target_book)\n print('书本 %s 删除成功!' % title)\n else:\n print('书本 %s 不存在!' % title)\n<|end_body_1|>\n\n<|body_start_2|>\n titles = [b.title for b in cls.book_list]\n if title in titles:\n print('书本:%s 已找到!' % title)\n return 1\n else:\n print('书本:%s 找不到!' % title)\n return -1\n<|end_body_2|>\n\n<|body_start_3|>\n reader = Patron(name)\n cls.reader_list.append(reader)\n print('读者 %s 添加成功!' % reader)\n<|end_body_3|>\n\n<|body_start_4|>\n target_reader = None\n for reader in cls.reader_list:\n if reader.name == name:\n target_reader = reader\n break\n if target_reader:\n cls.reader_list.remove(target_reader)\n print('读者 %s 删除成功!' % target_reader)\n else:\n print('该读者不存在!')\n<|end_body_4|>\n\n<|body_start_5|>\n names = [r.name for r in cls.reader_list]\n if name in names:\n print('读者:%s 已找到!' % name)\n return 1\n else:\n print('读者:%s 找不到!' 
% name)\n return -1\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_val_000096", "length_bytes": 2899, "license_type": "no_license", "methods": [{"docstring": "添加一本书", "name": "add_book", "signature": "def add_book(cls, title, author, reader=None)"}, {"docstring": "删除一本书", "name": "del_book", "signature": "def del_book(cls, title, author)"}, {"docstring": "根据书名查找一本书 如果不存在则返回 -1,存在则返回 1", "name": "find_book", "signature": "def find_book(cls, title, author=None)"}, {"docstring": "添加一个读者", "name": "add_reader", "signature": "def add_reader(cls, name)"}, {"docstring": "删除一个读者", "name": "del_reader", "signature": "def del_reader(cls, name)"}, {"docstring": "根据读者名称查找一个读者 如果找到则返回 1,找不到则返回 -1", "name": "find_reader", "signature": "def find_reader(cls, name)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_003376", "prompt": "Implement the Python class `Library` described below.\n\nClass description:\nImplement the Library class.\n\nMethod signatures and docstrings:\n- def add_book(cls, title, author, reader=None): 添加一本书\n- def del_book(cls, title, author): 删除一本书\n- def find_book(cls, title, author=None): 根据书名查找一本书 如果不存在则返回 -1,存在则返回 1\n- def add_reader(cls, name): 添加一个读者\n- def del_reader(cls, name): 删除一个读者\n- def find_reader(cls, name): 根据读者名称查找一个读者 如果找到则返回 1,找不到则返回 -1", "prompted_full_text": "Implement the Python class `Library` described below.\n\nClass description:\nImplement the Library class.\n\nMethod signatures and docstrings:\n- def add_book(cls, title, author, reader=None): 添加一本书\n- def del_book(cls, title, author): 删除一本书\n- def find_book(cls, title, author=None): 根据书名查找一本书 如果不存在则返回 -1,存在则返回 1\n- def add_reader(cls, name): 添加一个读者\n- def del_reader(cls, name): 删除一个读者\n- def find_reader(cls, name): 根据读者名称查找一个读者 如果找到则返回 1,找不到则返回 -1\n\n<|skeleton|>\nclass Library:\n\n def add_book(cls, title, author, reader=None):\n \"\"\"添加一本书\"\"\"\n <|body_0|>\n\n def del_book(cls, title, author):\n \"\"\"删除一本书\"\"\"\n <|body_1|>\n\n def find_book(cls, title, author=None):\n \"\"\"根据书名查找一本书 如果不存在则返回 -1,存在则返回 1\"\"\"\n <|body_2|>\n\n def add_reader(cls, name):\n \"\"\"添加一个读者\"\"\"\n <|body_3|>\n\n def del_reader(cls, name):\n \"\"\"删除一个读者\"\"\"\n <|body_4|>\n\n def find_reader(cls, name):\n \"\"\"根据读者名称查找一个读者 如果找到则返回 1,找不到则返回 -1\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n book = Book(title, author, reader)\n cls.book_list.append(book)\n print('书本 %s 添加成功!' % book)\n<|end_body_0|>\n\n<|body_start_1|>\n target_book = None\n for book in cls.book_list:\n if book.title == title and book.author == author:\n target_book = book\n break\n if target_book:\n cls.book_list.remove(target_book)\n print('书本 %s 删除成功!' % title)\n else:\n print('书本 %s 不存在!' % title)\n<|end_body_1|>\n\n<|body_start_2|>\n titles = [b.title for b in cls.book_list]\n if title in titles:\n print('书本:%s 已找到!' % title)\n return 1\n else:\n print('书本:%s 找不到!' % title)\n return -1\n<|end_body_2|>\n\n<|body_start_3|>\n reader = Patron(name)\n cls.reader_list.append(reader)\n print('读者 %s 添加成功!' % reader)\n<|end_body_3|>\n\n<|body_start_4|>\n target_reader = None\n for reader in cls.reader_list:\n if reader.name == name:\n target_reader = reader\n break\n if target_reader:\n cls.reader_list.remove(target_reader)\n print('读者 %s 删除成功!' % target_reader)\n else:\n print('该读者不存在!')\n<|end_body_4|>\n\n<|body_start_5|>\n names = [r.name for r in cls.reader_list]\n if name in names:\n print('读者:%s 已找到!' % name)\n return 1\n else:\n print('读者:%s 找不到!' 
% name)\n return -1\n<|end_body_5|>\n", "revision_id": "5a562d76830faf78feec81bc11190b71eae3a799", "skeleton": "<|skeleton|>\nclass Library:\n\n def add_book(cls, title, author, reader=None):\n \"\"\"添加一本书\"\"\"\n <|body_0|>\n\n def del_book(cls, title, author):\n \"\"\"删除一本书\"\"\"\n <|body_1|>\n\n def find_book(cls, title, author=None):\n \"\"\"根据书名查找一本书 如果不存在则返回 -1,存在则返回 1\"\"\"\n <|body_2|>\n\n def add_reader(cls, name):\n \"\"\"添加一个读者\"\"\"\n <|body_3|>\n\n def del_reader(cls, name):\n \"\"\"删除一个读者\"\"\"\n <|body_4|>\n\n def find_reader(cls, name):\n \"\"\"根据读者名称查找一个读者 如果找到则返回 1,找不到则返回 -1\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Library:\n def add_book(cls, title, author, reader=None):\n \"\"\"添加一本书\"\"\"\n book = Book(title, author, reader)\n cls.book_list.append(book)\n print('书本 %s 添加成功!' % book)\n\n def del_book(cls, title, author):\n \"\"\"删除一本书\"\"\"\n target_book = None\n for book in cls.book_list:\n if book.title == title and book.author == author:\n target_book = book\n break\n if target_book:\n cls.book_list.remove(target_book)\n print('书本 %s 删除成功!' % title)\n else:\n print('书本 %s 不存在!' % title)\n\n def find_book(cls, title, author=None):\n \"\"\"根据书名查找一本书 如果不存在则返回 -1,存在则返回 1\"\"\"\n titles = [b.title for b in cls.book_list]\n if title in titles:\n print('书本:%s 已找到!' % title)\n return 1\n else:\n print('书本:%s 找不到!' % title)\n return -1\n\n def add_reader(cls, name):\n \"\"\"添加一个读者\"\"\"\n reader = Patron(name)\n cls.reader_list.append(reader)\n print('读者 %s 添加成功!' % reader)\n\n def del_reader(cls, name):\n \"\"\"删除一个读者\"\"\"\n target_reader = None\n for reader in cls.reader_list:\n if reader.name == name:\n target_reader = reader\n break\n if target_reader:\n cls.reader_list.remove(target_reader)\n print('读者 %s 删除成功!' % target_reader)\n else:\n print('该读者不存在!')\n\n def find_reader(cls, name):\n \"\"\"根据读者名称查找一个读者 如果找到则返回 1,找不到则返回 -1\"\"\"\n names = [r.name for r in cls.reader_list]\n if name in names:\n print('读者:%s 已找到!' % name)\n return 1\n else:\n print('读者:%s 找不到!' 
% name)\n return -1\n", "source": "the_stack_v2_python_sparse", "source_path": "FundamentalsOfPythonDataStructures/ProgrammingProject/chapter1/project_10.py", "source_repo": "xjr7670/book_practice", "split": "val", "star_events_count": 3} {"blob_id": "5fb83e5a5a3f67df233eb30e8b4390e94e8ab460", "bodies": ["_diff = np.abs(self.dataset['TAT_DI_R'].array - self.dataset['TAT_ND_R'].array)\nmask = pd.Series(np.zeros_like(self.dataset['TAT_DI_R'].array), index=self.dataset['TAT_DI_R'].index)\nmask.loc[_diff > TEMPERATURE_THRESHOLD] = MASKED\nreturn mask", "if test:\n flag = self.test_flag\nelse:\n flag = self._get_flag()\nfor var in TEMPERATURE_VARIABLES:\n self.add_mask(var, flag, 'discrepancy threshold exceeded', f'The discrepancy between the deiced and non-deiced temperature sensors is greater than {TEMPERATURE_THRESHOLD} K.')"], "bodies_text": "<|body_start_0|>\n _diff = np.abs(self.dataset['TAT_DI_R'].array - self.dataset['TAT_ND_R'].array)\n mask = pd.Series(np.zeros_like(self.dataset['TAT_DI_R'].array), index=self.dataset['TAT_DI_R'].index)\n mask.loc[_diff > TEMPERATURE_THRESHOLD] = MASKED\n return mask\n<|end_body_0|>\n\n<|body_start_1|>\n if test:\n flag = self.test_flag\n else:\n flag = self._get_flag()\n for var in TEMPERATURE_VARIABLES:\n self.add_mask(var, flag, 'discrepancy threshold exceeded', f'The discrepancy between the deiced and non-deiced temperature sensors is greater than {TEMPERATURE_THRESHOLD} K.')\n<|end_body_1|>\n", "class_docstring": "This class adds a flag to the rosemount temperatures if the two temperatures disagree by more than a given absolute value. This is given by the module constant TEMPERATURE_THRESHOLD.", "class_name": "RosemountTempDeltaFlag", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RosemountTempDeltaFlag:\n \"\"\"This class adds a flag to the rosemount temperatures if the two temperatures disagree by more than a given absolute value. This is given by the module constant TEMPERATURE_THRESHOLD.\"\"\"\n\n def _get_flag(self):\n \"\"\"Get the flag value for the new flag\"\"\"\n <|body_0|>\n\n def _flag(self, test=False):\n \"\"\"Entry point for the flagging module.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n _diff = np.abs(self.dataset['TAT_DI_R'].array - self.dataset['TAT_ND_R'].array)\n mask = pd.Series(np.zeros_like(self.dataset['TAT_DI_R'].array), index=self.dataset['TAT_DI_R'].index)\n mask.loc[_diff > TEMPERATURE_THRESHOLD] = MASKED\n return mask\n<|end_body_0|>\n\n<|body_start_1|>\n if test:\n flag = self.test_flag\n else:\n flag = self._get_flag()\n for var in TEMPERATURE_VARIABLES:\n self.add_mask(var, flag, 'discrepancy threshold exceeded', f'The discrepancy between the deiced and non-deiced temperature sensors is greater than {TEMPERATURE_THRESHOLD} K.')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000097", "length_bytes": 3355, "license_type": "no_license", "methods": [{"docstring": "Get the flag value for the new flag", "name": "_get_flag", "signature": "def _get_flag(self)"}, {"docstring": "Entry point for the flagging module.", "name": "_flag", "signature": "def _flag(self, test=False)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003943", "prompt": "Implement the Python class `RosemountTempDeltaFlag` described below.\n\nClass description:\nThis class adds a flag to the rosemount temperatures if the two temperatures disagree by more than a given absolute value. 
This is given by the module constant TEMPERATURE_THRESHOLD.\n\nMethod signatures and docstrings:\n- def _get_flag(self): Get the flag value for the new flag\n- def _flag(self, test=False): Entry point for the flagging module.", "prompted_full_text": "Implement the Python class `RosemountTempDeltaFlag` described below.\n\nClass description:\nThis class adds a flag to the rosemount temperatures if the two temperatures disagree by more than a given absolute value. This is given by the module constant TEMPERATURE_THRESHOLD.\n\nMethod signatures and docstrings:\n- def _get_flag(self): Get the flag value for the new flag\n- def _flag(self, test=False): Entry point for the flagging module.\n\n<|skeleton|>\nclass RosemountTempDeltaFlag:\n \"\"\"This class adds a flag to the rosemount temperatures if the two temperatures disagree by more than a given absolute value. This is given by the module constant TEMPERATURE_THRESHOLD.\"\"\"\n\n def _get_flag(self):\n \"\"\"Get the flag value for the new flag\"\"\"\n <|body_0|>\n\n def _flag(self, test=False):\n \"\"\"Entry point for the flagging module.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n _diff = np.abs(self.dataset['TAT_DI_R'].array - self.dataset['TAT_ND_R'].array)\n mask = pd.Series(np.zeros_like(self.dataset['TAT_DI_R'].array), index=self.dataset['TAT_DI_R'].index)\n mask.loc[_diff > TEMPERATURE_THRESHOLD] = MASKED\n return mask\n<|end_body_0|>\n\n<|body_start_1|>\n if test:\n flag = self.test_flag\n else:\n flag = self._get_flag()\n for var in TEMPERATURE_VARIABLES:\n self.add_mask(var, flag, 'discrepancy threshold exceeded', f'The discrepancy between the deiced and non-deiced temperature sensors is greater than {TEMPERATURE_THRESHOLD} K.')\n<|end_body_1|>\n", "revision_id": "e8c54f78a97166c5f66b2196ea4d6eb7a33a0bc4", "skeleton": "<|skeleton|>\nclass RosemountTempDeltaFlag:\n \"\"\"This class adds a flag to the rosemount temperatures if the two temperatures disagree by more than a given absolute value. This is given by the module constant TEMPERATURE_THRESHOLD.\"\"\"\n\n def _get_flag(self):\n \"\"\"Get the flag value for the new flag\"\"\"\n <|body_0|>\n\n def _flag(self, test=False):\n \"\"\"Entry point for the flagging module.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RosemountTempDeltaFlag:\n \"\"\"This class adds a flag to the rosemount temperatures if the two temperatures disagree by more than a given absolute value. 
This is given by the module constant TEMPERATURE_THRESHOLD.\"\"\"\n\n def _get_flag(self):\n \"\"\"Get the flag value for the new flag\"\"\"\n _diff = np.abs(self.dataset['TAT_DI_R'].array - self.dataset['TAT_ND_R'].array)\n mask = pd.Series(np.zeros_like(self.dataset['TAT_DI_R'].array), index=self.dataset['TAT_DI_R'].index)\n mask.loc[_diff > TEMPERATURE_THRESHOLD] = MASKED\n return mask\n\n def _flag(self, test=False):\n \"\"\"Entry point for the flagging module.\"\"\"\n if test:\n flag = self.test_flag\n else:\n flag = self._get_flag()\n for var in TEMPERATURE_VARIABLES:\n self.add_mask(var, flag, 'discrepancy threshold exceeded', f'The discrepancy between the deiced and non-deiced temperature sensors is greater than {TEMPERATURE_THRESHOLD} K.')\n", "source": "the_stack_v2_python_sparse", "source_path": "ppodd/flags/p_rosemount_temps.py", "source_repo": "FAAM-146/decades-ppandas", "split": "val", "star_events_count": 0} {"blob_id": "6cfc156d7435c579ca0fef2d7ad88d03c723d482", "bodies": ["stack = []\ntemp = self.head\nwhile temp:\n stack.append(temp.data)\n temp = temp.next\ntemp = self.head\nwhile temp:\n if temp.data != stack[0]:\n return False\n stack.pop(0)\n temp = temp.next\nreturn True", "temp1 = first\ntemp2 = second\nwhile temp1 and temp2:\n if temp1.data == temp2.data:\n temp1 = temp1.next\n temp2 = temp2.next\n else:\n return False\nif not temp1 and (not temp2):\n return True\nreturn False", "slow = self.head\nfast = self.head\nmidnode = None\nprev_to_slow = None\nwhile fast and fast.next:\n prev_to_slow = slow\n slow = slow.next\n fast = fast.next.next\nif fast:\n midnode = slow\n slow = slow.next\nprev_to_slow.next = None\nsecond_half = slow\nsecond_half = LinkedListReverse.iterative_reverse(second_half)\nres = CheckPalindrome.compare_list(self.head, second_half)\nsecond_half = LinkedListReverse.iterative_reverse(second_half)\nif midnode:\n prev_to_slow.next = midnode\n midnode.next = second_half\nelse:\n prev_to_slow.next = second_half\nreturn res"], "bodies_text": "<|body_start_0|>\n stack = []\n temp = self.head\n while temp:\n stack.append(temp.data)\n temp = temp.next\n temp = self.head\n while temp:\n if temp.data != stack[0]:\n return False\n stack.pop(0)\n temp = temp.next\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n temp1 = first\n temp2 = second\n while temp1 and temp2:\n if temp1.data == temp2.data:\n temp1 = temp1.next\n temp2 = temp2.next\n else:\n return False\n if not temp1 and (not temp2):\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n slow = self.head\n fast = self.head\n midnode = None\n prev_to_slow = None\n while fast and fast.next:\n prev_to_slow = slow\n slow = slow.next\n fast = fast.next.next\n if fast:\n midnode = slow\n slow = slow.next\n prev_to_slow.next = None\n second_half = slow\n second_half = LinkedListReverse.iterative_reverse(second_half)\n res = CheckPalindrome.compare_list(self.head, second_half)\n second_half = LinkedListReverse.iterative_reverse(second_half)\n if midnode:\n prev_to_slow.next = midnode\n midnode.next = second_half\n else:\n prev_to_slow.next = second_half\n return res\n<|end_body_2|>\n", "class_docstring": "", "class_name": "CheckPalindrome", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CheckPalindrome:\n\n def check_palindrome_using_stack(self):\n \"\"\"Function to check whether elements in linked list form a palindrome of not. 
It traverses the linked list twice, once to push all elements in stack and other to verify elements in the stack :return: Bool\"\"\"\n <|body_0|>\n\n def compare_list(first, second):\n \"\"\"Function to compare elements of the two list :param first: Node :param second: Node :return: Bool\"\"\"\n <|body_1|>\n\n def check_palindrome_using_reverse(self):\n \"\"\"Function to check whether elements in the liked list form a palindrome or not. This function gets to the middle of linked list and reverses the second half to check it it matches with the first half. It reverses the second half again to form the second half :return: Bool\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n stack = []\n temp = self.head\n while temp:\n stack.append(temp.data)\n temp = temp.next\n temp = self.head\n while temp:\n if temp.data != stack[0]:\n return False\n stack.pop(0)\n temp = temp.next\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n temp1 = first\n temp2 = second\n while temp1 and temp2:\n if temp1.data == temp2.data:\n temp1 = temp1.next\n temp2 = temp2.next\n else:\n return False\n if not temp1 and (not temp2):\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n slow = self.head\n fast = self.head\n midnode = None\n prev_to_slow = None\n while fast and fast.next:\n prev_to_slow = slow\n slow = slow.next\n fast = fast.next.next\n if fast:\n midnode = slow\n slow = slow.next\n prev_to_slow.next = None\n second_half = slow\n second_half = LinkedListReverse.iterative_reverse(second_half)\n res = CheckPalindrome.compare_list(self.head, second_half)\n second_half = LinkedListReverse.iterative_reverse(second_half)\n if midnode:\n prev_to_slow.next = midnode\n midnode.next = second_half\n else:\n prev_to_slow.next = second_half\n return res\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000098", "length_bytes": 2715, "license_type": "no_license", "methods": [{"docstring": "Function to check whether elements in linked list form a palindrome of not. It traverses the linked list twice, once to push all elements in stack and other to verify elements in the stack :return: Bool", "name": "check_palindrome_using_stack", "signature": "def check_palindrome_using_stack(self)"}, {"docstring": "Function to compare elements of the two list :param first: Node :param second: Node :return: Bool", "name": "compare_list", "signature": "def compare_list(first, second)"}, {"docstring": "Function to check whether elements in the liked list form a palindrome or not. This function gets to the middle of linked list and reverses the second half to check it it matches with the first half. It reverses the second half again to form the second half :return: Bool", "name": "check_palindrome_using_reverse", "signature": "def check_palindrome_using_reverse(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000256", "prompt": "Implement the Python class `CheckPalindrome` described below.\n\nClass description:\nImplement the CheckPalindrome class.\n\nMethod signatures and docstrings:\n- def check_palindrome_using_stack(self): Function to check whether elements in linked list form a palindrome of not. It traverses the linked list twice, once to push all elements in stack and other to verify elements in the stack :return: Bool\n- def compare_list(first, second): Function to compare elements of the two list :param first: Node :param second: Node :return: Bool\n- def check_palindrome_using_reverse(self): Function to check whether elements in the liked list form a palindrome or not. 
This function gets to the middle of linked list and reverses the second half to check it it matches with the first half. It reverses the second half again to form the second half :return: Bool", "prompted_full_text": "Implement the Python class `CheckPalindrome` described below.\n\nClass description:\nImplement the CheckPalindrome class.\n\nMethod signatures and docstrings:\n- def check_palindrome_using_stack(self): Function to check whether elements in linked list form a palindrome of not. It traverses the linked list twice, once to push all elements in stack and other to verify elements in the stack :return: Bool\n- def compare_list(first, second): Function to compare elements of the two list :param first: Node :param second: Node :return: Bool\n- def check_palindrome_using_reverse(self): Function to check whether elements in the liked list form a palindrome or not. This function gets to the middle of linked list and reverses the second half to check it it matches with the first half. It reverses the second half again to form the second half :return: Bool\n\n<|skeleton|>\nclass CheckPalindrome:\n\n def check_palindrome_using_stack(self):\n \"\"\"Function to check whether elements in linked list form a palindrome of not. It traverses the linked list twice, once to push all elements in stack and other to verify elements in the stack :return: Bool\"\"\"\n <|body_0|>\n\n def compare_list(first, second):\n \"\"\"Function to compare elements of the two list :param first: Node :param second: Node :return: Bool\"\"\"\n <|body_1|>\n\n def check_palindrome_using_reverse(self):\n \"\"\"Function to check whether elements in the liked list form a palindrome or not. This function gets to the middle of linked list and reverses the second half to check it it matches with the first half. It reverses the second half again to form the second half :return: Bool\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n stack = []\n temp = self.head\n while temp:\n stack.append(temp.data)\n temp = temp.next\n temp = self.head\n while temp:\n if temp.data != stack[0]:\n return False\n stack.pop(0)\n temp = temp.next\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n temp1 = first\n temp2 = second\n while temp1 and temp2:\n if temp1.data == temp2.data:\n temp1 = temp1.next\n temp2 = temp2.next\n else:\n return False\n if not temp1 and (not temp2):\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n slow = self.head\n fast = self.head\n midnode = None\n prev_to_slow = None\n while fast and fast.next:\n prev_to_slow = slow\n slow = slow.next\n fast = fast.next.next\n if fast:\n midnode = slow\n slow = slow.next\n prev_to_slow.next = None\n second_half = slow\n second_half = LinkedListReverse.iterative_reverse(second_half)\n res = CheckPalindrome.compare_list(self.head, second_half)\n second_half = LinkedListReverse.iterative_reverse(second_half)\n if midnode:\n prev_to_slow.next = midnode\n midnode.next = second_half\n else:\n prev_to_slow.next = second_half\n return res\n<|end_body_2|>\n", "revision_id": "7e484faa5c75e690f2cb33ee95eedf4472c0089b", "skeleton": "<|skeleton|>\nclass CheckPalindrome:\n\n def check_palindrome_using_stack(self):\n \"\"\"Function to check whether elements in linked list form a palindrome of not. 
It traverses the linked list twice, once to push all elements in stack and other to verify elements in the stack :return: Bool\"\"\"\n <|body_0|>\n\n def compare_list(first, second):\n \"\"\"Function to compare elements of the two list :param first: Node :param second: Node :return: Bool\"\"\"\n <|body_1|>\n\n def check_palindrome_using_reverse(self):\n \"\"\"Function to check whether elements in the liked list form a palindrome or not. This function gets to the middle of linked list and reverses the second half to check it it matches with the first half. It reverses the second half again to form the second half :return: Bool\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CheckPalindrome:\n def check_palindrome_using_stack(self):\n \"\"\"Function to check whether elements in linked list form a palindrome of not. It traverses the linked list twice, once to push all elements in stack and other to verify elements in the stack :return: Bool\"\"\"\n stack = []\n temp = self.head\n while temp:\n stack.append(temp.data)\n temp = temp.next\n temp = self.head\n while temp:\n if temp.data != stack[0]:\n return False\n stack.pop(0)\n temp = temp.next\n return True\n\n def compare_list(first, second):\n \"\"\"Function to compare elements of the two list :param first: Node :param second: Node :return: Bool\"\"\"\n temp1 = first\n temp2 = second\n while temp1 and temp2:\n if temp1.data == temp2.data:\n temp1 = temp1.next\n temp2 = temp2.next\n else:\n return False\n if not temp1 and (not temp2):\n return True\n return False\n\n def check_palindrome_using_reverse(self):\n \"\"\"Function to check whether elements in the liked list form a palindrome or not. This function gets to the middle of linked list and reverses the second half to check it it matches with the first half. 
It reverses the second half again to form the second half :return: Bool\"\"\"\n slow = self.head\n fast = self.head\n midnode = None\n prev_to_slow = None\n while fast and fast.next:\n prev_to_slow = slow\n slow = slow.next\n fast = fast.next.next\n if fast:\n midnode = slow\n slow = slow.next\n prev_to_slow.next = None\n second_half = slow\n second_half = LinkedListReverse.iterative_reverse(second_half)\n res = CheckPalindrome.compare_list(self.head, second_half)\n second_half = LinkedListReverse.iterative_reverse(second_half)\n if midnode:\n prev_to_slow.next = midnode\n midnode.next = second_half\n else:\n prev_to_slow.next = second_half\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "linkedlists/singly_linked_list/check_palindrome_in_linkedlist.py", "source_repo": "sunny0910/Data-Structures-Algorithms", "split": "val", "star_events_count": 5} {"blob_id": "b9cf7aced1761cf6944eb88333cd328136ade255", "bodies": ["res = True\npoint = [p.x, p.y, p.z]\nsize = data.size * data.nodemat * data.scale\nsize -= data.nodemat.off\nsize = [size.x, size.y, size.z]\noffset = [data.offset.x, data.offset.y, data.offset.z]\npos = c4d.Vector() * data.nodemat\npos = [pos.x, pos.y, pos.z]\nfor i in range(3):\n res = pos[i] + offset[i] + size[i] > point[i] > pos[i] + offset[i] - size[i]\n if not res:\n break\nreturn res", "bd.SetPen(c4d.GetViewColor(c4d.VIEWCOLOR_ACTIVEPOINT))\np1, p2, p3, p4 = (c4d.Vector(), c4d.Vector(), c4d.Vector(), c4d.Vector())\nif i == 0:\n p1 = c4d.Vector(0, -size.y, -size.z)\n p2 = c4d.Vector(0, -size.y, size.z)\n p3 = c4d.Vector(0, size.y, size.z)\n p4 = c4d.Vector(0, size.y, -size.z)\nelif i == 1:\n p1 = c4d.Vector(-size.x, 0, -size.z)\n p2 = c4d.Vector(-size.x, 0, size.z)\n p3 = c4d.Vector(size.x, 0, size.z)\n p4 = c4d.Vector(size.x, 0, -size.z)\nelif i == 2:\n p1 = c4d.Vector(-size.x, -size.y, 0)\n p2 = c4d.Vector(-size.x, size.y, 0)\n p3 = c4d.Vector(size.x, size.y, 0)\n p4 = c4d.Vector(size.x, -size.y, 0)\nbd.DrawLine(p1, p2, 0)\nbd.DrawLine(p2, p3, 0)\nbd.DrawLine(p3, p4, 0)\nbd.DrawLine(p4, p1, 0)"], "bodies_text": "<|body_start_0|>\n res = True\n point = [p.x, p.y, p.z]\n size = data.size * data.nodemat * data.scale\n size -= data.nodemat.off\n size = [size.x, size.y, size.z]\n offset = [data.offset.x, data.offset.y, data.offset.z]\n pos = c4d.Vector() * data.nodemat\n pos = [pos.x, pos.y, pos.z]\n for i in range(3):\n res = pos[i] + offset[i] + size[i] > point[i] > pos[i] + offset[i] - size[i]\n if not res:\n break\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n bd.SetPen(c4d.GetViewColor(c4d.VIEWCOLOR_ACTIVEPOINT))\n p1, p2, p3, p4 = (c4d.Vector(), c4d.Vector(), c4d.Vector(), c4d.Vector())\n if i == 0:\n p1 = c4d.Vector(0, -size.y, -size.z)\n p2 = c4d.Vector(0, -size.y, size.z)\n p3 = c4d.Vector(0, size.y, size.z)\n p4 = c4d.Vector(0, size.y, -size.z)\n elif i == 1:\n p1 = c4d.Vector(-size.x, 0, -size.z)\n p2 = c4d.Vector(-size.x, 0, size.z)\n p3 = c4d.Vector(size.x, 0, size.z)\n p4 = c4d.Vector(size.x, 0, -size.z)\n elif i == 2:\n p1 = c4d.Vector(-size.x, -size.y, 0)\n p2 = c4d.Vector(-size.x, size.y, 0)\n p3 = c4d.Vector(size.x, size.y, 0)\n p4 = c4d.Vector(size.x, -size.y, 0)\n bd.DrawLine(p1, p2, 0)\n bd.DrawLine(p2, p3, 0)\n bd.DrawLine(p3, p4, 0)\n bd.DrawLine(p4, p1, 0)\n<|end_body_1|>\n", "class_docstring": "Utility class for the noise falloff", "class_name": "NoiseFalloffHelper", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": 
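One caution on the CheckPalindrome record: check_palindrome_using_stack pops from index 0, so its second walk compares the list with itself in order and returns True for every list. A genuine stack comparison pops from the top; a corrected sketch, assuming the same singly linked node shape with data/next attributes:

class Node:
    def __init__(self, data, next=None):
        self.data, self.next = (data, next)


def is_palindrome_stack(head):
    """Push every value, then pop from the *top* while re-walking the list."""
    stack = []
    node = head
    while node:
        stack.append(node.data)
        node = node.next
    node = head
    while node:
        if node.data != stack.pop():  # top of stack = tail of list
            return False
        node = node.next
    return True


print(is_palindrome_stack(Node(1, Node(2, Node(1)))))  # True
print(is_palindrome_stack(Node(1, Node(2, Node(3)))))  # False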
"<|skeleton|>\nclass NoiseFalloffHelper:\n \"\"\"Utility class for the noise falloff\"\"\"\n\n def PointInBox(p, data):\n \"\"\"Returns if a point is in box. Args: p (c4d.Vector): The point position. data (c4d.BaseContainer): Falloff data information. Returns: True if the point is in box, otherwise False\"\"\"\n <|body_0|>\n\n def DrawHandleLines(bd, size, i):\n \"\"\"Helper method to draw a handle. Args: bd (c4d.BaseDraw): The editor's view. size (c4d.Vector): vector size of the object i (int): The Handle Id\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = True\n point = [p.x, p.y, p.z]\n size = data.size * data.nodemat * data.scale\n size -= data.nodemat.off\n size = [size.x, size.y, size.z]\n offset = [data.offset.x, data.offset.y, data.offset.z]\n pos = c4d.Vector() * data.nodemat\n pos = [pos.x, pos.y, pos.z]\n for i in range(3):\n res = pos[i] + offset[i] + size[i] > point[i] > pos[i] + offset[i] - size[i]\n if not res:\n break\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n bd.SetPen(c4d.GetViewColor(c4d.VIEWCOLOR_ACTIVEPOINT))\n p1, p2, p3, p4 = (c4d.Vector(), c4d.Vector(), c4d.Vector(), c4d.Vector())\n if i == 0:\n p1 = c4d.Vector(0, -size.y, -size.z)\n p2 = c4d.Vector(0, -size.y, size.z)\n p3 = c4d.Vector(0, size.y, size.z)\n p4 = c4d.Vector(0, size.y, -size.z)\n elif i == 1:\n p1 = c4d.Vector(-size.x, 0, -size.z)\n p2 = c4d.Vector(-size.x, 0, size.z)\n p3 = c4d.Vector(size.x, 0, size.z)\n p4 = c4d.Vector(size.x, 0, -size.z)\n elif i == 2:\n p1 = c4d.Vector(-size.x, -size.y, 0)\n p2 = c4d.Vector(-size.x, size.y, 0)\n p3 = c4d.Vector(size.x, size.y, 0)\n p4 = c4d.Vector(size.x, -size.y, 0)\n bd.DrawLine(p1, p2, 0)\n bd.DrawLine(p2, p3, 0)\n bd.DrawLine(p3, p4, 0)\n bd.DrawLine(p4, p1, 0)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000099", "length_bytes": 13785, "license_type": "permissive", "methods": [{"docstring": "Returns if a point is in box. Args: p (c4d.Vector): The point position. data (c4d.BaseContainer): Falloff data information. Returns: True if the point is in box, otherwise False", "name": "PointInBox", "signature": "def PointInBox(p, data)"}, {"docstring": "Helper method to draw a handle. Args: bd (c4d.BaseDraw): The editor's view. size (c4d.Vector): vector size of the object i (int): The Handle Id", "name": "DrawHandleLines", "signature": "def DrawHandleLines(bd, size, i)"}], "n_methods": 2, "prompt": "Implement the Python class `NoiseFalloffHelper` described below.\n\nClass description:\nUtility class for the noise falloff\n\nMethod signatures and docstrings:\n- def PointInBox(p, data): Returns if a point is in box. Args: p (c4d.Vector): The point position. data (c4d.BaseContainer): Falloff data information. Returns: True if the point is in box, otherwise False\n- def DrawHandleLines(bd, size, i): Helper method to draw a handle. Args: bd (c4d.BaseDraw): The editor's view. size (c4d.Vector): vector size of the object i (int): The Handle Id", "prompted_full_text": "Implement the Python class `NoiseFalloffHelper` described below.\n\nClass description:\nUtility class for the noise falloff\n\nMethod signatures and docstrings:\n- def PointInBox(p, data): Returns if a point is in box. Args: p (c4d.Vector): The point position. data (c4d.BaseContainer): Falloff data information. Returns: True if the point is in box, otherwise False\n- def DrawHandleLines(bd, size, i): Helper method to draw a handle. Args: bd (c4d.BaseDraw): The editor's view. 
size (c4d.Vector): vector size of the object i (int): The Handle Id\n\n<|skeleton|>\nclass NoiseFalloffHelper:\n \"\"\"Utility class for the noise falloff\"\"\"\n\n def PointInBox(p, data):\n \"\"\"Returns if a point is in box. Args: p (c4d.Vector): The point position. data (c4d.BaseContainer): Falloff data information. Returns: True if the point is in box, otherwise False\"\"\"\n <|body_0|>\n\n def DrawHandleLines(bd, size, i):\n \"\"\"Helper method to draw a handle. Args: bd (c4d.BaseDraw): The editor's view. size (c4d.Vector): vector size of the object i (int): The Handle Id\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = True\n point = [p.x, p.y, p.z]\n size = data.size * data.nodemat * data.scale\n size -= data.nodemat.off\n size = [size.x, size.y, size.z]\n offset = [data.offset.x, data.offset.y, data.offset.z]\n pos = c4d.Vector() * data.nodemat\n pos = [pos.x, pos.y, pos.z]\n for i in range(3):\n res = pos[i] + offset[i] + size[i] > point[i] > pos[i] + offset[i] - size[i]\n if not res:\n break\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n bd.SetPen(c4d.GetViewColor(c4d.VIEWCOLOR_ACTIVEPOINT))\n p1, p2, p3, p4 = (c4d.Vector(), c4d.Vector(), c4d.Vector(), c4d.Vector())\n if i == 0:\n p1 = c4d.Vector(0, -size.y, -size.z)\n p2 = c4d.Vector(0, -size.y, size.z)\n p3 = c4d.Vector(0, size.y, size.z)\n p4 = c4d.Vector(0, size.y, -size.z)\n elif i == 1:\n p1 = c4d.Vector(-size.x, 0, -size.z)\n p2 = c4d.Vector(-size.x, 0, size.z)\n p3 = c4d.Vector(size.x, 0, size.z)\n p4 = c4d.Vector(size.x, 0, -size.z)\n elif i == 2:\n p1 = c4d.Vector(-size.x, -size.y, 0)\n p2 = c4d.Vector(-size.x, size.y, 0)\n p3 = c4d.Vector(size.x, size.y, 0)\n p4 = c4d.Vector(size.x, -size.y, 0)\n bd.DrawLine(p1, p2, 0)\n bd.DrawLine(p2, p3, 0)\n bd.DrawLine(p3, p4, 0)\n bd.DrawLine(p4, p1, 0)\n<|end_body_1|>\n", "revision_id": "b1ea3fce533df34094bc3d0bd6460dfb84306e53", "skeleton": "<|skeleton|>\nclass NoiseFalloffHelper:\n \"\"\"Utility class for the noise falloff\"\"\"\n\n def PointInBox(p, data):\n \"\"\"Returns if a point is in box. Args: p (c4d.Vector): The point position. data (c4d.BaseContainer): Falloff data information. Returns: True if the point is in box, otherwise False\"\"\"\n <|body_0|>\n\n def DrawHandleLines(bd, size, i):\n \"\"\"Helper method to draw a handle. Args: bd (c4d.BaseDraw): The editor's view. size (c4d.Vector): vector size of the object i (int): The Handle Id\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NoiseFalloffHelper:\n \"\"\"Utility class for the noise falloff\"\"\"\n\n def PointInBox(p, data):\n \"\"\"Returns if a point is in box. Args: p (c4d.Vector): The point position. data (c4d.BaseContainer): Falloff data information. Returns: True if the point is in box, otherwise False\"\"\"\n res = True\n point = [p.x, p.y, p.z]\n size = data.size * data.nodemat * data.scale\n size -= data.nodemat.off\n size = [size.x, size.y, size.z]\n offset = [data.offset.x, data.offset.y, data.offset.z]\n pos = c4d.Vector() * data.nodemat\n pos = [pos.x, pos.y, pos.z]\n for i in range(3):\n res = pos[i] + offset[i] + size[i] > point[i] > pos[i] + offset[i] - size[i]\n if not res:\n break\n return res\n\n def DrawHandleLines(bd, size, i):\n \"\"\"Helper method to draw a handle. Args: bd (c4d.BaseDraw): The editor's view. 
size (c4d.Vector): vector size of the object i (int): The Handle Id\"\"\"\n bd.SetPen(c4d.GetViewColor(c4d.VIEWCOLOR_ACTIVEPOINT))\n p1, p2, p3, p4 = (c4d.Vector(), c4d.Vector(), c4d.Vector(), c4d.Vector())\n if i == 0:\n p1 = c4d.Vector(0, -size.y, -size.z)\n p2 = c4d.Vector(0, -size.y, size.z)\n p3 = c4d.Vector(0, size.y, size.z)\n p4 = c4d.Vector(0, size.y, -size.z)\n elif i == 1:\n p1 = c4d.Vector(-size.x, 0, -size.z)\n p2 = c4d.Vector(-size.x, 0, size.z)\n p3 = c4d.Vector(size.x, 0, size.z)\n p4 = c4d.Vector(size.x, 0, -size.z)\n elif i == 2:\n p1 = c4d.Vector(-size.x, -size.y, 0)\n p2 = c4d.Vector(-size.x, size.y, 0)\n p3 = c4d.Vector(size.x, size.y, 0)\n p4 = c4d.Vector(size.x, -size.y, 0)\n bd.DrawLine(p1, p2, 0)\n bd.DrawLine(p2, p3, 0)\n bd.DrawLine(p3, p4, 0)\n bd.DrawLine(p4, p1, 0)\n", "source": "the_stack_v2_python_sparse", "source_path": "plugins/py-noise_falloff_r14/py-noise_falloff_r14.pyp", "source_repo": "PluginCafe/cinema4d_py_sdk_extended", "split": "val", "star_events_count": 112} {"blob_id": "2044a29673fe0d4bcf1ab4a40ef659a13bdffe09", "bodies": ["if self.has_next():\n return self.paginator.link_template % (self.number + 1)\nreturn None", "if self.has_previous():\n return self.paginator.link_template % (self.number - 1)\nreturn None", "offset = self.paginator.offset\nif offset is None:\n raise ValueError(\"Can't determine start index of paginator with no offset\")\nreturn offset"], "bodies_text": "<|body_start_0|>\n if self.has_next():\n return self.paginator.link_template % (self.number + 1)\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.has_previous():\n return self.paginator.link_template % (self.number - 1)\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n offset = self.paginator.offset\n if offset is None:\n raise ValueError(\"Can't determine start index of paginator with no offset\")\n return offset\n<|end_body_2|>\n", "class_docstring": "", "class_name": "FinitePage", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FinitePage:\n\n def next_link(self):\n \"\"\"URL for the next page of results (or None).\"\"\"\n <|body_0|>\n\n def previous_link(self):\n \"\"\"URL for the previous page of results (or None).\"\"\"\n <|body_1|>\n\n def start_index(self):\n \"\"\"Returns the 1-based index of the first object on this page, relative to total objects in the paginator.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.has_next():\n return self.paginator.link_template % (self.number + 1)\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.has_previous():\n return self.paginator.link_template % (self.number - 1)\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n offset = self.paginator.offset\n if offset is None:\n raise ValueError(\"Can't determine start index of paginator with no offset\")\n return offset\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000100", "length_bytes": 4055, "license_type": "permissive", "methods": [{"docstring": "URL for the next page of results (or None).", "name": "next_link", "signature": "def next_link(self)"}, {"docstring": "URL for the previous page of results (or None).", "name": "previous_link", "signature": "def previous_link(self)"}, {"docstring": "Returns the 1-based index of the first object on this page, relative to total objects in the paginator.", "name": "start_index", "signature": "def start_index(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000527", "prompt": 
"Implement the Python class `FinitePage` described below.\n\nClass description:\nImplement the FinitePage class.\n\nMethod signatures and docstrings:\n- def next_link(self): URL for the next page of results (or None).\n- def previous_link(self): URL for the previous page of results (or None).\n- def start_index(self): Returns the 1-based index of the first object on this page, relative to total objects in the paginator.", "prompted_full_text": "Implement the Python class `FinitePage` described below.\n\nClass description:\nImplement the FinitePage class.\n\nMethod signatures and docstrings:\n- def next_link(self): URL for the next page of results (or None).\n- def previous_link(self): URL for the previous page of results (or None).\n- def start_index(self): Returns the 1-based index of the first object on this page, relative to total objects in the paginator.\n\n<|skeleton|>\nclass FinitePage:\n\n def next_link(self):\n \"\"\"URL for the next page of results (or None).\"\"\"\n <|body_0|>\n\n def previous_link(self):\n \"\"\"URL for the previous page of results (or None).\"\"\"\n <|body_1|>\n\n def start_index(self):\n \"\"\"Returns the 1-based index of the first object on this page, relative to total objects in the paginator.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.has_next():\n return self.paginator.link_template % (self.number + 1)\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.has_previous():\n return self.paginator.link_template % (self.number - 1)\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n offset = self.paginator.offset\n if offset is None:\n raise ValueError(\"Can't determine start index of paginator with no offset\")\n return offset\n<|end_body_2|>\n", "revision_id": "e8e43df7d1930398a3af2ea8755bd7b6a44b4385", "skeleton": "<|skeleton|>\nclass FinitePage:\n\n def next_link(self):\n \"\"\"URL for the next page of results (or None).\"\"\"\n <|body_0|>\n\n def previous_link(self):\n \"\"\"URL for the previous page of results (or None).\"\"\"\n <|body_1|>\n\n def start_index(self):\n \"\"\"Returns the 1-based index of the first object on this page, relative to total objects in the paginator.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FinitePage:\n def next_link(self):\n \"\"\"URL for the next page of results (or None).\"\"\"\n if self.has_next():\n return self.paginator.link_template % (self.number + 1)\n return None\n\n def previous_link(self):\n \"\"\"URL for the previous page of results (or None).\"\"\"\n if self.has_previous():\n return self.paginator.link_template % (self.number - 1)\n return None\n\n def start_index(self):\n \"\"\"Returns the 1-based index of the first object on this page, relative to total objects in the paginator.\"\"\"\n offset = self.paginator.offset\n if offset is None:\n raise ValueError(\"Can't determine start index of paginator with no offset\")\n return offset\n", "source": "the_stack_v2_python_sparse", "source_path": "typepadapp/utils/paginator.py", "source_repo": "sivy/typepadapp", "split": "val", "star_events_count": 0} {"blob_id": "f01b95eb85ae3affcc8d4fce1512af20d772dd75", "bodies": ["super().validate(data)\nif not data.get('cost_type'):\n data['cost_type'] = get_cost_type(self.context.get('request'))\nerror = {}\nif 'delta' in data.get('order_by', {}) and 'delta' not in data:\n error['order_by'] = _('Cannot order by delta without a delta param')\n raise 
serializers.ValidationError(error)\nreturn data", "max_value = get_customer_group_by_limit(self.schema)\nif len(value) > max_value:\n error = {'group_by': f'Cost Management supports a max of {max_value} group_by options.'}\n raise serializers.ValidationError(error)\nvalidate_field(self, 'group_by', self.GROUP_BY_SERIALIZER, value, tag_keys=self.tag_keys)\ngroup_by_params = self.initial_data.get('group_by', {})\norg_unit_group_keys = ['org_unit_id', 'or:org_unit_id']\ngroup_by_keys = group_by_params.keys()\nkey_used = []\nfor acceptable_key in org_unit_group_keys:\n if acceptable_key in group_by_keys:\n key_used.append(acceptable_key)\nif key_used:\n if len(key_used) > 1:\n error = {'or_unit_id': _('Multiple org_unit_id must be represented with the or: prefix.')}\n raise serializers.ValidationError(error)\n key_used = key_used[0]\n request = self.context.get('request')\n if 'costs' not in request.path or self.initial_data.get('group_by', {}).get(key_used, '') == '*':\n error = {'org_unit_id': _('Unsupported parameter or invalid value')}\n raise serializers.ValidationError(error)\n if 'or:' not in key_used:\n if isinstance(group_by_params.get(key_used), list):\n if len(group_by_params.get(key_used)) > 1:\n error = {'or_unit_id': _('Multiple org_unit_id must be represented with the or: prefix.')}\n raise serializers.ValidationError(error)\nreturn value"], "bodies_text": "<|body_start_0|>\n super().validate(data)\n if not data.get('cost_type'):\n data['cost_type'] = get_cost_type(self.context.get('request'))\n error = {}\n if 'delta' in data.get('order_by', {}) and 'delta' not in data:\n error['order_by'] = _('Cannot order by delta without a delta param')\n raise serializers.ValidationError(error)\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n max_value = get_customer_group_by_limit(self.schema)\n if len(value) > max_value:\n error = {'group_by': f'Cost Management supports a max of {max_value} group_by options.'}\n raise serializers.ValidationError(error)\n validate_field(self, 'group_by', self.GROUP_BY_SERIALIZER, value, tag_keys=self.tag_keys)\n group_by_params = self.initial_data.get('group_by', {})\n org_unit_group_keys = ['org_unit_id', 'or:org_unit_id']\n group_by_keys = group_by_params.keys()\n key_used = []\n for acceptable_key in org_unit_group_keys:\n if acceptable_key in group_by_keys:\n key_used.append(acceptable_key)\n if key_used:\n if len(key_used) > 1:\n error = {'or_unit_id': _('Multiple org_unit_id must be represented with the or: prefix.')}\n raise serializers.ValidationError(error)\n key_used = key_used[0]\n request = self.context.get('request')\n if 'costs' not in request.path or self.initial_data.get('group_by', {}).get(key_used, '') == '*':\n error = {'org_unit_id': _('Unsupported parameter or invalid value')}\n raise serializers.ValidationError(error)\n if 'or:' not in key_used:\n if isinstance(group_by_params.get(key_used), list):\n if len(group_by_params.get(key_used)) > 1:\n error = {'or_unit_id': _('Multiple org_unit_id must be represented with the or: prefix.')}\n raise serializers.ValidationError(error)\n return value\n<|end_body_1|>\n", "class_docstring": "Serializer for handling query parameters.", "class_name": "AWSQueryParamSerializer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AWSQueryParamSerializer:\n \"\"\"Serializer for handling query parameters.\"\"\"\n\n def validate(self, data):\n \"\"\"Validate incoming data. 
Args: data (Dict): data to be validated Returns: (Dict): Validated data Raises: (ValidationError): if field inputs are invalid\"\"\"\n <|body_0|>\n\n def validate_group_by(self, value):\n \"\"\"Validate incoming group_by data. Args: data (Dict): data to be validated Returns: (Dict): Validated data Raises: (ValidationError): if group_by field inputs are invalid\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().validate(data)\n if not data.get('cost_type'):\n data['cost_type'] = get_cost_type(self.context.get('request'))\n error = {}\n if 'delta' in data.get('order_by', {}) and 'delta' not in data:\n error['order_by'] = _('Cannot order by delta without a delta param')\n raise serializers.ValidationError(error)\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n max_value = get_customer_group_by_limit(self.schema)\n if len(value) > max_value:\n error = {'group_by': f'Cost Management supports a max of {max_value} group_by options.'}\n raise serializers.ValidationError(error)\n validate_field(self, 'group_by', self.GROUP_BY_SERIALIZER, value, tag_keys=self.tag_keys)\n group_by_params = self.initial_data.get('group_by', {})\n org_unit_group_keys = ['org_unit_id', 'or:org_unit_id']\n group_by_keys = group_by_params.keys()\n key_used = []\n for acceptable_key in org_unit_group_keys:\n if acceptable_key in group_by_keys:\n key_used.append(acceptable_key)\n if key_used:\n if len(key_used) > 1:\n error = {'or_unit_id': _('Multiple org_unit_id must be represented with the or: prefix.')}\n raise serializers.ValidationError(error)\n key_used = key_used[0]\n request = self.context.get('request')\n if 'costs' not in request.path or self.initial_data.get('group_by', {}).get(key_used, '') == '*':\n error = {'org_unit_id': _('Unsupported parameter or invalid value')}\n raise serializers.ValidationError(error)\n if 'or:' not in key_used:\n if isinstance(group_by_params.get(key_used), list):\n if len(group_by_params.get(key_used)) > 1:\n error = {'or_unit_id': _('Multiple org_unit_id must be represented with the or: prefix.')}\n raise serializers.ValidationError(error)\n return value\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000101", "length_bytes": 8520, "license_type": "permissive", "methods": [{"docstring": "Validate incoming data. Args: data (Dict): data to be validated Returns: (Dict): Validated data Raises: (ValidationError): if field inputs are invalid", "name": "validate", "signature": "def validate(self, data)"}, {"docstring": "Validate incoming group_by data. Args: data (Dict): data to be validated Returns: (Dict): Validated data Raises: (ValidationError): if group_by field inputs are invalid", "name": "validate_group_by", "signature": "def validate_group_by(self, value)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001848", "prompt": "Implement the Python class `AWSQueryParamSerializer` described below.\n\nClass description:\nSerializer for handling query parameters.\n\nMethod signatures and docstrings:\n- def validate(self, data): Validate incoming data. Args: data (Dict): data to be validated Returns: (Dict): Validated data Raises: (ValidationError): if field inputs are invalid\n- def validate_group_by(self, value): Validate incoming group_by data. 
Args: data (Dict): data to be validated Returns: (Dict): Validated data Raises: (ValidationError): if group_by field inputs are invalid", "prompted_full_text": "Implement the Python class `AWSQueryParamSerializer` described below.\n\nClass description:\nSerializer for handling query parameters.\n\nMethod signatures and docstrings:\n- def validate(self, data): Validate incoming data. Args: data (Dict): data to be validated Returns: (Dict): Validated data Raises: (ValidationError): if field inputs are invalid\n- def validate_group_by(self, value): Validate incoming group_by data. Args: data (Dict): data to be validated Returns: (Dict): Validated data Raises: (ValidationError): if group_by field inputs are invalid\n\n<|skeleton|>\nclass AWSQueryParamSerializer:\n \"\"\"Serializer for handling query parameters.\"\"\"\n\n def validate(self, data):\n \"\"\"Validate incoming data. Args: data (Dict): data to be validated Returns: (Dict): Validated data Raises: (ValidationError): if field inputs are invalid\"\"\"\n <|body_0|>\n\n def validate_group_by(self, value):\n \"\"\"Validate incoming group_by data. Args: data (Dict): data to be validated Returns: (Dict): Validated data Raises: (ValidationError): if group_by field inputs are invalid\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().validate(data)\n if not data.get('cost_type'):\n data['cost_type'] = get_cost_type(self.context.get('request'))\n error = {}\n if 'delta' in data.get('order_by', {}) and 'delta' not in data:\n error['order_by'] = _('Cannot order by delta without a delta param')\n raise serializers.ValidationError(error)\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n max_value = get_customer_group_by_limit(self.schema)\n if len(value) > max_value:\n error = {'group_by': f'Cost Management supports a max of {max_value} group_by options.'}\n raise serializers.ValidationError(error)\n validate_field(self, 'group_by', self.GROUP_BY_SERIALIZER, value, tag_keys=self.tag_keys)\n group_by_params = self.initial_data.get('group_by', {})\n org_unit_group_keys = ['org_unit_id', 'or:org_unit_id']\n group_by_keys = group_by_params.keys()\n key_used = []\n for acceptable_key in org_unit_group_keys:\n if acceptable_key in group_by_keys:\n key_used.append(acceptable_key)\n if key_used:\n if len(key_used) > 1:\n error = {'or_unit_id': _('Multiple org_unit_id must be represented with the or: prefix.')}\n raise serializers.ValidationError(error)\n key_used = key_used[0]\n request = self.context.get('request')\n if 'costs' not in request.path or self.initial_data.get('group_by', {}).get(key_used, '') == '*':\n error = {'org_unit_id': _('Unsupported parameter or invalid value')}\n raise serializers.ValidationError(error)\n if 'or:' not in key_used:\n if isinstance(group_by_params.get(key_used), list):\n if len(group_by_params.get(key_used)) > 1:\n error = {'or_unit_id': _('Multiple org_unit_id must be represented with the or: prefix.')}\n raise serializers.ValidationError(error)\n return value\n<|end_body_1|>\n", "revision_id": "0416e5216eb1ec4b41c8dd4999adde218b1ab2e1", "skeleton": "<|skeleton|>\nclass AWSQueryParamSerializer:\n \"\"\"Serializer for handling query parameters.\"\"\"\n\n def validate(self, data):\n \"\"\"Validate incoming data. Args: data (Dict): data to be validated Returns: (Dict): Validated data Raises: (ValidationError): if field inputs are invalid\"\"\"\n <|body_0|>\n\n def validate_group_by(self, value):\n \"\"\"Validate incoming group_by data. 
Args: data (Dict): data to be validated Returns: (Dict): Validated data Raises: (ValidationError): if group_by field inputs are invalid\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AWSQueryParamSerializer:\n \"\"\"Serializer for handling query parameters.\"\"\"\n\n def validate(self, data):\n \"\"\"Validate incoming data. Args: data (Dict): data to be validated Returns: (Dict): Validated data Raises: (ValidationError): if field inputs are invalid\"\"\"\n super().validate(data)\n if not data.get('cost_type'):\n data['cost_type'] = get_cost_type(self.context.get('request'))\n error = {}\n if 'delta' in data.get('order_by', {}) and 'delta' not in data:\n error['order_by'] = _('Cannot order by delta without a delta param')\n raise serializers.ValidationError(error)\n return data\n\n def validate_group_by(self, value):\n \"\"\"Validate incoming group_by data. Args: data (Dict): data to be validated Returns: (Dict): Validated data Raises: (ValidationError): if group_by field inputs are invalid\"\"\"\n max_value = get_customer_group_by_limit(self.schema)\n if len(value) > max_value:\n error = {'group_by': f'Cost Management supports a max of {max_value} group_by options.'}\n raise serializers.ValidationError(error)\n validate_field(self, 'group_by', self.GROUP_BY_SERIALIZER, value, tag_keys=self.tag_keys)\n group_by_params = self.initial_data.get('group_by', {})\n org_unit_group_keys = ['org_unit_id', 'or:org_unit_id']\n group_by_keys = group_by_params.keys()\n key_used = []\n for acceptable_key in org_unit_group_keys:\n if acceptable_key in group_by_keys:\n key_used.append(acceptable_key)\n if key_used:\n if len(key_used) > 1:\n error = {'or_unit_id': _('Multiple org_unit_id must be represented with the or: prefix.')}\n raise serializers.ValidationError(error)\n key_used = key_used[0]\n request = self.context.get('request')\n if 'costs' not in request.path or self.initial_data.get('group_by', {}).get(key_used, '') == '*':\n error = {'org_unit_id': _('Unsupported parameter or invalid value')}\n raise serializers.ValidationError(error)\n if 'or:' not in key_used:\n if isinstance(group_by_params.get(key_used), list):\n if len(group_by_params.get(key_used)) > 1:\n error = {'or_unit_id': _('Multiple org_unit_id must be represented with the or: prefix.')}\n raise serializers.ValidationError(error)\n return value\n", "source": "the_stack_v2_python_sparse", "source_path": "koku/api/report/aws/serializers.py", "source_repo": "project-koku/koku", "split": "val", "star_events_count": 225} {"blob_id": "740a08a2d13f21b2d2207e5ba94cabce294b0b7c", "bodies": ["super(KernelVar, self).__init__()\nself.embd_dim = embd_dim\nself.hidden_dim = hidden_dim\nself.kernel_dim = kernel_dim\nself.layer1 = nn.Linear(2 * embd_dim, hidden_dim)\nself.layer2 = nn.Linear(hidden_dim, hidden_dim)\nself.layer3 = nn.Linear(hidden_dim, kernel_dim)\nself.net = nn.Sequential(self.layer1, nn.ReLU(), self.layer2, nn.ReLU(), self.layer3)\nself.s_ix = []\nself.e_ix = []\nself.lengths = None", "batch_size, max_set_size, embd_dim = words.size()\nself.lengths = words.sum(2, keepdim=True).abs().sign().sum(1, keepdim=True)\ncontext = (words.sum(1, keepdim=True) / self.lengths.expand_as(words.sum(1, keepdim=True))).expand_as(words)\nmask = words.data.sum(2, keepdim=True).abs().sign().expand_as(words).byte()\nwords = words.masked_select(Variable(mask)).view(-1, embd_dim)\ncontext = context.masked_select(Variable(mask)).view(-1, 
embd_dim)\nbatch_x = torch.cat([words, context], dim=1)\nbatch_kernel = self.net(batch_x)\nself.s_ix = list(self.lengths.squeeze().cumsum(0).long().data - self.lengths.squeeze().long().data)\nself.e_ix = list(self.lengths.squeeze().cumsum(0).long().data)\nreturn (batch_kernel, words)"], "bodies_text": "<|body_start_0|>\n super(KernelVar, self).__init__()\n self.embd_dim = embd_dim\n self.hidden_dim = hidden_dim\n self.kernel_dim = kernel_dim\n self.layer1 = nn.Linear(2 * embd_dim, hidden_dim)\n self.layer2 = nn.Linear(hidden_dim, hidden_dim)\n self.layer3 = nn.Linear(hidden_dim, kernel_dim)\n self.net = nn.Sequential(self.layer1, nn.ReLU(), self.layer2, nn.ReLU(), self.layer3)\n self.s_ix = []\n self.e_ix = []\n self.lengths = None\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size, max_set_size, embd_dim = words.size()\n self.lengths = words.sum(2, keepdim=True).abs().sign().sum(1, keepdim=True)\n context = (words.sum(1, keepdim=True) / self.lengths.expand_as(words.sum(1, keepdim=True))).expand_as(words)\n mask = words.data.sum(2, keepdim=True).abs().sign().expand_as(words).byte()\n words = words.masked_select(Variable(mask)).view(-1, embd_dim)\n context = context.masked_select(Variable(mask)).view(-1, embd_dim)\n batch_x = torch.cat([words, context], dim=1)\n batch_kernel = self.net(batch_x)\n self.s_ix = list(self.lengths.squeeze().cumsum(0).long().data - self.lengths.squeeze().long().data)\n self.e_ix = list(self.lengths.squeeze().cumsum(0).long().data)\n return (batch_kernel, words)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "KernelVar", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass KernelVar:\n\n def __init__(self, embd_dim, hidden_dim, kernel_dim):\n \"\"\"Currently, this creates a 2-hidden-layer network with ELU non-linearities.\"\"\"\n <|body_0|>\n\n def forward(self, words):\n \"\"\"Given words, returns batch_kernel of dimension [-1, kernel_dim]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(KernelVar, self).__init__()\n self.embd_dim = embd_dim\n self.hidden_dim = hidden_dim\n self.kernel_dim = kernel_dim\n self.layer1 = nn.Linear(2 * embd_dim, hidden_dim)\n self.layer2 = nn.Linear(hidden_dim, hidden_dim)\n self.layer3 = nn.Linear(hidden_dim, kernel_dim)\n self.net = nn.Sequential(self.layer1, nn.ReLU(), self.layer2, nn.ReLU(), self.layer3)\n self.s_ix = []\n self.e_ix = []\n self.lengths = None\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size, max_set_size, embd_dim = words.size()\n self.lengths = words.sum(2, keepdim=True).abs().sign().sum(1, keepdim=True)\n context = (words.sum(1, keepdim=True) / self.lengths.expand_as(words.sum(1, keepdim=True))).expand_as(words)\n mask = words.data.sum(2, keepdim=True).abs().sign().expand_as(words).byte()\n words = words.masked_select(Variable(mask)).view(-1, embd_dim)\n context = context.masked_select(Variable(mask)).view(-1, embd_dim)\n batch_x = torch.cat([words, context], dim=1)\n batch_kernel = self.net(batch_x)\n self.s_ix = list(self.lengths.squeeze().cumsum(0).long().data - self.lengths.squeeze().long().data)\n self.e_ix = list(self.lengths.squeeze().cumsum(0).long().data)\n return (batch_kernel, words)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000102", "length_bytes": 30546, "license_type": "permissive", "methods": [{"docstring": "Currently, this creates a 2-hidden-layer network with ELU non-linearities.", "name": "__init__", "signature": "def __init__(self, embd_dim, hidden_dim, kernel_dim)"}, {"docstring": 
"Given words, returns batch_kernel of dimension [-1, kernel_dim]", "name": "forward", "signature": "def forward(self, words)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003088", "prompt": "Implement the Python class `KernelVar` described below.\n\nClass description:\nImplement the KernelVar class.\n\nMethod signatures and docstrings:\n- def __init__(self, embd_dim, hidden_dim, kernel_dim): Currently, this creates a 2-hidden-layer network with ELU non-linearities.\n- def forward(self, words): Given words, returns batch_kernel of dimension [-1, kernel_dim]", "prompted_full_text": "Implement the Python class `KernelVar` described below.\n\nClass description:\nImplement the KernelVar class.\n\nMethod signatures and docstrings:\n- def __init__(self, embd_dim, hidden_dim, kernel_dim): Currently, this creates a 2-hidden-layer network with ELU non-linearities.\n- def forward(self, words): Given words, returns batch_kernel of dimension [-1, kernel_dim]\n\n<|skeleton|>\nclass KernelVar:\n\n def __init__(self, embd_dim, hidden_dim, kernel_dim):\n \"\"\"Currently, this creates a 2-hidden-layer network with ELU non-linearities.\"\"\"\n <|body_0|>\n\n def forward(self, words):\n \"\"\"Given words, returns batch_kernel of dimension [-1, kernel_dim]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(KernelVar, self).__init__()\n self.embd_dim = embd_dim\n self.hidden_dim = hidden_dim\n self.kernel_dim = kernel_dim\n self.layer1 = nn.Linear(2 * embd_dim, hidden_dim)\n self.layer2 = nn.Linear(hidden_dim, hidden_dim)\n self.layer3 = nn.Linear(hidden_dim, kernel_dim)\n self.net = nn.Sequential(self.layer1, nn.ReLU(), self.layer2, nn.ReLU(), self.layer3)\n self.s_ix = []\n self.e_ix = []\n self.lengths = None\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size, max_set_size, embd_dim = words.size()\n self.lengths = words.sum(2, keepdim=True).abs().sign().sum(1, keepdim=True)\n context = (words.sum(1, keepdim=True) / self.lengths.expand_as(words.sum(1, keepdim=True))).expand_as(words)\n mask = words.data.sum(2, keepdim=True).abs().sign().expand_as(words).byte()\n words = words.masked_select(Variable(mask)).view(-1, embd_dim)\n context = context.masked_select(Variable(mask)).view(-1, embd_dim)\n batch_x = torch.cat([words, context], dim=1)\n batch_kernel = self.net(batch_x)\n self.s_ix = list(self.lengths.squeeze().cumsum(0).long().data - self.lengths.squeeze().long().data)\n self.e_ix = list(self.lengths.squeeze().cumsum(0).long().data)\n return (batch_kernel, words)\n<|end_body_1|>\n", "revision_id": "86859b7612433cc6349b427b47c54986224e702a", "skeleton": "<|skeleton|>\nclass KernelVar:\n\n def __init__(self, embd_dim, hidden_dim, kernel_dim):\n \"\"\"Currently, this creates a 2-hidden-layer network with ELU non-linearities.\"\"\"\n <|body_0|>\n\n def forward(self, words):\n \"\"\"Given words, returns batch_kernel of dimension [-1, kernel_dim]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class KernelVar:\n def __init__(self, embd_dim, hidden_dim, kernel_dim):\n \"\"\"Currently, this creates a 2-hidden-layer network with ELU non-linearities.\"\"\"\n super(KernelVar, self).__init__()\n self.embd_dim = embd_dim\n self.hidden_dim = hidden_dim\n self.kernel_dim = kernel_dim\n self.layer1 = nn.Linear(2 * embd_dim, hidden_dim)\n self.layer2 = nn.Linear(hidden_dim, hidden_dim)\n self.layer3 = nn.Linear(hidden_dim, kernel_dim)\n self.net = nn.Sequential(self.layer1, 
nn.ReLU(), self.layer2, nn.ReLU(), self.layer3)\n self.s_ix = []\n self.e_ix = []\n self.lengths = None\n\n def forward(self, words):\n \"\"\"Given words, returns batch_kernel of dimension [-1, kernel_dim]\"\"\"\n batch_size, max_set_size, embd_dim = words.size()\n self.lengths = words.sum(2, keepdim=True).abs().sign().sum(1, keepdim=True)\n context = (words.sum(1, keepdim=True) / self.lengths.expand_as(words.sum(1, keepdim=True))).expand_as(words)\n mask = words.data.sum(2, keepdim=True).abs().sign().expand_as(words).byte()\n words = words.masked_select(Variable(mask)).view(-1, embd_dim)\n context = context.masked_select(Variable(mask)).view(-1, embd_dim)\n batch_x = torch.cat([words, context], dim=1)\n batch_kernel = self.net(batch_x)\n self.s_ix = list(self.lengths.squeeze().cumsum(0).long().data - self.lengths.squeeze().long().data)\n self.e_ix = list(self.lengths.squeeze().cumsum(0).long().data)\n return (batch_kernel, words)\n", "source": "the_stack_v2_python_sparse", "source_path": "dpp_nets/layers/layers.py", "source_repo": "mbp28/dpp_nets", "split": "val", "star_events_count": 1} {"blob_id": "ea8ad6ff1c8eb443684dcf7e4ba8e560c1242348", "bodies": ["lender = self.rate_quote_request.get_par_lender(term, amortization)\nscenarios = self.rate_quote_request.get_scenarios(term, amortization)\npar_lender = self.serializer(lender).data\ndata = self.serializer(scenarios, many=True).data\nresults = {'par_lender': par_lender if par_lender else None, 'request_uuid': self.rate_quote_request.uuid, 'results': data if data else None, 'term': par_lender['term'] if par_lender else None, 'amortization_type': par_lender['amortization_type'] if par_lender else None}\nreturn response.Response(results, status=status.HTTP_200_OK)", "options = {'30 Year': 'Fixed', '15 Year': 'Fixed', '7 Year': 'Variable', '5 Year': 'Variable'}\nlenders = []\nfor term, amortization in options.iteritems():\n lender = self.rate_quote_request.get_par_lender(term, amortization)\n lenders.append(lender if lender else None)\nlenders = self.serializer(lenders, many=True).data\nresults = {'request_uuid': self.rate_quote_request.uuid, 'results': lenders if lenders else None}\nreturn response.Response(results, status=status.HTTP_200_OK)"], "bodies_text": "<|body_start_0|>\n lender = self.rate_quote_request.get_par_lender(term, amortization)\n scenarios = self.rate_quote_request.get_scenarios(term, amortization)\n par_lender = self.serializer(lender).data\n data = self.serializer(scenarios, many=True).data\n results = {'par_lender': par_lender if par_lender else None, 'request_uuid': self.rate_quote_request.uuid, 'results': data if data else None, 'term': par_lender['term'] if par_lender else None, 'amortization_type': par_lender['amortization_type'] if par_lender else None}\n return response.Response(results, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n options = {'30 Year': 'Fixed', '15 Year': 'Fixed', '7 Year': 'Variable', '5 Year': 'Variable'}\n lenders = []\n for term, amortization in options.iteritems():\n lender = self.rate_quote_request.get_par_lender(term, amortization)\n lenders.append(lender if lender else None)\n lenders = self.serializer(lenders, many=True).data\n results = {'request_uuid': self.rate_quote_request.uuid, 'results': lenders if lenders else None}\n return response.Response(results, status=status.HTTP_200_OK)\n<|end_body_1|>\n", "class_docstring": "Returns rate quote request and rate quote view results.", "class_name": "RateQuoteMixin", "detected_licenses": [], "format_version": 
"stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RateQuoteMixin:\n \"\"\"Returns rate quote request and rate quote view results.\"\"\"\n\n def get_results(self, term=None, amortization=None):\n \"\"\"Returns rate quote results. Every result object includes the par_lender and matching scenarios. :param term: str, term :param amortization: str, amortization :return: rate quotes :rtype: `dict`\"\"\"\n <|body_0|>\n\n def get_unique_results(self):\n \"\"\"Return best rate quote scenarios for each term and amortization. - Standard Scenarios: * 30 Year, Fixed * 15 Year, Fixed * 7 Year, Variable * 5 Year, Variable * amortization: Fixed, Variable :return: `dict` rate quotes :rtype: DRF Response\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n lender = self.rate_quote_request.get_par_lender(term, amortization)\n scenarios = self.rate_quote_request.get_scenarios(term, amortization)\n par_lender = self.serializer(lender).data\n data = self.serializer(scenarios, many=True).data\n results = {'par_lender': par_lender if par_lender else None, 'request_uuid': self.rate_quote_request.uuid, 'results': data if data else None, 'term': par_lender['term'] if par_lender else None, 'amortization_type': par_lender['amortization_type'] if par_lender else None}\n return response.Response(results, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n options = {'30 Year': 'Fixed', '15 Year': 'Fixed', '7 Year': 'Variable', '5 Year': 'Variable'}\n lenders = []\n for term, amortization in options.iteritems():\n lender = self.rate_quote_request.get_par_lender(term, amortization)\n lenders.append(lender if lender else None)\n lenders = self.serializer(lenders, many=True).data\n results = {'request_uuid': self.rate_quote_request.uuid, 'results': lenders if lenders else None}\n return response.Response(results, status=status.HTTP_200_OK)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000103", "length_bytes": 14021, "license_type": "no_license", "methods": [{"docstring": "Returns rate quote results. Every result object includes the par_lender and matching scenarios. :param term: str, term :param amortization: str, amortization :return: rate quotes :rtype: `dict`", "name": "get_results", "signature": "def get_results(self, term=None, amortization=None)"}, {"docstring": "Return best rate quote scenarios for each term and amortization. - Standard Scenarios: * 30 Year, Fixed * 15 Year, Fixed * 7 Year, Variable * 5 Year, Variable * amortization: Fixed, Variable :return: `dict` rate quotes :rtype: DRF Response", "name": "get_unique_results", "signature": "def get_unique_results(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004516", "prompt": "Implement the Python class `RateQuoteMixin` described below.\n\nClass description:\nReturns rate quote request and rate quote view results.\n\nMethod signatures and docstrings:\n- def get_results(self, term=None, amortization=None): Returns rate quote results. Every result object includes the par_lender and matching scenarios. :param term: str, term :param amortization: str, amortization :return: rate quotes :rtype: `dict`\n- def get_unique_results(self): Return best rate quote scenarios for each term and amortization. 
- Standard Scenarios: * 30 Year, Fixed * 15 Year, Fixed * 7 Year, Variable * 5 Year, Variable * amortization: Fixed, Variable :return: `dict` rate quotes :rtype: DRF Response", "prompted_full_text": "Implement the Python class `RateQuoteMixin` described below.\n\nClass description:\nReturns rate quote request and rate quote view results.\n\nMethod signatures and docstrings:\n- def get_results(self, term=None, amortization=None): Returns rate quote results. Every result object includes the par_lender and matching scenarios. :param term: str, term :param amortization: str, amortization :return: rate quotes :rtype: `dict`\n- def get_unique_results(self): Return best rate quote scenarios for each term and amortization. - Standard Scenarios: * 30 Year, Fixed * 15 Year, Fixed * 7 Year, Variable * 5 Year, Variable * amortization: Fixed, Variable :return: `dict` rate quotes :rtype: DRF Response\n\n<|skeleton|>\nclass RateQuoteMixin:\n \"\"\"Returns rate quote request and rate quote view results.\"\"\"\n\n def get_results(self, term=None, amortization=None):\n \"\"\"Returns rate quote results. Every result object includes the par_lender and matching scenarios. :param term: str, term :param amortization: str, amortization :return: rate quotes :rtype: `dict`\"\"\"\n <|body_0|>\n\n def get_unique_results(self):\n \"\"\"Return best rate quote scenarios for each term and amortization. - Standard Scenarios: * 30 Year, Fixed * 15 Year, Fixed * 7 Year, Variable * 5 Year, Variable * amortization: Fixed, Variable :return: `dict` rate quotes :rtype: DRF Response\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n lender = self.rate_quote_request.get_par_lender(term, amortization)\n scenarios = self.rate_quote_request.get_scenarios(term, amortization)\n par_lender = self.serializer(lender).data\n data = self.serializer(scenarios, many=True).data\n results = {'par_lender': par_lender if par_lender else None, 'request_uuid': self.rate_quote_request.uuid, 'results': data if data else None, 'term': par_lender['term'] if par_lender else None, 'amortization_type': par_lender['amortization_type'] if par_lender else None}\n return response.Response(results, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n options = {'30 Year': 'Fixed', '15 Year': 'Fixed', '7 Year': 'Variable', '5 Year': 'Variable'}\n lenders = []\n for term, amortization in options.iteritems():\n lender = self.rate_quote_request.get_par_lender(term, amortization)\n lenders.append(lender if lender else None)\n lenders = self.serializer(lenders, many=True).data\n results = {'request_uuid': self.rate_quote_request.uuid, 'results': lenders if lenders else None}\n return response.Response(results, status=status.HTTP_200_OK)\n<|end_body_1|>\n", "revision_id": "f1a8cd8268d032ea8321e1588e226da09925b7aa", "skeleton": "<|skeleton|>\nclass RateQuoteMixin:\n \"\"\"Returns rate quote request and rate quote view results.\"\"\"\n\n def get_results(self, term=None, amortization=None):\n \"\"\"Returns rate quote results. Every result object includes the par_lender and matching scenarios. :param term: str, term :param amortization: str, amortization :return: rate quotes :rtype: `dict`\"\"\"\n <|body_0|>\n\n def get_unique_results(self):\n \"\"\"Return best rate quote scenarios for each term and amortization. 
- Standard Scenarios: * 30 Year, Fixed * 15 Year, Fixed * 7 Year, Variable * 5 Year, Variable * amortization: Fixed, Variable :return: `dict` rate quotes :rtype: DRF Response\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RateQuoteMixin:\n \"\"\"Returns rate quote request and rate quote view results.\"\"\"\n\n def get_results(self, term=None, amortization=None):\n \"\"\"Returns rate quote results. Every result object includes the par_lender and matching scenarios. :param term: str, term :param amortization: str, amortization :return: rate quotes :rtype: `dict`\"\"\"\n lender = self.rate_quote_request.get_par_lender(term, amortization)\n scenarios = self.rate_quote_request.get_scenarios(term, amortization)\n par_lender = self.serializer(lender).data\n data = self.serializer(scenarios, many=True).data\n results = {'par_lender': par_lender if par_lender else None, 'request_uuid': self.rate_quote_request.uuid, 'results': data if data else None, 'term': par_lender['term'] if par_lender else None, 'amortization_type': par_lender['amortization_type'] if par_lender else None}\n return response.Response(results, status=status.HTTP_200_OK)\n\n def get_unique_results(self):\n \"\"\"Return best rate quote scenarios for each term and amortization. - Standard Scenarios: * 30 Year, Fixed * 15 Year, Fixed * 7 Year, Variable * 5 Year, Variable * amortization: Fixed, Variable :return: `dict` rate quotes :rtype: DRF Response\"\"\"\n options = {'30 Year': 'Fixed', '15 Year': 'Fixed', '7 Year': 'Variable', '5 Year': 'Variable'}\n lenders = []\n for term, amortization in options.iteritems():\n lender = self.rate_quote_request.get_par_lender(term, amortization)\n lenders.append(lender if lender else None)\n lenders = self.serializer(lenders, many=True).data\n results = {'request_uuid': self.rate_quote_request.uuid, 'results': lenders if lenders else None}\n return response.Response(results, status=status.HTTP_200_OK)\n", "source": "the_stack_v2_python_sparse", "source_path": "website/apps/mortgage_profiles/views.py", "source_repo": "protoprojects/worksample", "split": "val", "star_events_count": 0} {"blob_id": "62e9f97b44e7011383ece631c6ebb344b76eff66", "bodies": ["link_date_map = self._parse_link_date_map(response)\nmeeting_dt_list = self._parse_upcoming(response)\nmeeting_dates = [dt.date() for dt in meeting_dt_list]\nfor link_date in link_date_map.keys():\n if link_date not in meeting_dates:\n meeting_dt_list.append(datetime.combine(link_date, time(0)))\nfor meeting_dt in set(meeting_dt_list):\n meeting_links = link_date_map[meeting_dt.date()]\n meeting = Meeting(title=self._parse_title(meeting_links), description='', classification=self._parse_classification(meeting_links), start=meeting_dt, end=None, all_day=False, time_notes='', location=self.location, links=meeting_links, source=response.url)\n meeting['status'] = self._get_status(meeting)\n meeting['id'] = self._get_id(meeting)\n yield meeting", "for link in links:\n if 'hearing' in link['title'].lower():\n return link['title'].replace('Notice', '').strip()\n if 'special' in link['title'].lower():\n return 'Special Meeting'\nreturn 'Illinois Medical District Commission'", "for link in links:\n if 'hearing' in link['title'].lower():\n return FORUM\nreturn COMMISSION", "date_re = '\\\\w+\\\\s+\\\\d{1,2}' if year else '\\\\w+\\\\s+\\\\d{1,2},\\\\s+\\\\d{4}'\ndate_match = re.search(date_re, start_str)\nif not date_match:\n return\ndate_str 
= date_match.group().replace(',', '')\nif year:\n date_str += ' {}'.format(year)\ntime_match = re.search('\\\\d{1,2}:\\\\d{2}\\\\s+[APM\\\\.]{2,4}', start_str)\nif not year or year == '2019':\n time_str = '8:00 AM'\nelse:\n time_str = '12:00 AM'\nif time_match:\n time_str = time_match.group().replace('.', '').strip()\nreturn datetime.strptime('{} {}'.format(date_str, time_str), '%B %d %Y %I:%M %p')", "upcoming_dts = []\nfor upcoming in response.css('.vc_col-sm-4.column_container:nth-child(1) .mk-text-block.indent16 p *::text'):\n start = self._parse_start(upcoming.extract())\n if start:\n upcoming_dts.append(start)\nreturn upcoming_dts", "link_date_map = defaultdict(list)\nfor link in response.css('.vc_col-sm-4.column_container:nth-child(1) .mk-text-block.indent16')[:1].css('a'):\n link_str = link.xpath('./text()').extract_first()\n link_start = self._parse_start(link_str)\n if link_start:\n link_date_map[link_start.date()].append({'title': re.sub('\\\\s+', ' ', link_str.split(' – ')[-1]).strip(), 'href': link.attrib['href']})\nfor section in response.css('.vc_col-sm-4.column_container:nth-child(1) .vc_tta-panel'):\n year_str = section.css('.vc_tta-title-text::text').extract_first().strip()\n for section_link in section.css('p > a'):\n link_str = section_link.xpath('./text()').extract_first()\n link_dt = self._parse_start(link_str, year=year_str)\n if link_dt:\n link_date_map[link_dt.date()].append({'title': re.sub('\\\\s+', ' ', link_str.split(' – ')[-1]).strip(), 'href': section_link.xpath('@href').extract_first()})\nreturn link_date_map"], "bodies_text": "<|body_start_0|>\n link_date_map = self._parse_link_date_map(response)\n meeting_dt_list = self._parse_upcoming(response)\n meeting_dates = [dt.date() for dt in meeting_dt_list]\n for link_date in link_date_map.keys():\n if link_date not in meeting_dates:\n meeting_dt_list.append(datetime.combine(link_date, time(0)))\n for meeting_dt in set(meeting_dt_list):\n meeting_links = link_date_map[meeting_dt.date()]\n meeting = Meeting(title=self._parse_title(meeting_links), description='', classification=self._parse_classification(meeting_links), start=meeting_dt, end=None, all_day=False, time_notes='', location=self.location, links=meeting_links, source=response.url)\n meeting['status'] = self._get_status(meeting)\n meeting['id'] = self._get_id(meeting)\n yield meeting\n<|end_body_0|>\n\n<|body_start_1|>\n for link in links:\n if 'hearing' in link['title'].lower():\n return link['title'].replace('Notice', '').strip()\n if 'special' in link['title'].lower():\n return 'Special Meeting'\n return 'Illinois Medical District Commission'\n<|end_body_1|>\n\n<|body_start_2|>\n for link in links:\n if 'hearing' in link['title'].lower():\n return FORUM\n return COMMISSION\n<|end_body_2|>\n\n<|body_start_3|>\n date_re = '\\\\w+\\\\s+\\\\d{1,2}' if year else '\\\\w+\\\\s+\\\\d{1,2},\\\\s+\\\\d{4}'\n date_match = re.search(date_re, start_str)\n if not date_match:\n return\n date_str = date_match.group().replace(',', '')\n if year:\n date_str += ' {}'.format(year)\n time_match = re.search('\\\\d{1,2}:\\\\d{2}\\\\s+[APM\\\\.]{2,4}', start_str)\n if not year or year == '2019':\n time_str = '8:00 AM'\n else:\n time_str = '12:00 AM'\n if time_match:\n time_str = time_match.group().replace('.', '').strip()\n return datetime.strptime('{} {}'.format(date_str, time_str), '%B %d %Y %I:%M %p')\n<|end_body_3|>\n\n<|body_start_4|>\n upcoming_dts = []\n for upcoming in response.css('.vc_col-sm-4.column_container:nth-child(1) .mk-text-block.indent16 p *::text'):\n start 
= self._parse_start(upcoming.extract())\n if start:\n upcoming_dts.append(start)\n return upcoming_dts\n<|end_body_4|>\n\n<|body_start_5|>\n link_date_map = defaultdict(list)\n for link in response.css('.vc_col-sm-4.column_container:nth-child(1) .mk-text-block.indent16')[:1].css('a'):\n link_str = link.xpath('./text()').extract_first()\n link_start = self._parse_start(link_str)\n if link_start:\n link_date_map[link_start.date()].append({'title': re.sub('\\\\s+', ' ', link_str.split(' – ')[-1]).strip(), 'href': link.attrib['href']})\n for section in response.css('.vc_col-sm-4.column_container:nth-child(1) .vc_tta-panel'):\n year_str = section.css('.vc_tta-title-text::text').extract_first().strip()\n for section_link in section.css('p > a'):\n link_str = section_link.xpath('./text()').extract_first()\n link_dt = self._parse_start(link_str, year=year_str)\n if link_dt:\n link_date_map[link_dt.date()].append({'title': re.sub('\\\\s+', ' ', link_str.split(' – ')[-1]).strip(), 'href': section_link.xpath('@href').extract_first()})\n return link_date_map\n<|end_body_5|>\n", "class_docstring": "", "class_name": "ChiIlMedicalDistrictSpider", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ChiIlMedicalDistrictSpider:\n\n def parse(self, response):\n \"\"\"`parse` should always `yield` Meeting items. Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping needs.\"\"\"\n <|body_0|>\n\n def _parse_title(self, links):\n \"\"\"Parse or generate meeting title.\"\"\"\n <|body_1|>\n\n def _parse_classification(self, links):\n \"\"\"Parse or generate classification from allowed options.\"\"\"\n <|body_2|>\n\n def _parse_start(self, start_str, year=None):\n \"\"\"Parse start datetime as a naive datetime object.\"\"\"\n <|body_3|>\n\n def _parse_upcoming(self, response):\n \"\"\"Return a list of naive datetimes to upcoming meetings\"\"\"\n <|body_4|>\n\n def _parse_link_date_map(self, response):\n \"\"\"Generate a defaultdict mapping of meeting dates and associated links\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n link_date_map = self._parse_link_date_map(response)\n meeting_dt_list = self._parse_upcoming(response)\n meeting_dates = [dt.date() for dt in meeting_dt_list]\n for link_date in link_date_map.keys():\n if link_date not in meeting_dates:\n meeting_dt_list.append(datetime.combine(link_date, time(0)))\n for meeting_dt in set(meeting_dt_list):\n meeting_links = link_date_map[meeting_dt.date()]\n meeting = Meeting(title=self._parse_title(meeting_links), description='', classification=self._parse_classification(meeting_links), start=meeting_dt, end=None, all_day=False, time_notes='', location=self.location, links=meeting_links, source=response.url)\n meeting['status'] = self._get_status(meeting)\n meeting['id'] = self._get_id(meeting)\n yield meeting\n<|end_body_0|>\n\n<|body_start_1|>\n for link in links:\n if 'hearing' in link['title'].lower():\n return link['title'].replace('Notice', '').strip()\n if 'special' in link['title'].lower():\n return 'Special Meeting'\n return 'Illinois Medical District Commission'\n<|end_body_1|>\n\n<|body_start_2|>\n for link in links:\n if 'hearing' in link['title'].lower():\n return FORUM\n return COMMISSION\n<|end_body_2|>\n\n<|body_start_3|>\n date_re = '\\\\w+\\\\s+\\\\d{1,2}' if year else '\\\\w+\\\\s+\\\\d{1,2},\\\\s+\\\\d{4}'\n date_match = re.search(date_re, start_str)\n if not date_match:\n return\n date_str = date_match.group().replace(',', '')\n if year:\n 
date_str += ' {}'.format(year)\n time_match = re.search('\\\\d{1,2}:\\\\d{2}\\\\s+[APM\\\\.]{2,4}', start_str)\n if not year or year == '2019':\n time_str = '8:00 AM'\n else:\n time_str = '12:00 AM'\n if time_match:\n time_str = time_match.group().replace('.', '').strip()\n return datetime.strptime('{} {}'.format(date_str, time_str), '%B %d %Y %I:%M %p')\n<|end_body_3|>\n\n<|body_start_4|>\n upcoming_dts = []\n for upcoming in response.css('.vc_col-sm-4.column_container:nth-child(1) .mk-text-block.indent16 p *::text'):\n start = self._parse_start(upcoming.extract())\n if start:\n upcoming_dts.append(start)\n return upcoming_dts\n<|end_body_4|>\n\n<|body_start_5|>\n link_date_map = defaultdict(list)\n for link in response.css('.vc_col-sm-4.column_container:nth-child(1) .mk-text-block.indent16')[:1].css('a'):\n link_str = link.xpath('./text()').extract_first()\n link_start = self._parse_start(link_str)\n if link_start:\n link_date_map[link_start.date()].append({'title': re.sub('\\\\s+', ' ', link_str.split(' – ')[-1]).strip(), 'href': link.attrib['href']})\n for section in response.css('.vc_col-sm-4.column_container:nth-child(1) .vc_tta-panel'):\n year_str = section.css('.vc_tta-title-text::text').extract_first().strip()\n for section_link in section.css('p > a'):\n link_str = section_link.xpath('./text()').extract_first()\n link_dt = self._parse_start(link_str, year=year_str)\n if link_dt:\n link_date_map[link_dt.date()].append({'title': re.sub('\\\\s+', ' ', link_str.split(' – ')[-1]).strip(), 'href': section_link.xpath('@href').extract_first()})\n return link_date_map\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_val_000104", "length_bytes": 5680, "license_type": "permissive", "methods": [{"docstring": "`parse` should always `yield` Meeting items. Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping needs.", "name": "parse", "signature": "def parse(self, response)"}, {"docstring": "Parse or generate meeting title.", "name": "_parse_title", "signature": "def _parse_title(self, links)"}, {"docstring": "Parse or generate classification from allowed options.", "name": "_parse_classification", "signature": "def _parse_classification(self, links)"}, {"docstring": "Parse start datetime as a naive datetime object.", "name": "_parse_start", "signature": "def _parse_start(self, start_str, year=None)"}, {"docstring": "Return a list of naive datetimes to upcoming meetings", "name": "_parse_upcoming", "signature": "def _parse_upcoming(self, response)"}, {"docstring": "Generate a defaultdict mapping of meeting dates and associated links", "name": "_parse_link_date_map", "signature": "def _parse_link_date_map(self, response)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_002541", "prompt": "Implement the Python class `ChiIlMedicalDistrictSpider` described below.\n\nClass description:\nImplement the ChiIlMedicalDistrictSpider class.\n\nMethod signatures and docstrings:\n- def parse(self, response): `parse` should always `yield` Meeting items. 
Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping needs.\n- def _parse_title(self, links): Parse or generate meeting title.\n- def _parse_classification(self, links): Parse or generate classification from allowed options.\n- def _parse_start(self, start_str, year=None): Parse start datetime as a naive datetime object.\n- def _parse_upcoming(self, response): Return a list of naive datetimes to upcoming meetings\n- def _parse_link_date_map(self, response): Generate a defaultdict mapping of meeting dates and associated links", "prompted_full_text": "Implement the Python class `ChiIlMedicalDistrictSpider` described below.\n\nClass description:\nImplement the ChiIlMedicalDistrictSpider class.\n\nMethod signatures and docstrings:\n- def parse(self, response): `parse` should always `yield` Meeting items. Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping needs.\n- def _parse_title(self, links): Parse or generate meeting title.\n- def _parse_classification(self, links): Parse or generate classification from allowed options.\n- def _parse_start(self, start_str, year=None): Parse start datetime as a naive datetime object.\n- def _parse_upcoming(self, response): Return a list of naive datetimes to upcoming meetings\n- def _parse_link_date_map(self, response): Generate a defaultdict mapping of meeting dates and associated links\n\n<|skeleton|>\nclass ChiIlMedicalDistrictSpider:\n\n def parse(self, response):\n \"\"\"`parse` should always `yield` Meeting items. Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping needs.\"\"\"\n <|body_0|>\n\n def _parse_title(self, links):\n \"\"\"Parse or generate meeting title.\"\"\"\n <|body_1|>\n\n def _parse_classification(self, links):\n \"\"\"Parse or generate classification from allowed options.\"\"\"\n <|body_2|>\n\n def _parse_start(self, start_str, year=None):\n \"\"\"Parse start datetime as a naive datetime object.\"\"\"\n <|body_3|>\n\n def _parse_upcoming(self, response):\n \"\"\"Return a list of naive datetimes to upcoming meetings\"\"\"\n <|body_4|>\n\n def _parse_link_date_map(self, response):\n \"\"\"Generate a defaultdict mapping of meeting dates and associated links\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n link_date_map = self._parse_link_date_map(response)\n meeting_dt_list = self._parse_upcoming(response)\n meeting_dates = [dt.date() for dt in meeting_dt_list]\n for link_date in link_date_map.keys():\n if link_date not in meeting_dates:\n meeting_dt_list.append(datetime.combine(link_date, time(0)))\n for meeting_dt in set(meeting_dt_list):\n meeting_links = link_date_map[meeting_dt.date()]\n meeting = Meeting(title=self._parse_title(meeting_links), description='', classification=self._parse_classification(meeting_links), start=meeting_dt, end=None, all_day=False, time_notes='', location=self.location, links=meeting_links, source=response.url)\n meeting['status'] = self._get_status(meeting)\n meeting['id'] = self._get_id(meeting)\n yield meeting\n<|end_body_0|>\n\n<|body_start_1|>\n for link in links:\n if 'hearing' in link['title'].lower():\n return link['title'].replace('Notice', '').strip()\n if 'special' in link['title'].lower():\n return 'Special Meeting'\n return 'Illinois Medical District Commission'\n<|end_body_1|>\n\n<|body_start_2|>\n for link in links:\n if 'hearing' in link['title'].lower():\n return FORUM\n return COMMISSION\n<|end_body_2|>\n\n<|body_start_3|>\n date_re = '\\\\w+\\\\s+\\\\d{1,2}' if year else 
'\\\\w+\\\\s+\\\\d{1,2},\\\\s+\\\\d{4}'\n date_match = re.search(date_re, start_str)\n if not date_match:\n return\n date_str = date_match.group().replace(',', '')\n if year:\n date_str += ' {}'.format(year)\n time_match = re.search('\\\\d{1,2}:\\\\d{2}\\\\s+[APM\\\\.]{2,4}', start_str)\n if not year or year == '2019':\n time_str = '8:00 AM'\n else:\n time_str = '12:00 AM'\n if time_match:\n time_str = time_match.group().replace('.', '').strip()\n return datetime.strptime('{} {}'.format(date_str, time_str), '%B %d %Y %I:%M %p')\n<|end_body_3|>\n\n<|body_start_4|>\n upcoming_dts = []\n for upcoming in response.css('.vc_col-sm-4.column_container:nth-child(1) .mk-text-block.indent16 p *::text'):\n start = self._parse_start(upcoming.extract())\n if start:\n upcoming_dts.append(start)\n return upcoming_dts\n<|end_body_4|>\n\n<|body_start_5|>\n link_date_map = defaultdict(list)\n for link in response.css('.vc_col-sm-4.column_container:nth-child(1) .mk-text-block.indent16')[:1].css('a'):\n link_str = link.xpath('./text()').extract_first()\n link_start = self._parse_start(link_str)\n if link_start:\n link_date_map[link_start.date()].append({'title': re.sub('\\\\s+', ' ', link_str.split(' – ')[-1]).strip(), 'href': link.attrib['href']})\n for section in response.css('.vc_col-sm-4.column_container:nth-child(1) .vc_tta-panel'):\n year_str = section.css('.vc_tta-title-text::text').extract_first().strip()\n for section_link in section.css('p > a'):\n link_str = section_link.xpath('./text()').extract_first()\n link_dt = self._parse_start(link_str, year=year_str)\n if link_dt:\n link_date_map[link_dt.date()].append({'title': re.sub('\\\\s+', ' ', link_str.split(' – ')[-1]).strip(), 'href': section_link.xpath('@href').extract_first()})\n return link_date_map\n<|end_body_5|>\n", "revision_id": "611fce6a2705446e25a2fc33e32090a571eb35d1", "skeleton": "<|skeleton|>\nclass ChiIlMedicalDistrictSpider:\n\n def parse(self, response):\n \"\"\"`parse` should always `yield` Meeting items. Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping needs.\"\"\"\n <|body_0|>\n\n def _parse_title(self, links):\n \"\"\"Parse or generate meeting title.\"\"\"\n <|body_1|>\n\n def _parse_classification(self, links):\n \"\"\"Parse or generate classification from allowed options.\"\"\"\n <|body_2|>\n\n def _parse_start(self, start_str, year=None):\n \"\"\"Parse start datetime as a naive datetime object.\"\"\"\n <|body_3|>\n\n def _parse_upcoming(self, response):\n \"\"\"Return a list of naive datetimes to upcoming meetings\"\"\"\n <|body_4|>\n\n def _parse_link_date_map(self, response):\n \"\"\"Generate a defaultdict mapping of meeting dates and associated links\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ChiIlMedicalDistrictSpider:\n def parse(self, response):\n \"\"\"`parse` should always `yield` Meeting items. 
Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping needs.\"\"\"\n link_date_map = self._parse_link_date_map(response)\n meeting_dt_list = self._parse_upcoming(response)\n meeting_dates = [dt.date() for dt in meeting_dt_list]\n for link_date in link_date_map.keys():\n if link_date not in meeting_dates:\n meeting_dt_list.append(datetime.combine(link_date, time(0)))\n for meeting_dt in set(meeting_dt_list):\n meeting_links = link_date_map[meeting_dt.date()]\n meeting = Meeting(title=self._parse_title(meeting_links), description='', classification=self._parse_classification(meeting_links), start=meeting_dt, end=None, all_day=False, time_notes='', location=self.location, links=meeting_links, source=response.url)\n meeting['status'] = self._get_status(meeting)\n meeting['id'] = self._get_id(meeting)\n yield meeting\n\n def _parse_title(self, links):\n \"\"\"Parse or generate meeting title.\"\"\"\n for link in links:\n if 'hearing' in link['title'].lower():\n return link['title'].replace('Notice', '').strip()\n if 'special' in link['title'].lower():\n return 'Special Meeting'\n return 'Illinois Medical District Commission'\n\n def _parse_classification(self, links):\n \"\"\"Parse or generate classification from allowed options.\"\"\"\n for link in links:\n if 'hearing' in link['title'].lower():\n return FORUM\n return COMMISSION\n\n def _parse_start(self, start_str, year=None):\n \"\"\"Parse start datetime as a naive datetime object.\"\"\"\n date_re = '\\\\w+\\\\s+\\\\d{1,2}' if year else '\\\\w+\\\\s+\\\\d{1,2},\\\\s+\\\\d{4}'\n date_match = re.search(date_re, start_str)\n if not date_match:\n return\n date_str = date_match.group().replace(',', '')\n if year:\n date_str += ' {}'.format(year)\n time_match = re.search('\\\\d{1,2}:\\\\d{2}\\\\s+[APM\\\\.]{2,4}', start_str)\n if not year or year == '2019':\n time_str = '8:00 AM'\n else:\n time_str = '12:00 AM'\n if time_match:\n time_str = time_match.group().replace('.', '').strip()\n return datetime.strptime('{} {}'.format(date_str, time_str), '%B %d %Y %I:%M %p')\n\n def _parse_upcoming(self, response):\n \"\"\"Return a list of naive datetimes to upcoming meetings\"\"\"\n upcoming_dts = []\n for upcoming in response.css('.vc_col-sm-4.column_container:nth-child(1) .mk-text-block.indent16 p *::text'):\n start = self._parse_start(upcoming.extract())\n if start:\n upcoming_dts.append(start)\n return upcoming_dts\n\n def _parse_link_date_map(self, response):\n \"\"\"Generate a defaultdict mapping of meeting dates and associated links\"\"\"\n link_date_map = defaultdict(list)\n for link in response.css('.vc_col-sm-4.column_container:nth-child(1) .mk-text-block.indent16')[:1].css('a'):\n link_str = link.xpath('./text()').extract_first()\n link_start = self._parse_start(link_str)\n if link_start:\n link_date_map[link_start.date()].append({'title': re.sub('\\\\s+', ' ', link_str.split(' – ')[-1]).strip(), 'href': link.attrib['href']})\n for section in response.css('.vc_col-sm-4.column_container:nth-child(1) .vc_tta-panel'):\n year_str = section.css('.vc_tta-title-text::text').extract_first().strip()\n for section_link in section.css('p > a'):\n link_str = section_link.xpath('./text()').extract_first()\n link_dt = self._parse_start(link_str, year=year_str)\n if link_dt:\n link_date_map[link_dt.date()].append({'title': re.sub('\\\\s+', ' ', link_str.split(' – ')[-1]).strip(), 'href': section_link.xpath('@href').extract_first()})\n return link_date_map\n", "source": "the_stack_v2_python_sparse", "source_path": 
"city_scrapers/spiders/chi_il_medical_district.py", "source_repo": "City-Bureau/city-scrapers", "split": "val", "star_events_count": 308} {"blob_id": "5719de02c8b56e9c1a4c5b8efa338146b0461852", "bodies": ["super(Generator, self).__init__()\ninitializer = tf.random_normal_initializer(0.0, 0.02)\nself.down1 = Downsample(64, 4, apply_batchnorm=False)\nself.down2 = Downsample(128, 4)\nself.down3 = Downsample(256, 4)\nself.down4 = Downsample(512, 4)\nself.down5 = Downsample(512, 4)\nself.down6 = Downsample(512, 4)\nself.down7 = Downsample(512, 4)\nself.down8 = Downsample(512, 4)\nself.up1 = Upsample(512, 4, apply_dropout=True)\nself.up2 = Upsample(512, 4, apply_dropout=True)\nself.up3 = Upsample(512, 4, apply_dropout=True)\nself.up4 = Upsample(512, 4)\nself.up5 = Upsample(256, 4)\nself.up6 = Upsample(128, 4)\nself.up7 = Upsample(64, 4)\nself.last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS, (4, 4), strides=2, padding='same', kernel_initializer=initializer)", "x1 = self.down1(x, training=training)\nx2 = self.down2(x1, training=training)\nx3 = self.down3(x2, training=training)\nx4 = self.down4(x3, training=training)\nx5 = self.down5(x4, training=training)\nx6 = self.down6(x5, training=training)\nx7 = self.down7(x6, training=training)\nx8 = self.down8(x7, training=training)\nx9 = self.up1(x8, x7, training=training)\nx10 = self.up2(x9, x6, training=training)\nx11 = self.up3(x10, x5, training=training)\nx12 = self.up4(x11, x4, training=training)\nx13 = self.up5(x12, x3, training=training)\nx14 = self.up6(x13, x2, training=training)\nx15 = self.up7(x14, x1, training=training)\nx16 = self.last(x15)\nx16 = tf.nn.tanh(x16)\nreturn x16"], "bodies_text": "<|body_start_0|>\n super(Generator, self).__init__()\n initializer = tf.random_normal_initializer(0.0, 0.02)\n self.down1 = Downsample(64, 4, apply_batchnorm=False)\n self.down2 = Downsample(128, 4)\n self.down3 = Downsample(256, 4)\n self.down4 = Downsample(512, 4)\n self.down5 = Downsample(512, 4)\n self.down6 = Downsample(512, 4)\n self.down7 = Downsample(512, 4)\n self.down8 = Downsample(512, 4)\n self.up1 = Upsample(512, 4, apply_dropout=True)\n self.up2 = Upsample(512, 4, apply_dropout=True)\n self.up3 = Upsample(512, 4, apply_dropout=True)\n self.up4 = Upsample(512, 4)\n self.up5 = Upsample(256, 4)\n self.up6 = Upsample(128, 4)\n self.up7 = Upsample(64, 4)\n self.last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS, (4, 4), strides=2, padding='same', kernel_initializer=initializer)\n<|end_body_0|>\n\n<|body_start_1|>\n x1 = self.down1(x, training=training)\n x2 = self.down2(x1, training=training)\n x3 = self.down3(x2, training=training)\n x4 = self.down4(x3, training=training)\n x5 = self.down5(x4, training=training)\n x6 = self.down6(x5, training=training)\n x7 = self.down7(x6, training=training)\n x8 = self.down8(x7, training=training)\n x9 = self.up1(x8, x7, training=training)\n x10 = self.up2(x9, x6, training=training)\n x11 = self.up3(x10, x5, training=training)\n x12 = self.up4(x11, x4, training=training)\n x13 = self.up5(x12, x3, training=training)\n x14 = self.up6(x13, x2, training=training)\n x15 = self.up7(x14, x1, training=training)\n x16 = self.last(x15)\n x16 = tf.nn.tanh(x16)\n return x16\n<|end_body_1|>\n", "class_docstring": "The architecture of generator is a modified U-Net.", "class_name": "Generator", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Generator:\n \"\"\"The architecture of generator is a modified U-Net.\"\"\"\n\n def __init__(self):\n 
\"\"\"The construct function.\"\"\"\n <|body_0|>\n\n def call(self, x, training=True):\n \"\"\"Calls the model on new inputs. Args: x: The origin image before translation. training: If training. Returns: The generated image.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Generator, self).__init__()\n initializer = tf.random_normal_initializer(0.0, 0.02)\n self.down1 = Downsample(64, 4, apply_batchnorm=False)\n self.down2 = Downsample(128, 4)\n self.down3 = Downsample(256, 4)\n self.down4 = Downsample(512, 4)\n self.down5 = Downsample(512, 4)\n self.down6 = Downsample(512, 4)\n self.down7 = Downsample(512, 4)\n self.down8 = Downsample(512, 4)\n self.up1 = Upsample(512, 4, apply_dropout=True)\n self.up2 = Upsample(512, 4, apply_dropout=True)\n self.up3 = Upsample(512, 4, apply_dropout=True)\n self.up4 = Upsample(512, 4)\n self.up5 = Upsample(256, 4)\n self.up6 = Upsample(128, 4)\n self.up7 = Upsample(64, 4)\n self.last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS, (4, 4), strides=2, padding='same', kernel_initializer=initializer)\n<|end_body_0|>\n\n<|body_start_1|>\n x1 = self.down1(x, training=training)\n x2 = self.down2(x1, training=training)\n x3 = self.down3(x2, training=training)\n x4 = self.down4(x3, training=training)\n x5 = self.down5(x4, training=training)\n x6 = self.down6(x5, training=training)\n x7 = self.down7(x6, training=training)\n x8 = self.down8(x7, training=training)\n x9 = self.up1(x8, x7, training=training)\n x10 = self.up2(x9, x6, training=training)\n x11 = self.up3(x10, x5, training=training)\n x12 = self.up4(x11, x4, training=training)\n x13 = self.up5(x12, x3, training=training)\n x14 = self.up6(x13, x2, training=training)\n x15 = self.up7(x14, x1, training=training)\n x16 = self.last(x15)\n x16 = tf.nn.tanh(x16)\n return x16\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000105", "length_bytes": 20044, "license_type": "no_license", "methods": [{"docstring": "The construct function.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Calls the model on new inputs. Args: x: The origin image before translation. training: If training. Returns: The generated image.", "name": "call", "signature": "def call(self, x, training=True)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005671", "prompt": "Implement the Python class `Generator` described below.\n\nClass description:\nThe architecture of generator is a modified U-Net.\n\nMethod signatures and docstrings:\n- def __init__(self): The construct function.\n- def call(self, x, training=True): Calls the model on new inputs. Args: x: The origin image before translation. training: If training. Returns: The generated image.", "prompted_full_text": "Implement the Python class `Generator` described below.\n\nClass description:\nThe architecture of generator is a modified U-Net.\n\nMethod signatures and docstrings:\n- def __init__(self): The construct function.\n- def call(self, x, training=True): Calls the model on new inputs. Args: x: The origin image before translation. training: If training. Returns: The generated image.\n\n<|skeleton|>\nclass Generator:\n \"\"\"The architecture of generator is a modified U-Net.\"\"\"\n\n def __init__(self):\n \"\"\"The construct function.\"\"\"\n <|body_0|>\n\n def call(self, x, training=True):\n \"\"\"Calls the model on new inputs. Args: x: The origin image before translation. training: If training. 
Returns: The generated image.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Generator, self).__init__()\n initializer = tf.random_normal_initializer(0.0, 0.02)\n self.down1 = Downsample(64, 4, apply_batchnorm=False)\n self.down2 = Downsample(128, 4)\n self.down3 = Downsample(256, 4)\n self.down4 = Downsample(512, 4)\n self.down5 = Downsample(512, 4)\n self.down6 = Downsample(512, 4)\n self.down7 = Downsample(512, 4)\n self.down8 = Downsample(512, 4)\n self.up1 = Upsample(512, 4, apply_dropout=True)\n self.up2 = Upsample(512, 4, apply_dropout=True)\n self.up3 = Upsample(512, 4, apply_dropout=True)\n self.up4 = Upsample(512, 4)\n self.up5 = Upsample(256, 4)\n self.up6 = Upsample(128, 4)\n self.up7 = Upsample(64, 4)\n self.last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS, (4, 4), strides=2, padding='same', kernel_initializer=initializer)\n<|end_body_0|>\n\n<|body_start_1|>\n x1 = self.down1(x, training=training)\n x2 = self.down2(x1, training=training)\n x3 = self.down3(x2, training=training)\n x4 = self.down4(x3, training=training)\n x5 = self.down5(x4, training=training)\n x6 = self.down6(x5, training=training)\n x7 = self.down7(x6, training=training)\n x8 = self.down8(x7, training=training)\n x9 = self.up1(x8, x7, training=training)\n x10 = self.up2(x9, x6, training=training)\n x11 = self.up3(x10, x5, training=training)\n x12 = self.up4(x11, x4, training=training)\n x13 = self.up5(x12, x3, training=training)\n x14 = self.up6(x13, x2, training=training)\n x15 = self.up7(x14, x1, training=training)\n x16 = self.last(x15)\n x16 = tf.nn.tanh(x16)\n return x16\n<|end_body_1|>\n", "revision_id": "d1b70b2a954f4665b628ba252b03c1a74b95559f", "skeleton": "<|skeleton|>\nclass Generator:\n \"\"\"The architecture of generator is a modified U-Net.\"\"\"\n\n def __init__(self):\n \"\"\"The construct function.\"\"\"\n <|body_0|>\n\n def call(self, x, training=True):\n \"\"\"Calls the model on new inputs. Args: x: The origin image before translation. training: If training. Returns: The generated image.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Generator:\n \"\"\"The architecture of generator is a modified U-Net.\"\"\"\n\n def __init__(self):\n \"\"\"The construct function.\"\"\"\n super(Generator, self).__init__()\n initializer = tf.random_normal_initializer(0.0, 0.02)\n self.down1 = Downsample(64, 4, apply_batchnorm=False)\n self.down2 = Downsample(128, 4)\n self.down3 = Downsample(256, 4)\n self.down4 = Downsample(512, 4)\n self.down5 = Downsample(512, 4)\n self.down6 = Downsample(512, 4)\n self.down7 = Downsample(512, 4)\n self.down8 = Downsample(512, 4)\n self.up1 = Upsample(512, 4, apply_dropout=True)\n self.up2 = Upsample(512, 4, apply_dropout=True)\n self.up3 = Upsample(512, 4, apply_dropout=True)\n self.up4 = Upsample(512, 4)\n self.up5 = Upsample(256, 4)\n self.up6 = Upsample(128, 4)\n self.up7 = Upsample(64, 4)\n self.last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS, (4, 4), strides=2, padding='same', kernel_initializer=initializer)\n\n def call(self, x, training=True):\n \"\"\"Calls the model on new inputs. Args: x: The origin image before translation. training: If training. 
Returns: The generated image.\"\"\"\n x1 = self.down1(x, training=training)\n x2 = self.down2(x1, training=training)\n x3 = self.down3(x2, training=training)\n x4 = self.down4(x3, training=training)\n x5 = self.down5(x4, training=training)\n x6 = self.down6(x5, training=training)\n x7 = self.down7(x6, training=training)\n x8 = self.down8(x7, training=training)\n x9 = self.up1(x8, x7, training=training)\n x10 = self.up2(x9, x6, training=training)\n x11 = self.up3(x10, x5, training=training)\n x12 = self.up4(x11, x4, training=training)\n x13 = self.up5(x12, x3, training=training)\n x14 = self.up6(x13, x2, training=training)\n x15 = self.up7(x14, x1, training=training)\n x16 = self.last(x15)\n x16 = tf.nn.tanh(x16)\n return x16\n", "source": "the_stack_v2_python_sparse", "source_path": "NeuralNetworks-tensorflow/generation_network_model/GAN/pix2pix.py", "source_repo": "zhaocc1106/machine_learn", "split": "val", "star_events_count": 15} {"blob_id": "86450e2bc9f0da56b212ebbbfc145b8a78ea74a9", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "class_docstring": "Proto file describing the FeedItemTarget service. Service to manage feed item targets.", "class_name": "FeedItemTargetServiceServicer", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-generic-cla"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FeedItemTargetServiceServicer:\n \"\"\"Proto file describing the FeedItemTarget service. Service to manage feed item targets.\"\"\"\n\n def GetFeedItemTarget(self, request, context):\n \"\"\"Returns the requested feed item targets in full detail.\"\"\"\n <|body_0|>\n\n def MutateFeedItemTargets(self, request, context):\n \"\"\"Creates or removes feed item targets. Operation statuses are returned.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000106", "length_bytes": 3507, "license_type": "permissive", "methods": [{"docstring": "Returns the requested feed item targets in full detail.", "name": "GetFeedItemTarget", "signature": "def GetFeedItemTarget(self, request, context)"}, {"docstring": "Creates or removes feed item targets. 
Operation statuses are returned.", "name": "MutateFeedItemTargets", "signature": "def MutateFeedItemTargets(self, request, context)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000172", "prompt": "Implement the Python class `FeedItemTargetServiceServicer` described below.\n\nClass description:\nProto file describing the FeedItemTarget service. Service to manage feed item targets.\n\nMethod signatures and docstrings:\n- def GetFeedItemTarget(self, request, context): Returns the requested feed item targets in full detail.\n- def MutateFeedItemTargets(self, request, context): Creates or removes feed item targets. Operation statuses are returned.", "prompted_full_text": "Implement the Python class `FeedItemTargetServiceServicer` described below.\n\nClass description:\nProto file describing the FeedItemTarget service. Service to manage feed item targets.\n\nMethod signatures and docstrings:\n- def GetFeedItemTarget(self, request, context): Returns the requested feed item targets in full detail.\n- def MutateFeedItemTargets(self, request, context): Creates or removes feed item targets. Operation statuses are returned.\n\n<|skeleton|>\nclass FeedItemTargetServiceServicer:\n \"\"\"Proto file describing the FeedItemTarget service. Service to manage feed item targets.\"\"\"\n\n def GetFeedItemTarget(self, request, context):\n \"\"\"Returns the requested feed item targets in full detail.\"\"\"\n <|body_0|>\n\n def MutateFeedItemTargets(self, request, context):\n \"\"\"Creates or removes feed item targets. Operation statuses are returned.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "revision_id": "0fc8a7dbf31d9e8e2a4364df93bec5f6b7edd50a", "skeleton": "<|skeleton|>\nclass FeedItemTargetServiceServicer:\n \"\"\"Proto file describing the FeedItemTarget service. Service to manage feed item targets.\"\"\"\n\n def GetFeedItemTarget(self, request, context):\n \"\"\"Returns the requested feed item targets in full detail.\"\"\"\n <|body_0|>\n\n def MutateFeedItemTargets(self, request, context):\n \"\"\"Creates or removes feed item targets. Operation statuses are returned.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FeedItemTargetServiceServicer:\n \"\"\"Proto file describing the FeedItemTarget service. Service to manage feed item targets.\"\"\"\n\n def GetFeedItemTarget(self, request, context):\n \"\"\"Returns the requested feed item targets in full detail.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def MutateFeedItemTargets(self, request, context):\n \"\"\"Creates or removes feed item targets. 
Operation statuses are returned.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "google/ads/google_ads/v1/proto/services/feed_item_target_service_pb2_grpc.py", "source_repo": "juanmacugat/google-ads-python", "split": "val", "star_events_count": 1} {"blob_id": "58e5588004556cf3f4b225684954358ecbe010fd", "bodies": ["json_data = request.get_json()\nb = event_builders.FbEventBuilder()\nb.build_with_fb_dict(json_data)\ne: event_models.SocialEvent = b.export_as_class(event_models.SocialEvent)\nref: DocumentReference = EventDao().create_fb_event(e)\ne.set_firestore_ref(ref)\ndict_view = e.to_dict_view()\ndict_view['eventId'] = ref.id\nUserDao().add_user_event_dict(uid, dict_view['fbEventId'], dict_view)\nreturn {'id': e.get_firestore_ref().id}", "json_data = request.get_json()\nevent_dicts = json_data['data']\nids = list()\nfor event_dict in event_dicts:\n event_id = create_fb_event(event_dict, uid)\n ids.append(event_id)\nreturn {'ids': ids}"], "bodies_text": "<|body_start_0|>\n json_data = request.get_json()\n b = event_builders.FbEventBuilder()\n b.build_with_fb_dict(json_data)\n e: event_models.SocialEvent = b.export_as_class(event_models.SocialEvent)\n ref: DocumentReference = EventDao().create_fb_event(e)\n e.set_firestore_ref(ref)\n dict_view = e.to_dict_view()\n dict_view['eventId'] = ref.id\n UserDao().add_user_event_dict(uid, dict_view['fbEventId'], dict_view)\n return {'id': e.get_firestore_ref().id}\n<|end_body_0|>\n\n<|body_start_1|>\n json_data = request.get_json()\n event_dicts = json_data['data']\n ids = list()\n for event_dict in event_dicts:\n event_id = create_fb_event(event_dict, uid)\n ids.append(event_id)\n return {'ids': ids}\n<|end_body_1|>\n", "class_docstring": "Handles user facebook event upload", "class_name": "UserEventService", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UserEventService:\n \"\"\"Handles user facebook event upload\"\"\"\n\n def post(self, uid):\n \"\"\"Creates a new event with Facebook event JSON (that is obtained from Facebook Graph API). --- tags: - events parameters: - in: body name: body schema: id: UserEventJSON type: object required: - description - end_time - start_time - place - id properties: description: type: string example: \"Advance Sale begins Friday, 6/1 at 11AM PDT www.coachella.com\" end_time: type: string example: \"2019-04-14T23:59:00-0700\" start_time: type: string example: \"2019-04-12T12:00:00-0700\" place: type: object properties: name: type: string example: \"Coachella\" location: type: object properties: latitude: type: number example: 33.679974 longitude: type: number example: -116.237221 id: example: \"20281766647\" id: ty\"\"\"\n <|body_0|>\n\n def put(self, uid):\n \"\"\"Creates many new events with a list of Facebook event JSON's (that are obtained from Facebook Graph API). 
--- tags: - events parameters: - in: body name: body schema: properties: data: type: array items: $ref: \"#/definitions/UserEventJSON\" :param uid: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n json_data = request.get_json()\n b = event_builders.FbEventBuilder()\n b.build_with_fb_dict(json_data)\n e: event_models.SocialEvent = b.export_as_class(event_models.SocialEvent)\n ref: DocumentReference = EventDao().create_fb_event(e)\n e.set_firestore_ref(ref)\n dict_view = e.to_dict_view()\n dict_view['eventId'] = ref.id\n UserDao().add_user_event_dict(uid, dict_view['fbEventId'], dict_view)\n return {'id': e.get_firestore_ref().id}\n<|end_body_0|>\n\n<|body_start_1|>\n json_data = request.get_json()\n event_dicts = json_data['data']\n ids = list()\n for event_dict in event_dicts:\n event_id = create_fb_event(event_dict, uid)\n ids.append(event_id)\n return {'ids': ids}\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000107", "length_bytes": 8250, "license_type": "no_license", "methods": [{"docstring": "Creates a new event with Facebook event JSON (that is obtained from Facebook Graph API). --- tags: - events parameters: - in: body name: body schema: id: UserEventJSON type: object required: - description - end_time - start_time - place - id properties: description: type: string example: \"Advance Sale begins Friday, 6/1 at 11AM PDT www.coachella.com\" end_time: type: string example: \"2019-04-14T23:59:00-0700\" start_time: type: string example: \"2019-04-12T12:00:00-0700\" place: type: object properties: name: type: string example: \"Coachella\" location: type: object properties: latitude: type: number example: 33.679974 longitude: type: number example: -116.237221 id: example: \"20281766647\" id: ty", "name": "post", "signature": "def post(self, uid)"}, {"docstring": "Creates many new events with a list of Facebook event JSON's (that are obtained from Facebook Graph API). --- tags: - events parameters: - in: body name: body schema: properties: data: type: array items: $ref: \"#/definitions/UserEventJSON\" :param uid: :return:", "name": "put", "signature": "def put(self, uid)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004055", "prompt": "Implement the Python class `UserEventService` described below.\n\nClass description:\nHandles user facebook event upload\n\nMethod signatures and docstrings:\n- def post(self, uid): Creates a new event with Facebook event JSON (that is obtained from Facebook Graph API). --- tags: - events parameters: - in: body name: body schema: id: UserEventJSON type: object required: - description - end_time - start_time - place - id properties: description: type: string example: \"Advance Sale begins Friday, 6/1 at 11AM PDT www.coachella.com\" end_time: type: string example: \"2019-04-14T23:59:00-0700\" start_time: type: string example: \"2019-04-12T12:00:00-0700\" place: type: object properties: name: type: string example: \"Coachella\" location: type: object properties: latitude: type: number example: 33.679974 longitude: type: number example: -116.237221 id: example: \"20281766647\" id: ty\n- def put(self, uid): Creates many new events with a list of Facebook event JSON's (that are obtained from Facebook Graph API). 
--- tags: - events parameters: - in: body name: body schema: properties: data: type: array items: $ref: \"#/definitions/UserEventJSON\" :param uid: :return:", "prompted_full_text": "Implement the Python class `UserEventService` described below.\n\nClass description:\nHandles user facebook event upload\n\nMethod signatures and docstrings:\n- def post(self, uid): Creates a new event with Facebook event JSON (that is obtained from Facebook Graph API). --- tags: - events parameters: - in: body name: body schema: id: UserEventJSON type: object required: - description - end_time - start_time - place - id properties: description: type: string example: \"Advance Sale begins Friday, 6/1 at 11AM PDT www.coachella.com\" end_time: type: string example: \"2019-04-14T23:59:00-0700\" start_time: type: string example: \"2019-04-12T12:00:00-0700\" place: type: object properties: name: type: string example: \"Coachella\" location: type: object properties: latitude: type: number example: 33.679974 longitude: type: number example: -116.237221 id: example: \"20281766647\" id: ty\n- def put(self, uid): Creates many new events with a list of Facebook event JSON's (that are obtained from Facebook Graph API). --- tags: - events parameters: - in: body name: body schema: properties: data: type: array items: $ref: \"#/definitions/UserEventJSON\" :param uid: :return:\n\n<|skeleton|>\nclass UserEventService:\n \"\"\"Handles user facebook event upload\"\"\"\n\n def post(self, uid):\n \"\"\"Creates a new event with Facebook event JSON (that is obtained from Facebook Graph API). --- tags: - events parameters: - in: body name: body schema: id: UserEventJSON type: object required: - description - end_time - start_time - place - id properties: description: type: string example: \"Advance Sale begins Friday, 6/1 at 11AM PDT www.coachella.com\" end_time: type: string example: \"2019-04-14T23:59:00-0700\" start_time: type: string example: \"2019-04-12T12:00:00-0700\" place: type: object properties: name: type: string example: \"Coachella\" location: type: object properties: latitude: type: number example: 33.679974 longitude: type: number example: -116.237221 id: example: \"20281766647\" id: ty\"\"\"\n <|body_0|>\n\n def put(self, uid):\n \"\"\"Creates many new events with a list of Facebook event JSON's (that are obtained from Facebook Graph API). --- tags: - events parameters: - in: body name: body schema: properties: data: type: array items: $ref: \"#/definitions/UserEventJSON\" :param uid: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n json_data = request.get_json()\n b = event_builders.FbEventBuilder()\n b.build_with_fb_dict(json_data)\n e: event_models.SocialEvent = b.export_as_class(event_models.SocialEvent)\n ref: DocumentReference = EventDao().create_fb_event(e)\n e.set_firestore_ref(ref)\n dict_view = e.to_dict_view()\n dict_view['eventId'] = ref.id\n UserDao().add_user_event_dict(uid, dict_view['fbEventId'], dict_view)\n return {'id': e.get_firestore_ref().id}\n<|end_body_0|>\n\n<|body_start_1|>\n json_data = request.get_json()\n event_dicts = json_data['data']\n ids = list()\n for event_dict in event_dicts:\n event_id = create_fb_event(event_dict, uid)\n ids.append(event_id)\n return {'ids': ids}\n<|end_body_1|>\n", "revision_id": "f1e98f0002046cb4c932f9f1badbdf2eb8af92d1", "skeleton": "<|skeleton|>\nclass UserEventService:\n \"\"\"Handles user facebook event upload\"\"\"\n\n def post(self, uid):\n \"\"\"Creates a new event with Facebook event JSON (that is obtained from Facebook Graph API). 
--- tags: - events parameters: - in: body name: body schema: id: UserEventJSON type: object required: - description - end_time - start_time - place - id properties: description: type: string example: \"Advance Sale begins Friday, 6/1 at 11AM PDT www.coachella.com\" end_time: type: string example: \"2019-04-14T23:59:00-0700\" start_time: type: string example: \"2019-04-12T12:00:00-0700\" place: type: object properties: name: type: string example: \"Coachella\" location: type: object properties: latitude: type: number example: 33.679974 longitude: type: number example: -116.237221 id: example: \"20281766647\" id: ty\"\"\"\n <|body_0|>\n\n def put(self, uid):\n \"\"\"Creates many new events with a list of Facebook event JSON's (that are obtained from Facebook Graph API). --- tags: - events parameters: - in: body name: body schema: properties: data: type: array items: $ref: \"#/definitions/UserEventJSON\" :param uid: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UserEventService:\n \"\"\"Handles user facebook event upload\"\"\"\n\n def post(self, uid):\n \"\"\"Creates a new event with Facebook event JSON (that is obtained from Facebook Graph API). --- tags: - events parameters: - in: body name: body schema: id: UserEventJSON type: object required: - description - end_time - start_time - place - id properties: description: type: string example: \"Advance Sale begins Friday, 6/1 at 11AM PDT www.coachella.com\" end_time: type: string example: \"2019-04-14T23:59:00-0700\" start_time: type: string example: \"2019-04-12T12:00:00-0700\" place: type: object properties: name: type: string example: \"Coachella\" location: type: object properties: latitude: type: number example: 33.679974 longitude: type: number example: -116.237221 id: example: \"20281766647\" id: ty\"\"\"\n json_data = request.get_json()\n b = event_builders.FbEventBuilder()\n b.build_with_fb_dict(json_data)\n e: event_models.SocialEvent = b.export_as_class(event_models.SocialEvent)\n ref: DocumentReference = EventDao().create_fb_event(e)\n e.set_firestore_ref(ref)\n dict_view = e.to_dict_view()\n dict_view['eventId'] = ref.id\n UserDao().add_user_event_dict(uid, dict_view['fbEventId'], dict_view)\n return {'id': e.get_firestore_ref().id}\n\n def put(self, uid):\n \"\"\"Creates many new events with a list of Facebook event JSON's (that are obtained from Facebook Graph API). 
--- tags: - events parameters: - in: body name: body schema: properties: data: type: array items: $ref: \"#/definitions/UserEventJSON\" :param uid: :return:\"\"\"\n json_data = request.get_json()\n event_dicts = json_data['data']\n ids = list()\n for event_dict in event_dicts:\n event_id = create_fb_event(event_dict, uid)\n ids.append(event_id)\n return {'ids': ids}\n", "source": "the_stack_v2_python_sparse", "source_path": "gravitate/api_server/event/services.py", "source_repo": "billyrrr/gravitate-backend", "split": "val", "star_events_count": 0} {"blob_id": "8025622be7d2119d8e871d28c60c3c01ea0a493e", "bodies": ["if x.issparse is True or self._rv.issparse is True:\n raise TypeError('Chebyshev Kernel not available for sparse data.See `sklearn.metrics.pairwise_distances`.')\nreturn -CArray(metrics.pairwise.pairwise_distances(x.get_data(), self._rv.get_data(), metric='chebyshev'))", "if not self._cached_x.is_vector_like:\n raise ValueError('kernel gradient can be computed only wrt vector-like arrays.')\nif self._rv is None:\n raise ValueError('Please run forward with caching=True or set `rv` first.')\nif self._cached_x.issparse is True or self._rv.issparse is True:\n raise TypeError('Chebyshev Kernel not available for sparse data.See `sklearn.metrics.pairwise_distances`.')\ndiff = self._rv - self._cached_x\nm = abs(diff).max(axis=1)\ngrad = CArray.zeros(shape=diff.shape)\ngrad[diff >= m] = 1\ngrad[diff <= -m] = -1\nreturn grad if w is None else w.dot(grad)"], "bodies_text": "<|body_start_0|>\n if x.issparse is True or self._rv.issparse is True:\n raise TypeError('Chebyshev Kernel not available for sparse data.See `sklearn.metrics.pairwise_distances`.')\n return -CArray(metrics.pairwise.pairwise_distances(x.get_data(), self._rv.get_data(), metric='chebyshev'))\n<|end_body_0|>\n\n<|body_start_1|>\n if not self._cached_x.is_vector_like:\n raise ValueError('kernel gradient can be computed only wrt vector-like arrays.')\n if self._rv is None:\n raise ValueError('Please run forward with caching=True or set `rv` first.')\n if self._cached_x.issparse is True or self._rv.issparse is True:\n raise TypeError('Chebyshev Kernel not available for sparse data.See `sklearn.metrics.pairwise_distances`.')\n diff = self._rv - self._cached_x\n m = abs(diff).max(axis=1)\n grad = CArray.zeros(shape=diff.shape)\n grad[diff >= m] = 1\n grad[diff <= -m] = -1\n return grad if w is None else w.dot(grad)\n<|end_body_1|>\n", "class_docstring": "Chebyshev distance kernel. Given matrices X and RV, this is computed as:: K(x, rv) = max(|x - rv|) for each pair of rows in X and in RV. Attributes ---------- class_type : 'chebyshev-dist' Examples -------- >>> from secml.array import CArray >>> from secml.ml.kernels import CKernelChebyshevDistance >>> x = CArray([[1,2],[3,4]]) >>> v = CArray([[5,6],[7,8]]) >>> print(CKernelChebyshevDistance().k(x,v)) CArray([[-4. -6.] [-2. -4.]]) >>> print(CKernelChebyshevDistance().k(x)) CArray([[-0. -2.] [-2. -0.]])", "class_name": "CKernelChebyshevDistance", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CKernelChebyshevDistance:\n \"\"\"Chebyshev distance kernel. Given matrices X and RV, this is computed as:: K(x, rv) = max(|x - rv|) for each pair of rows in X and in RV. 
Attributes ---------- class_type : 'chebyshev-dist' Examples -------- >>> from secml.array import CArray >>> from secml.ml.kernels import CKernelChebyshevDistance >>> x = CArray([[1,2],[3,4]]) >>> v = CArray([[5,6],[7,8]]) >>> print(CKernelChebyshevDistance().k(x,v)) CArray([[-4. -6.] [-2. -4.]]) >>> print(CKernelChebyshevDistance().k(x)) CArray([[-0. -2.] [-2. -0.]])\"\"\"\n\n def _forward(self, x):\n \"\"\"Compute (negative) Chebyshev distances between x and cached rv. Parameters ---------- x : CArray or array_like Array of shape (n_x, n_features). Returns ------- kernel : CArray Kernel between x and cached rv, shape (n_x, n_rv).\"\"\"\n <|body_0|>\n\n def _backward(self, w=None):\n \"\"\"Calculate gradients of Chebyshev kernel wrt cached vector 'x'. The gradient of the negative Chebyshev distance is given by:: dK(rv,x)/dx = - sign(rv - x) Parameters ---------- w : CArray of shape (1, n_rv) or None if CArray, it is pre-multiplied to the gradient of the module, as in standard reverse-mode autodiff. Returns ------- kernel_gradient : CArray Kernel gradient of rv with respect to vector x, shape (n_rv, n_features) if n_rv > 1 and w is None, else (1, n_features).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if x.issparse is True or self._rv.issparse is True:\n raise TypeError('Chebyshev Kernel not available for sparse data.See `sklearn.metrics.pairwise_distances`.')\n return -CArray(metrics.pairwise.pairwise_distances(x.get_data(), self._rv.get_data(), metric='chebyshev'))\n<|end_body_0|>\n\n<|body_start_1|>\n if not self._cached_x.is_vector_like:\n raise ValueError('kernel gradient can be computed only wrt vector-like arrays.')\n if self._rv is None:\n raise ValueError('Please run forward with caching=True or set `rv` first.')\n if self._cached_x.issparse is True or self._rv.issparse is True:\n raise TypeError('Chebyshev Kernel not available for sparse data.See `sklearn.metrics.pairwise_distances`.')\n diff = self._rv - self._cached_x\n m = abs(diff).max(axis=1)\n grad = CArray.zeros(shape=diff.shape)\n grad[diff >= m] = 1\n grad[diff <= -m] = -1\n return grad if w is None else w.dot(grad)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000108", "length_bytes": 3292, "license_type": "permissive", "methods": [{"docstring": "Compute (negative) Chebyshev distances between x and cached rv. Parameters ---------- x : CArray or array_like Array of shape (n_x, n_features). Returns ------- kernel : CArray Kernel between x and cached rv, shape (n_x, n_rv).", "name": "_forward", "signature": "def _forward(self, x)"}, {"docstring": "Calculate gradients of Chebyshev kernel wrt cached vector 'x'. The gradient of the negative Chebyshev distance is given by:: dK(rv,x)/dx = - sign(rv - x) Parameters ---------- w : CArray of shape (1, n_rv) or None if CArray, it is pre-multiplied to the gradient of the module, as in standard reverse-mode autodiff. Returns ------- kernel_gradient : CArray Kernel gradient of rv with respect to vector x, shape (n_rv, n_features) if n_rv > 1 and w is None, else (1, n_features).", "name": "_backward", "signature": "def _backward(self, w=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003762", "prompt": "Implement the Python class `CKernelChebyshevDistance` described below.\n\nClass description:\nChebyshev distance kernel. Given matrices X and RV, this is computed as:: K(x, rv) = max(|x - rv|) for each pair of rows in X and in RV. 
Attributes ---------- class_type : 'chebyshev-dist' Examples -------- >>> from secml.array import CArray >>> from secml.ml.kernels import CKernelChebyshevDistance >>> x = CArray([[1,2],[3,4]]) >>> v = CArray([[5,6],[7,8]]) >>> print(CKernelChebyshevDistance().k(x,v)) CArray([[-4. -6.] [-2. -4.]]) >>> print(CKernelChebyshevDistance().k(x)) CArray([[-0. -2.] [-2. -0.]])\n\nMethod signatures and docstrings:\n- def _forward(self, x): Compute (negative) Chebyshev distances between x and cached rv. Parameters ---------- x : CArray or array_like Array of shape (n_x, n_features). Returns ------- kernel : CArray Kernel between x and cached rv, shape (n_x, n_rv).\n- def _backward(self, w=None): Calculate gradients of Chebyshev kernel wrt cached vector 'x'. The gradient of the negative Chebyshev distance is given by:: dK(rv,x)/dx = - sign(rv - x) Parameters ---------- w : CArray of shape (1, n_rv) or None if CArray, it is pre-multiplied to the gradient of the module, as in standard reverse-mode autodiff. Returns ------- kernel_gradient : CArray Kernel gradient of rv with respect to vector x, shape (n_rv, n_features) if n_rv > 1 and w is None, else (1, n_features).", "prompted_full_text": "Implement the Python class `CKernelChebyshevDistance` described below.\n\nClass description:\nChebyshev distance kernel. Given matrices X and RV, this is computed as:: K(x, rv) = max(|x - rv|) for each pair of rows in X and in RV. Attributes ---------- class_type : 'chebyshev-dist' Examples -------- >>> from secml.array import CArray >>> from secml.ml.kernels import CKernelChebyshevDistance >>> x = CArray([[1,2],[3,4]]) >>> v = CArray([[5,6],[7,8]]) >>> print(CKernelChebyshevDistance().k(x,v)) CArray([[-4. -6.] [-2. -4.]]) >>> print(CKernelChebyshevDistance().k(x)) CArray([[-0. -2.] [-2. -0.]])\n\nMethod signatures and docstrings:\n- def _forward(self, x): Compute (negative) Chebyshev distances between x and cached rv. Parameters ---------- x : CArray or array_like Array of shape (n_x, n_features). Returns ------- kernel : CArray Kernel between x and cached rv, shape (n_x, n_rv).\n- def _backward(self, w=None): Calculate gradients of Chebyshev kernel wrt cached vector 'x'. The gradient of the negative Chebyshev distance is given by:: dK(rv,x)/dx = - sign(rv - x) Parameters ---------- w : CArray of shape (1, n_rv) or None if CArray, it is pre-multiplied to the gradient of the module, as in standard reverse-mode autodiff. Returns ------- kernel_gradient : CArray Kernel gradient of rv with respect to vector x, shape (n_rv, n_features) if n_rv > 1 and w is None, else (1, n_features).\n\n<|skeleton|>\nclass CKernelChebyshevDistance:\n \"\"\"Chebyshev distance kernel. Given matrices X and RV, this is computed as:: K(x, rv) = max(|x - rv|) for each pair of rows in X and in RV. Attributes ---------- class_type : 'chebyshev-dist' Examples -------- >>> from secml.array import CArray >>> from secml.ml.kernels import CKernelChebyshevDistance >>> x = CArray([[1,2],[3,4]]) >>> v = CArray([[5,6],[7,8]]) >>> print(CKernelChebyshevDistance().k(x,v)) CArray([[-4. -6.] [-2. -4.]]) >>> print(CKernelChebyshevDistance().k(x)) CArray([[-0. -2.] [-2. -0.]])\"\"\"\n\n def _forward(self, x):\n \"\"\"Compute (negative) Chebyshev distances between x and cached rv. Parameters ---------- x : CArray or array_like Array of shape (n_x, n_features). 
Returns ------- kernel : CArray Kernel between x and cached rv, shape (n_x, n_rv).\"\"\"\n <|body_0|>\n\n def _backward(self, w=None):\n \"\"\"Calculate gradients of Chebyshev kernel wrt cached vector 'x'. The gradient of the negative Chebyshev distance is given by:: dK(rv,x)/dx = - sign(rv - x) Parameters ---------- w : CArray of shape (1, n_rv) or None if CArray, it is pre-multiplied to the gradient of the module, as in standard reverse-mode autodiff. Returns ------- kernel_gradient : CArray Kernel gradient of rv with respect to vector x, shape (n_rv, n_features) if n_rv > 1 and w is None, else (1, n_features).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if x.issparse is True or self._rv.issparse is True:\n raise TypeError('Chebyshev Kernel not available for sparse data.See `sklearn.metrics.pairwise_distances`.')\n return -CArray(metrics.pairwise.pairwise_distances(x.get_data(), self._rv.get_data(), metric='chebyshev'))\n<|end_body_0|>\n\n<|body_start_1|>\n if not self._cached_x.is_vector_like:\n raise ValueError('kernel gradient can be computed only wrt vector-like arrays.')\n if self._rv is None:\n raise ValueError('Please run forward with caching=True or set `rv` first.')\n if self._cached_x.issparse is True or self._rv.issparse is True:\n raise TypeError('Chebyshev Kernel not available for sparse data.See `sklearn.metrics.pairwise_distances`.')\n diff = self._rv - self._cached_x\n m = abs(diff).max(axis=1)\n grad = CArray.zeros(shape=diff.shape)\n grad[diff >= m] = 1\n grad[diff <= -m] = -1\n return grad if w is None else w.dot(grad)\n<|end_body_1|>\n", "revision_id": "431373e65d8cfe2cb7cf042ce1a6c9519ea5a14a", "skeleton": "<|skeleton|>\nclass CKernelChebyshevDistance:\n \"\"\"Chebyshev distance kernel. Given matrices X and RV, this is computed as:: K(x, rv) = max(|x - rv|) for each pair of rows in X and in RV. Attributes ---------- class_type : 'chebyshev-dist' Examples -------- >>> from secml.array import CArray >>> from secml.ml.kernels import CKernelChebyshevDistance >>> x = CArray([[1,2],[3,4]]) >>> v = CArray([[5,6],[7,8]]) >>> print(CKernelChebyshevDistance().k(x,v)) CArray([[-4. -6.] [-2. -4.]]) >>> print(CKernelChebyshevDistance().k(x)) CArray([[-0. -2.] [-2. -0.]])\"\"\"\n\n def _forward(self, x):\n \"\"\"Compute (negative) Chebyshev distances between x and cached rv. Parameters ---------- x : CArray or array_like Array of shape (n_x, n_features). Returns ------- kernel : CArray Kernel between x and cached rv, shape (n_x, n_rv).\"\"\"\n <|body_0|>\n\n def _backward(self, w=None):\n \"\"\"Calculate gradients of Chebyshev kernel wrt cached vector 'x'. The gradient of the negative Chebyshev distance is given by:: dK(rv,x)/dx = - sign(rv - x) Parameters ---------- w : CArray of shape (1, n_rv) or None if CArray, it is pre-multiplied to the gradient of the module, as in standard reverse-mode autodiff. Returns ------- kernel_gradient : CArray Kernel gradient of rv with respect to vector x, shape (n_rv, n_features) if n_rv > 1 and w is None, else (1, n_features).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CKernelChebyshevDistance:\n \"\"\"Chebyshev distance kernel. Given matrices X and RV, this is computed as:: K(x, rv) = max(|x - rv|) for each pair of rows in X and in RV. 
Attributes ---------- class_type : 'chebyshev-dist' Examples -------- >>> from secml.array import CArray >>> from secml.ml.kernels import CKernelChebyshevDistance >>> x = CArray([[1,2],[3,4]]) >>> v = CArray([[5,6],[7,8]]) >>> print(CKernelChebyshevDistance().k(x,v)) CArray([[-4. -6.] [-2. -4.]]) >>> print(CKernelChebyshevDistance().k(x)) CArray([[-0. -2.] [-2. -0.]])\"\"\"\n\n def _forward(self, x):\n \"\"\"Compute (negative) Chebyshev distances between x and cached rv. Parameters ---------- x : CArray or array_like Array of shape (n_x, n_features). Returns ------- kernel : CArray Kernel between x and cached rv, shape (n_x, n_rv).\"\"\"\n if x.issparse is True or self._rv.issparse is True:\n raise TypeError('Chebyshev Kernel not available for sparse data.See `sklearn.metrics.pairwise_distances`.')\n return -CArray(metrics.pairwise.pairwise_distances(x.get_data(), self._rv.get_data(), metric='chebyshev'))\n\n def _backward(self, w=None):\n \"\"\"Calculate gradients of Chebyshev kernel wrt cached vector 'x'. The gradient of the negative Chebyshev distance is given by:: dK(rv,x)/dx = - sign(rv - x) Parameters ---------- w : CArray of shape (1, n_rv) or None if CArray, it is pre-multiplied to the gradient of the module, as in standard reverse-mode autodiff. Returns ------- kernel_gradient : CArray Kernel gradient of rv with respect to vector x, shape (n_rv, n_features) if n_rv > 1 and w is None, else (1, n_features).\"\"\"\n if not self._cached_x.is_vector_like:\n raise ValueError('kernel gradient can be computed only wrt vector-like arrays.')\n if self._rv is None:\n raise ValueError('Please run forward with caching=True or set `rv` first.')\n if self._cached_x.issparse is True or self._rv.issparse is True:\n raise TypeError('Chebyshev Kernel not available for sparse data.See `sklearn.metrics.pairwise_distances`.')\n diff = self._rv - self._cached_x\n m = abs(diff).max(axis=1)\n grad = CArray.zeros(shape=diff.shape)\n grad[diff >= m] = 1\n grad[diff <= -m] = -1\n return grad if w is None else w.dot(grad)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/secml/ml/kernels/c_kernel_chebyshev_distance.py", "source_repo": "Cinofix/secml", "split": "val", "star_events_count": 0} {"blob_id": "57d7c6e3d3fce553f2a7c46a93511e196f9136a9", "bodies": ["super().__init__()\nlogger.debug('Create PaddleCLSConnectionHandler to process the cls request')\nself._inputs = OrderedDict()\nself._outputs = OrderedDict()\nself.cls_engine = cls_engine\nself.executor = self.cls_engine.executor\nself._conf = self.executor._conf\nself._label_list = self.executor._label_list\nself.model = self.executor.model", "self.preprocess(io.BytesIO(audio_data))\nst = time.time()\nself.infer()\ninfer_time = time.time() - st\nlogger.debug('inference time: {}'.format(infer_time))\nlogger.info('cls engine type: python')", "assert topk <= len(self._label_list), 'Value of topk is larger than number of labels.'\nresult = self._outputs['logits'].squeeze(0).numpy()\ntopk_idx = (-result).argsort()[:topk]\ntopk_results = []\nfor idx in topk_idx:\n res = {}\n label, score = (self._label_list[idx], result[idx])\n res['class_name'] = label\n res['prob'] = score\n topk_results.append(res)\nreturn topk_results"], "bodies_text": "<|body_start_0|>\n super().__init__()\n logger.debug('Create PaddleCLSConnectionHandler to process the cls request')\n self._inputs = OrderedDict()\n self._outputs = OrderedDict()\n self.cls_engine = cls_engine\n self.executor = self.cls_engine.executor\n self._conf = self.executor._conf\n self._label_list = 
self.executor._label_list\n self.model = self.executor.model\n<|end_body_0|>\n\n<|body_start_1|>\n self.preprocess(io.BytesIO(audio_data))\n st = time.time()\n self.infer()\n infer_time = time.time() - st\n logger.debug('inference time: {}'.format(infer_time))\n logger.info('cls engine type: python')\n<|end_body_1|>\n\n<|body_start_2|>\n assert topk <= len(self._label_list), 'Value of topk is larger than number of labels.'\n result = self._outputs['logits'].squeeze(0).numpy()\n topk_idx = (-result).argsort()[:topk]\n topk_results = []\n for idx in topk_idx:\n res = {}\n label, score = (self._label_list[idx], result[idx])\n res['class_name'] = label\n res['prob'] = score\n topk_results.append(res)\n return topk_results\n<|end_body_2|>\n", "class_docstring": "", "class_name": "PaddleCLSConnectionHandler", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PaddleCLSConnectionHandler:\n\n def __init__(self, cls_engine):\n \"\"\"The PaddleSpeech CLS Server Connection Handler This connection process every cls server request Args: cls_engine (CLSEngine): The CLS engine\"\"\"\n <|body_0|>\n\n def run(self, audio_data):\n \"\"\"engine run Args: audio_data (bytes): base64.b64decode\"\"\"\n <|body_1|>\n\n def postprocess(self, topk: int):\n \"\"\"postprocess\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n logger.debug('Create PaddleCLSConnectionHandler to process the cls request')\n self._inputs = OrderedDict()\n self._outputs = OrderedDict()\n self.cls_engine = cls_engine\n self.executor = self.cls_engine.executor\n self._conf = self.executor._conf\n self._label_list = self.executor._label_list\n self.model = self.executor.model\n<|end_body_0|>\n\n<|body_start_1|>\n self.preprocess(io.BytesIO(audio_data))\n st = time.time()\n self.infer()\n infer_time = time.time() - st\n logger.debug('inference time: {}'.format(infer_time))\n logger.info('cls engine type: python')\n<|end_body_1|>\n\n<|body_start_2|>\n assert topk <= len(self._label_list), 'Value of topk is larger than number of labels.'\n result = self._outputs['logits'].squeeze(0).numpy()\n topk_idx = (-result).argsort()[:topk]\n topk_results = []\n for idx in topk_idx:\n res = {}\n label, score = (self._label_list[idx], result[idx])\n res['class_name'] = label\n res['prob'] = score\n topk_results.append(res)\n return topk_results\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000109", "length_bytes": 4065, "license_type": "permissive", "methods": [{"docstring": "The PaddleSpeech CLS Server Connection Handler This connection process every cls server request Args: cls_engine (CLSEngine): The CLS engine", "name": "__init__", "signature": "def __init__(self, cls_engine)"}, {"docstring": "engine run Args: audio_data (bytes): base64.b64decode", "name": "run", "signature": "def run(self, audio_data)"}, {"docstring": "postprocess", "name": "postprocess", "signature": "def postprocess(self, topk: int)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006214", "prompt": "Implement the Python class `PaddleCLSConnectionHandler` described below.\n\nClass description:\nImplement the PaddleCLSConnectionHandler class.\n\nMethod signatures and docstrings:\n- def __init__(self, cls_engine): The PaddleSpeech CLS Server Connection Handler This connection process every cls server request Args: cls_engine (CLSEngine): The CLS engine\n- def run(self, audio_data): engine run Args: audio_data (bytes): base64.b64decode\n- def 
postprocess(self, topk: int): postprocess", "prompted_full_text": "Implement the Python class `PaddleCLSConnectionHandler` described below.\n\nClass description:\nImplement the PaddleCLSConnectionHandler class.\n\nMethod signatures and docstrings:\n- def __init__(self, cls_engine): The PaddleSpeech CLS Server Connection Handler This connection process every cls server request Args: cls_engine (CLSEngine): The CLS engine\n- def run(self, audio_data): engine run Args: audio_data (bytes): base64.b64decode\n- def postprocess(self, topk: int): postprocess\n\n<|skeleton|>\nclass PaddleCLSConnectionHandler:\n\n def __init__(self, cls_engine):\n \"\"\"The PaddleSpeech CLS Server Connection Handler This connection process every cls server request Args: cls_engine (CLSEngine): The CLS engine\"\"\"\n <|body_0|>\n\n def run(self, audio_data):\n \"\"\"engine run Args: audio_data (bytes): base64.b64decode\"\"\"\n <|body_1|>\n\n def postprocess(self, topk: int):\n \"\"\"postprocess\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n logger.debug('Create PaddleCLSConnectionHandler to process the cls request')\n self._inputs = OrderedDict()\n self._outputs = OrderedDict()\n self.cls_engine = cls_engine\n self.executor = self.cls_engine.executor\n self._conf = self.executor._conf\n self._label_list = self.executor._label_list\n self.model = self.executor.model\n<|end_body_0|>\n\n<|body_start_1|>\n self.preprocess(io.BytesIO(audio_data))\n st = time.time()\n self.infer()\n infer_time = time.time() - st\n logger.debug('inference time: {}'.format(infer_time))\n logger.info('cls engine type: python')\n<|end_body_1|>\n\n<|body_start_2|>\n assert topk <= len(self._label_list), 'Value of topk is larger than number of labels.'\n result = self._outputs['logits'].squeeze(0).numpy()\n topk_idx = (-result).argsort()[:topk]\n topk_results = []\n for idx in topk_idx:\n res = {}\n label, score = (self._label_list[idx], result[idx])\n res['class_name'] = label\n res['prob'] = score\n topk_results.append(res)\n return topk_results\n<|end_body_2|>\n", "revision_id": "17854a04d43c231eff66bfed9d6aa55e94a29e79", "skeleton": "<|skeleton|>\nclass PaddleCLSConnectionHandler:\n\n def __init__(self, cls_engine):\n \"\"\"The PaddleSpeech CLS Server Connection Handler This connection process every cls server request Args: cls_engine (CLSEngine): The CLS engine\"\"\"\n <|body_0|>\n\n def run(self, audio_data):\n \"\"\"engine run Args: audio_data (bytes): base64.b64decode\"\"\"\n <|body_1|>\n\n def postprocess(self, topk: int):\n \"\"\"postprocess\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PaddleCLSConnectionHandler:\n def __init__(self, cls_engine):\n \"\"\"The PaddleSpeech CLS Server Connection Handler This connection process every cls server request Args: cls_engine (CLSEngine): The CLS engine\"\"\"\n super().__init__()\n logger.debug('Create PaddleCLSConnectionHandler to process the cls request')\n self._inputs = OrderedDict()\n self._outputs = OrderedDict()\n self.cls_engine = cls_engine\n self.executor = self.cls_engine.executor\n self._conf = self.executor._conf\n self._label_list = self.executor._label_list\n self.model = self.executor.model\n\n def run(self, audio_data):\n \"\"\"engine run Args: audio_data (bytes): base64.b64decode\"\"\"\n self.preprocess(io.BytesIO(audio_data))\n st = time.time()\n self.infer()\n infer_time = time.time() - st\n logger.debug('inference time: 
{}'.format(infer_time))\n logger.info('cls engine type: python')\n\n def postprocess(self, topk: int):\n \"\"\"postprocess\"\"\"\n assert topk <= len(self._label_list), 'Value of topk is larger than number of labels.'\n result = self._outputs['logits'].squeeze(0).numpy()\n topk_idx = (-result).argsort()[:topk]\n topk_results = []\n for idx in topk_idx:\n res = {}\n label, score = (self._label_list[idx], result[idx])\n res['class_name'] = label\n res['prob'] = score\n topk_results.append(res)\n return topk_results\n", "source": "the_stack_v2_python_sparse", "source_path": "paddlespeech/server/engine/cls/python/cls_engine.py", "source_repo": "anniyanvr/DeepSpeech-1", "split": "val", "star_events_count": 0} {"blob_id": "2c9de69a49f13f14a8e37bb4b44036914a38c7c6", "bodies": ["cert_setting = WxCertSettings.objects.filter(owner_id=request.manager.id)\nresponse = create_response(200)\nif cert_setting.count() > 0:\n response.data = {'flag': True}\nelse:\n response.data = {'flag': False}\nreturn response.get_response()", "cert_setting = WxCertSettings.objects.filter(owner_id=request.manager.id)\nif not cert_setting.count() > 0:\n return HttpResponseRedirect('/mall2/weixin_certificate/')\nif 'id' in request.GET:\n project_id = 'new_app:group:%s' % request.GET.get('related_page_id', 0)\n try:\n group = app_models.Group.objects.get(id=request.GET['id'])\n except:\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': mall_export.get_promotion_and_apps_second_navs(request), 'second_nav_name': mall_export.MALL_APPS_SECOND_NAV, 'third_nav_name': 'groups', 'is_deleted_data': True})\n return render_to_response('group/templates/editor/workbench.html', c)\n is_create_new_data = False\nelse:\n group = None\n is_create_new_data = True\n project_id = 'new_app:group:0'\n_, app_name, real_project_id = project_id.split(':')\nif real_project_id != '0':\n pagestore = pagestore_manager.get_pagestore('mongo')\n pages = pagestore.get_page_components(real_project_id)\n if not pages:\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': mall_export.get_promotion_and_apps_second_navs(request), 'second_nav_name': mall_export.MALL_APPS_SECOND_NAV, 'third_nav_name': 'groups', 'is_deleted_data': True})\n return render_to_response('group/templates/editor/workbench.html', c)\nc = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': mall_export.get_promotion_and_apps_second_navs(request), 'second_nav_name': mall_export.MALL_APPS_SECOND_NAV, 'third_nav_name': 'groups', 'group': group, 'is_create_new_data': is_create_new_data, 'project_id': project_id})\nreturn render_to_response('group/templates/editor/workbench.html', c)", "data = request_util.get_fields_to_be_save(request)\ngroup = app_models.Group(**data)\ngroup.save()\nerror_msg = None\ndata = json.loads(group.to_json())\ndata['id'] = data['_id']['$oid']\nif error_msg:\n data['error_msg'] = error_msg\nresponse = create_response(200)\nresponse.data = data\nreturn response.get_response()", "data = request_util.get_fields_to_be_save(request)\nupdate_data = {}\nupdate_fields = set(['name', 'start_time', 'end_time', 'group_dict', 'status', 'product_id', 'product_img', 'product_name', 'product_price', 'product_socks', 'product_sales', 'product_usercode', 'product_create_at', 'rules', 'material_image', 'share_description'])\nfor key, value in data.items():\n if key in update_fields:\n update_data['set__' + key] = value\napp_models.Group.objects(id=request.POST['id']).update(**update_data)\nresponse = 
create_response(200)\nreturn response.get_response()", "app_models.Group.objects(id=request.POST['id']).update(set__is_use=app_models.IS_USE_NO)\nresponse = create_response(200)\nreturn response.get_response()"], "bodies_text": "<|body_start_0|>\n cert_setting = WxCertSettings.objects.filter(owner_id=request.manager.id)\n response = create_response(200)\n if cert_setting.count() > 0:\n response.data = {'flag': True}\n else:\n response.data = {'flag': False}\n return response.get_response()\n<|end_body_0|>\n\n<|body_start_1|>\n cert_setting = WxCertSettings.objects.filter(owner_id=request.manager.id)\n if not cert_setting.count() > 0:\n return HttpResponseRedirect('/mall2/weixin_certificate/')\n if 'id' in request.GET:\n project_id = 'new_app:group:%s' % request.GET.get('related_page_id', 0)\n try:\n group = app_models.Group.objects.get(id=request.GET['id'])\n except:\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': mall_export.get_promotion_and_apps_second_navs(request), 'second_nav_name': mall_export.MALL_APPS_SECOND_NAV, 'third_nav_name': 'groups', 'is_deleted_data': True})\n return render_to_response('group/templates/editor/workbench.html', c)\n is_create_new_data = False\n else:\n group = None\n is_create_new_data = True\n project_id = 'new_app:group:0'\n _, app_name, real_project_id = project_id.split(':')\n if real_project_id != '0':\n pagestore = pagestore_manager.get_pagestore('mongo')\n pages = pagestore.get_page_components(real_project_id)\n if not pages:\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': mall_export.get_promotion_and_apps_second_navs(request), 'second_nav_name': mall_export.MALL_APPS_SECOND_NAV, 'third_nav_name': 'groups', 'is_deleted_data': True})\n return render_to_response('group/templates/editor/workbench.html', c)\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': mall_export.get_promotion_and_apps_second_navs(request), 'second_nav_name': mall_export.MALL_APPS_SECOND_NAV, 'third_nav_name': 'groups', 'group': group, 'is_create_new_data': is_create_new_data, 'project_id': project_id})\n return render_to_response('group/templates/editor/workbench.html', c)\n<|end_body_1|>\n\n<|body_start_2|>\n data = request_util.get_fields_to_be_save(request)\n group = app_models.Group(**data)\n group.save()\n error_msg = None\n data = json.loads(group.to_json())\n data['id'] = data['_id']['$oid']\n if error_msg:\n data['error_msg'] = error_msg\n response = create_response(200)\n response.data = data\n return response.get_response()\n<|end_body_2|>\n\n<|body_start_3|>\n data = request_util.get_fields_to_be_save(request)\n update_data = {}\n update_fields = set(['name', 'start_time', 'end_time', 'group_dict', 'status', 'product_id', 'product_img', 'product_name', 'product_price', 'product_socks', 'product_sales', 'product_usercode', 'product_create_at', 'rules', 'material_image', 'share_description'])\n for key, value in data.items():\n if key in update_fields:\n update_data['set__' + key] = value\n app_models.Group.objects(id=request.POST['id']).update(**update_data)\n response = create_response(200)\n return response.get_response()\n<|end_body_3|>\n\n<|body_start_4|>\n app_models.Group.objects(id=request.POST['id']).update(set__is_use=app_models.IS_USE_NO)\n response = create_response(200)\n return response.get_response()\n<|end_body_4|>\n", "class_docstring": "", "class_name": "Group", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass 
Group:\n\n def api_get(request):\n \"\"\"响应GET\"\"\"\n <|body_0|>\n\n def get(request):\n \"\"\"响应GET\"\"\"\n <|body_1|>\n\n def api_put(request):\n \"\"\"响应PUT\"\"\"\n <|body_2|>\n\n def api_post(request):\n \"\"\"响应POST\"\"\"\n <|body_3|>\n\n def api_delete(request):\n \"\"\"响应DELETE\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cert_setting = WxCertSettings.objects.filter(owner_id=request.manager.id)\n response = create_response(200)\n if cert_setting.count() > 0:\n response.data = {'flag': True}\n else:\n response.data = {'flag': False}\n return response.get_response()\n<|end_body_0|>\n\n<|body_start_1|>\n cert_setting = WxCertSettings.objects.filter(owner_id=request.manager.id)\n if not cert_setting.count() > 0:\n return HttpResponseRedirect('/mall2/weixin_certificate/')\n if 'id' in request.GET:\n project_id = 'new_app:group:%s' % request.GET.get('related_page_id', 0)\n try:\n group = app_models.Group.objects.get(id=request.GET['id'])\n except:\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': mall_export.get_promotion_and_apps_second_navs(request), 'second_nav_name': mall_export.MALL_APPS_SECOND_NAV, 'third_nav_name': 'groups', 'is_deleted_data': True})\n return render_to_response('group/templates/editor/workbench.html', c)\n is_create_new_data = False\n else:\n group = None\n is_create_new_data = True\n project_id = 'new_app:group:0'\n _, app_name, real_project_id = project_id.split(':')\n if real_project_id != '0':\n pagestore = pagestore_manager.get_pagestore('mongo')\n pages = pagestore.get_page_components(real_project_id)\n if not pages:\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': mall_export.get_promotion_and_apps_second_navs(request), 'second_nav_name': mall_export.MALL_APPS_SECOND_NAV, 'third_nav_name': 'groups', 'is_deleted_data': True})\n return render_to_response('group/templates/editor/workbench.html', c)\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': mall_export.get_promotion_and_apps_second_navs(request), 'second_nav_name': mall_export.MALL_APPS_SECOND_NAV, 'third_nav_name': 'groups', 'group': group, 'is_create_new_data': is_create_new_data, 'project_id': project_id})\n return render_to_response('group/templates/editor/workbench.html', c)\n<|end_body_1|>\n\n<|body_start_2|>\n data = request_util.get_fields_to_be_save(request)\n group = app_models.Group(**data)\n group.save()\n error_msg = None\n data = json.loads(group.to_json())\n data['id'] = data['_id']['$oid']\n if error_msg:\n data['error_msg'] = error_msg\n response = create_response(200)\n response.data = data\n return response.get_response()\n<|end_body_2|>\n\n<|body_start_3|>\n data = request_util.get_fields_to_be_save(request)\n update_data = {}\n update_fields = set(['name', 'start_time', 'end_time', 'group_dict', 'status', 'product_id', 'product_img', 'product_name', 'product_price', 'product_socks', 'product_sales', 'product_usercode', 'product_create_at', 'rules', 'material_image', 'share_description'])\n for key, value in data.items():\n if key in update_fields:\n update_data['set__' + key] = value\n app_models.Group.objects(id=request.POST['id']).update(**update_data)\n response = create_response(200)\n return response.get_response()\n<|end_body_3|>\n\n<|body_start_4|>\n app_models.Group.objects(id=request.POST['id']).update(set__is_use=app_models.IS_USE_NO)\n response = create_response(200)\n return response.get_response()\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000110", "length_bytes": 
4576, "license_type": "no_license", "methods": [{"docstring": "响应GET", "name": "api_get", "signature": "def api_get(request)"}, {"docstring": "响应GET", "name": "get", "signature": "def get(request)"}, {"docstring": "响应PUT", "name": "api_put", "signature": "def api_put(request)"}, {"docstring": "响应POST", "name": "api_post", "signature": "def api_post(request)"}, {"docstring": "响应DELETE", "name": "api_delete", "signature": "def api_delete(request)"}], "n_methods": 5, "prompt": "Implement the Python class `Group` described below.\n\nClass description:\nImplement the Group class.\n\nMethod signatures and docstrings:\n- def api_get(request): 响应GET\n- def get(request): 响应GET\n- def api_put(request): 响应PUT\n- def api_post(request): 响应POST\n- def api_delete(request): 响应DELETE", "prompted_full_text": "Implement the Python class `Group` described below.\n\nClass description:\nImplement the Group class.\n\nMethod signatures and docstrings:\n- def api_get(request): 响应GET\n- def get(request): 响应GET\n- def api_put(request): 响应PUT\n- def api_post(request): 响应POST\n- def api_delete(request): 响应DELETE\n\n<|skeleton|>\nclass Group:\n\n def api_get(request):\n \"\"\"响应GET\"\"\"\n <|body_0|>\n\n def get(request):\n \"\"\"响应GET\"\"\"\n <|body_1|>\n\n def api_put(request):\n \"\"\"响应PUT\"\"\"\n <|body_2|>\n\n def api_post(request):\n \"\"\"响应POST\"\"\"\n <|body_3|>\n\n def api_delete(request):\n \"\"\"响应DELETE\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cert_setting = WxCertSettings.objects.filter(owner_id=request.manager.id)\n response = create_response(200)\n if cert_setting.count() > 0:\n response.data = {'flag': True}\n else:\n response.data = {'flag': False}\n return response.get_response()\n<|end_body_0|>\n\n<|body_start_1|>\n cert_setting = WxCertSettings.objects.filter(owner_id=request.manager.id)\n if not cert_setting.count() > 0:\n return HttpResponseRedirect('/mall2/weixin_certificate/')\n if 'id' in request.GET:\n project_id = 'new_app:group:%s' % request.GET.get('related_page_id', 0)\n try:\n group = app_models.Group.objects.get(id=request.GET['id'])\n except:\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': mall_export.get_promotion_and_apps_second_navs(request), 'second_nav_name': mall_export.MALL_APPS_SECOND_NAV, 'third_nav_name': 'groups', 'is_deleted_data': True})\n return render_to_response('group/templates/editor/workbench.html', c)\n is_create_new_data = False\n else:\n group = None\n is_create_new_data = True\n project_id = 'new_app:group:0'\n _, app_name, real_project_id = project_id.split(':')\n if real_project_id != '0':\n pagestore = pagestore_manager.get_pagestore('mongo')\n pages = pagestore.get_page_components(real_project_id)\n if not pages:\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': mall_export.get_promotion_and_apps_second_navs(request), 'second_nav_name': mall_export.MALL_APPS_SECOND_NAV, 'third_nav_name': 'groups', 'is_deleted_data': True})\n return render_to_response('group/templates/editor/workbench.html', c)\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': mall_export.get_promotion_and_apps_second_navs(request), 'second_nav_name': mall_export.MALL_APPS_SECOND_NAV, 'third_nav_name': 'groups', 'group': group, 'is_create_new_data': is_create_new_data, 'project_id': project_id})\n return render_to_response('group/templates/editor/workbench.html', c)\n<|end_body_1|>\n\n<|body_start_2|>\n data = request_util.get_fields_to_be_save(request)\n group = app_models.Group(**data)\n 
group.save()\n error_msg = None\n data = json.loads(group.to_json())\n data['id'] = data['_id']['$oid']\n if error_msg:\n data['error_msg'] = error_msg\n response = create_response(200)\n response.data = data\n return response.get_response()\n<|end_body_2|>\n\n<|body_start_3|>\n data = request_util.get_fields_to_be_save(request)\n update_data = {}\n update_fields = set(['name', 'start_time', 'end_time', 'group_dict', 'status', 'product_id', 'product_img', 'product_name', 'product_price', 'product_socks', 'product_sales', 'product_usercode', 'product_create_at', 'rules', 'material_image', 'share_description'])\n for key, value in data.items():\n if key in update_fields:\n update_data['set__' + key] = value\n app_models.Group.objects(id=request.POST['id']).update(**update_data)\n response = create_response(200)\n return response.get_response()\n<|end_body_3|>\n\n<|body_start_4|>\n app_models.Group.objects(id=request.POST['id']).update(set__is_use=app_models.IS_USE_NO)\n response = create_response(200)\n return response.get_response()\n<|end_body_4|>\n", "revision_id": "8b2f7befe92841bcc35e0e60cac5958ef3f3af54", "skeleton": "<|skeleton|>\nclass Group:\n\n def api_get(request):\n \"\"\"响应GET\"\"\"\n <|body_0|>\n\n def get(request):\n \"\"\"响应GET\"\"\"\n <|body_1|>\n\n def api_put(request):\n \"\"\"响应PUT\"\"\"\n <|body_2|>\n\n def api_post(request):\n \"\"\"响应POST\"\"\"\n <|body_3|>\n\n def api_delete(request):\n \"\"\"响应DELETE\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Group:\n def api_get(request):\n \"\"\"响应GET\"\"\"\n cert_setting = WxCertSettings.objects.filter(owner_id=request.manager.id)\n response = create_response(200)\n if cert_setting.count() > 0:\n response.data = {'flag': True}\n else:\n response.data = {'flag': False}\n return response.get_response()\n\n def get(request):\n \"\"\"响应GET\"\"\"\n cert_setting = WxCertSettings.objects.filter(owner_id=request.manager.id)\n if not cert_setting.count() > 0:\n return HttpResponseRedirect('/mall2/weixin_certificate/')\n if 'id' in request.GET:\n project_id = 'new_app:group:%s' % request.GET.get('related_page_id', 0)\n try:\n group = app_models.Group.objects.get(id=request.GET['id'])\n except:\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': mall_export.get_promotion_and_apps_second_navs(request), 'second_nav_name': mall_export.MALL_APPS_SECOND_NAV, 'third_nav_name': 'groups', 'is_deleted_data': True})\n return render_to_response('group/templates/editor/workbench.html', c)\n is_create_new_data = False\n else:\n group = None\n is_create_new_data = True\n project_id = 'new_app:group:0'\n _, app_name, real_project_id = project_id.split(':')\n if real_project_id != '0':\n pagestore = pagestore_manager.get_pagestore('mongo')\n pages = pagestore.get_page_components(real_project_id)\n if not pages:\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': mall_export.get_promotion_and_apps_second_navs(request), 'second_nav_name': mall_export.MALL_APPS_SECOND_NAV, 'third_nav_name': 'groups', 'is_deleted_data': True})\n return render_to_response('group/templates/editor/workbench.html', c)\n c = RequestContext(request, {'first_nav_name': FIRST_NAV, 'second_navs': mall_export.get_promotion_and_apps_second_navs(request), 'second_nav_name': mall_export.MALL_APPS_SECOND_NAV, 'third_nav_name': 'groups', 'group': group, 'is_create_new_data': is_create_new_data, 'project_id': project_id})\n 
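[Editor's note] The `api_post` body just shown updates only a whitelisted subset of the submitted fields by prefixing each surviving key with `set__`, which MongoEngine translates into a MongoDB $set operation. A minimal self-contained sketch of that pattern, with a hypothetical whitelist and a plain dict standing in for the request payload:

# Build MongoEngine-style atomic-update kwargs from an untrusted payload.
UPDATE_FIELDS = {'name', 'start_time', 'end_time', 'status'}  # hypothetical subset of the record's whitelist

def build_update_kwargs(data):
    # Keys outside the whitelist are silently dropped; the rest become
    # set__<field> keywords for Document.objects(...).update(**kwargs).
    return {'set__' + key: value for key, value in data.items() if key in UPDATE_FIELDS}

payload = {'name': 'spring sale', 'status': 1, 'is_admin': True}
print(build_update_kwargs(payload))  # {'set__name': 'spring sale', 'set__status': 1}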
return render_to_response('group/templates/editor/workbench.html', c)\n\n def api_put(request):\n \"\"\"响应PUT\"\"\"\n data = request_util.get_fields_to_be_save(request)\n group = app_models.Group(**data)\n group.save()\n error_msg = None\n data = json.loads(group.to_json())\n data['id'] = data['_id']['$oid']\n if error_msg:\n data['error_msg'] = error_msg\n response = create_response(200)\n response.data = data\n return response.get_response()\n\n def api_post(request):\n \"\"\"响应POST\"\"\"\n data = request_util.get_fields_to_be_save(request)\n update_data = {}\n update_fields = set(['name', 'start_time', 'end_time', 'group_dict', 'status', 'product_id', 'product_img', 'product_name', 'product_price', 'product_socks', 'product_sales', 'product_usercode', 'product_create_at', 'rules', 'material_image', 'share_description'])\n for key, value in data.items():\n if key in update_fields:\n update_data['set__' + key] = value\n app_models.Group.objects(id=request.POST['id']).update(**update_data)\n response = create_response(200)\n return response.get_response()\n\n def api_delete(request):\n \"\"\"响应DELETE\"\"\"\n app_models.Group.objects(id=request.POST['id']).update(set__is_use=app_models.IS_USE_NO)\n response = create_response(200)\n return response.get_response()\n", "source": "the_stack_v2_python_sparse", "source_path": "weapp/apps/customerized_apps/group/group.py", "source_repo": "chengdg/weizoom", "split": "val", "star_events_count": 1} {"blob_id": "60e091d1b582be3cd32147f3374be9d3ce03cb25", "bodies": ["self.foldername = foldername\nself.s3_config_json_filename = os.path.join(AWS_CRED_DIR, 'aws_s3_credentials.json')\nself.s3_util = S3Util(aws_cred_config_json_filename=self.s3_config_json_filename)\nself.msg_printer = wasabi.Printer()\nself.interact()", "choices = []\npath = pathlib.Path(self.foldername)\nfor dir in path.iterdir():\n choices.append(Choice(str(dir)))\nchoices.append(Choice('exit'))\nfolder_chose_question = questionary.select('Folder in the directory. Chose one to move to s3', qmark='❓', choices=choices)\nfolder_type_answer = folder_chose_question.ask()\nreturn folder_type_answer", "while True:\n answer = self.get_folder_choice()\n if answer == 'exit':\n break\n else:\n with self.msg_printer.loading(f'Uploading {answer} to s3'):\n folder_name = answer\n base_folder_name = pathlib.Path(answer).name\n self.s3_util.upload_folder(folder_name=answer, base_folder_name=base_folder_name)\n self.msg_printer.good(f'Moved folder {answer} to s3')\n deletion_answer = self.ask_deletion()\n if deletion_answer == 'yes':\n folder_path = pathlib.Path(answer)\n shutil.rmtree(folder_path)", "deletion_question = questionary.rawselect('Do you also want to delete the file locally. Caution! File will be removed locally', qmark='❓', choices=[Choice('yes'), Choice('no')])\ndeletion_answer = deletion_question.ask()\nreturn deletion_answer"], "bodies_text": "<|body_start_0|>\n self.foldername = foldername\n self.s3_config_json_filename = os.path.join(AWS_CRED_DIR, 'aws_s3_credentials.json')\n self.s3_util = S3Util(aws_cred_config_json_filename=self.s3_config_json_filename)\n self.msg_printer = wasabi.Printer()\n self.interact()\n<|end_body_0|>\n\n<|body_start_1|>\n choices = []\n path = pathlib.Path(self.foldername)\n for dir in path.iterdir():\n choices.append(Choice(str(dir)))\n choices.append(Choice('exit'))\n folder_chose_question = questionary.select('Folder in the directory. 
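[Editor's note] This S3OutputMove record wires an interactive loop around three prompts: choose a folder (or 'exit'), upload it, then confirm optional local deletion. Note that `ask_deletion` is declared without `self` in the source, so the `self.ask_deletion()` call in `interact` would raise a TypeError as written. A dependency-free sketch of the same control flow, with hypothetical stand-ins for the questionary prompts and the S3 upload:

import pathlib
import shutil

def interact(choices, upload, confirm_delete):
    # choices: an iterator of folder paths, standing in for the questionary
    # select prompt; the real class re-prompts until the user picks 'exit'.
    for answer in choices:
        if answer == 'exit':
            break
        upload(folder_name=answer, base_folder_name=pathlib.Path(answer).name)
        if confirm_delete(answer) == 'yes':
            shutil.rmtree(answer)  # caution: removes the local copy

# Scripted run: "upload" two folders, keep both locally, then exit.
interact(iter(['runs/exp1', 'runs/exp2', 'exit']),
         upload=lambda **kw: print('uploading', kw),
         confirm_delete=lambda _: 'no')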
Chose one to move to s3', qmark='❓', choices=choices)\n folder_type_answer = folder_chose_question.ask()\n return folder_type_answer\n<|end_body_1|>\n\n<|body_start_2|>\n while True:\n answer = self.get_folder_choice()\n if answer == 'exit':\n break\n else:\n with self.msg_printer.loading(f'Uploading {answer} to s3'):\n folder_name = answer\n base_folder_name = pathlib.Path(answer).name\n self.s3_util.upload_folder(folder_name=answer, base_folder_name=base_folder_name)\n self.msg_printer.good(f'Moved folder {answer} to s3')\n deletion_answer = self.ask_deletion()\n if deletion_answer == 'yes':\n folder_path = pathlib.Path(answer)\n shutil.rmtree(folder_path)\n<|end_body_2|>\n\n<|body_start_3|>\n deletion_question = questionary.rawselect('Do you also want to delete the file locally. Caution! File will be removed locally', qmark='❓', choices=[Choice('yes'), Choice('no')])\n deletion_answer = deletion_question.ask()\n return deletion_answer\n<|end_body_3|>\n", "class_docstring": "", "class_name": "S3OutputMove", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass S3OutputMove:\n\n def __init__(self, foldername: str):\n \"\"\"Provides an interactive way to move some folders to s3 Parameters ---------- foldername : str The folder name which will be moved to S3 bucket\"\"\"\n <|body_0|>\n\n def get_folder_choice(self):\n \"\"\"Goes through the folder and gets the choice on which folder should be moved Returns ------- str The folder which is chosen to be moved\"\"\"\n <|body_1|>\n\n def interact(self):\n \"\"\"Interacts with the user by providing various options\"\"\"\n <|body_2|>\n\n def ask_deletion() -> str:\n \"\"\"Since this is deletion, we want confirmation, just to be sure whether to keep the deleted folder locally or to remove it Returns ------- str An yes or no answer to the question\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.foldername = foldername\n self.s3_config_json_filename = os.path.join(AWS_CRED_DIR, 'aws_s3_credentials.json')\n self.s3_util = S3Util(aws_cred_config_json_filename=self.s3_config_json_filename)\n self.msg_printer = wasabi.Printer()\n self.interact()\n<|end_body_0|>\n\n<|body_start_1|>\n choices = []\n path = pathlib.Path(self.foldername)\n for dir in path.iterdir():\n choices.append(Choice(str(dir)))\n choices.append(Choice('exit'))\n folder_chose_question = questionary.select('Folder in the directory. Chose one to move to s3', qmark='❓', choices=choices)\n folder_type_answer = folder_chose_question.ask()\n return folder_type_answer\n<|end_body_1|>\n\n<|body_start_2|>\n while True:\n answer = self.get_folder_choice()\n if answer == 'exit':\n break\n else:\n with self.msg_printer.loading(f'Uploading {answer} to s3'):\n folder_name = answer\n base_folder_name = pathlib.Path(answer).name\n self.s3_util.upload_folder(folder_name=answer, base_folder_name=base_folder_name)\n self.msg_printer.good(f'Moved folder {answer} to s3')\n deletion_answer = self.ask_deletion()\n if deletion_answer == 'yes':\n folder_path = pathlib.Path(answer)\n shutil.rmtree(folder_path)\n<|end_body_2|>\n\n<|body_start_3|>\n deletion_question = questionary.rawselect('Do you also want to delete the file locally. Caution! 
File will be removed locally', qmark='❓', choices=[Choice('yes'), Choice('no')])\n deletion_answer = deletion_question.ask()\n return deletion_answer\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000111", "length_bytes": 3125, "license_type": "permissive", "methods": [{"docstring": "Provides an interactive way to move some folders to s3 Parameters ---------- foldername : str The folder name which will be moved to S3 bucket", "name": "__init__", "signature": "def __init__(self, foldername: str)"}, {"docstring": "Goes through the folder and gets the choice on which folder should be moved Returns ------- str The folder which is chosen to be moved", "name": "get_folder_choice", "signature": "def get_folder_choice(self)"}, {"docstring": "Interacts with the user by providing various options", "name": "interact", "signature": "def interact(self)"}, {"docstring": "Since this is deletion, we want confirmation, just to be sure whether to keep the deleted folder locally or to remove it Returns ------- str An yes or no answer to the question", "name": "ask_deletion", "signature": "def ask_deletion() -> str"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_006948", "prompt": "Implement the Python class `S3OutputMove` described below.\n\nClass description:\nImplement the S3OutputMove class.\n\nMethod signatures and docstrings:\n- def __init__(self, foldername: str): Provides an interactive way to move some folders to s3 Parameters ---------- foldername : str The folder name which will be moved to S3 bucket\n- def get_folder_choice(self): Goes through the folder and gets the choice on which folder should be moved Returns ------- str The folder which is chosen to be moved\n- def interact(self): Interacts with the user by providing various options\n- def ask_deletion() -> str: Since this is deletion, we want confirmation, just to be sure whether to keep the deleted folder locally or to remove it Returns ------- str An yes or no answer to the question", "prompted_full_text": "Implement the Python class `S3OutputMove` described below.\n\nClass description:\nImplement the S3OutputMove class.\n\nMethod signatures and docstrings:\n- def __init__(self, foldername: str): Provides an interactive way to move some folders to s3 Parameters ---------- foldername : str The folder name which will be moved to S3 bucket\n- def get_folder_choice(self): Goes through the folder and gets the choice on which folder should be moved Returns ------- str The folder which is chosen to be moved\n- def interact(self): Interacts with the user by providing various options\n- def ask_deletion() -> str: Since this is deletion, we want confirmation, just to be sure whether to keep the deleted folder locally or to remove it Returns ------- str An yes or no answer to the question\n\n<|skeleton|>\nclass S3OutputMove:\n\n def __init__(self, foldername: str):\n \"\"\"Provides an interactive way to move some folders to s3 Parameters ---------- foldername : str The folder name which will be moved to S3 bucket\"\"\"\n <|body_0|>\n\n def get_folder_choice(self):\n \"\"\"Goes through the folder and gets the choice on which folder should be moved Returns ------- str The folder which is chosen to be moved\"\"\"\n <|body_1|>\n\n def interact(self):\n \"\"\"Interacts with the user by providing various options\"\"\"\n <|body_2|>\n\n def ask_deletion() -> str:\n \"\"\"Since this is deletion, we want confirmation, just to be sure whether to keep the deleted folder locally or to remove it Returns ------- str An yes or no 
answer to the question\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.foldername = foldername\n self.s3_config_json_filename = os.path.join(AWS_CRED_DIR, 'aws_s3_credentials.json')\n self.s3_util = S3Util(aws_cred_config_json_filename=self.s3_config_json_filename)\n self.msg_printer = wasabi.Printer()\n self.interact()\n<|end_body_0|>\n\n<|body_start_1|>\n choices = []\n path = pathlib.Path(self.foldername)\n for dir in path.iterdir():\n choices.append(Choice(str(dir)))\n choices.append(Choice('exit'))\n folder_chose_question = questionary.select('Folder in the directory. Chose one to move to s3', qmark='❓', choices=choices)\n folder_type_answer = folder_chose_question.ask()\n return folder_type_answer\n<|end_body_1|>\n\n<|body_start_2|>\n while True:\n answer = self.get_folder_choice()\n if answer == 'exit':\n break\n else:\n with self.msg_printer.loading(f'Uploading {answer} to s3'):\n folder_name = answer\n base_folder_name = pathlib.Path(answer).name\n self.s3_util.upload_folder(folder_name=answer, base_folder_name=base_folder_name)\n self.msg_printer.good(f'Moved folder {answer} to s3')\n deletion_answer = self.ask_deletion()\n if deletion_answer == 'yes':\n folder_path = pathlib.Path(answer)\n shutil.rmtree(folder_path)\n<|end_body_2|>\n\n<|body_start_3|>\n deletion_question = questionary.rawselect('Do you also want to delete the file locally. Caution! File will be removed locally', qmark='❓', choices=[Choice('yes'), Choice('no')])\n deletion_answer = deletion_question.ask()\n return deletion_answer\n<|end_body_3|>\n", "revision_id": "1c061b99a35a9d8b565d9762aaaf5db979b50112", "skeleton": "<|skeleton|>\nclass S3OutputMove:\n\n def __init__(self, foldername: str):\n \"\"\"Provides an interactive way to move some folders to s3 Parameters ---------- foldername : str The folder name which will be moved to S3 bucket\"\"\"\n <|body_0|>\n\n def get_folder_choice(self):\n \"\"\"Goes through the folder and gets the choice on which folder should be moved Returns ------- str The folder which is chosen to be moved\"\"\"\n <|body_1|>\n\n def interact(self):\n \"\"\"Interacts with the user by providing various options\"\"\"\n <|body_2|>\n\n def ask_deletion() -> str:\n \"\"\"Since this is deletion, we want confirmation, just to be sure whether to keep the deleted folder locally or to remove it Returns ------- str An yes or no answer to the question\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class S3OutputMove:\n def __init__(self, foldername: str):\n \"\"\"Provides an interactive way to move some folders to s3 Parameters ---------- foldername : str The folder name which will be moved to S3 bucket\"\"\"\n self.foldername = foldername\n self.s3_config_json_filename = os.path.join(AWS_CRED_DIR, 'aws_s3_credentials.json')\n self.s3_util = S3Util(aws_cred_config_json_filename=self.s3_config_json_filename)\n self.msg_printer = wasabi.Printer()\n self.interact()\n\n def get_folder_choice(self):\n \"\"\"Goes through the folder and gets the choice on which folder should be moved Returns ------- str The folder which is chosen to be moved\"\"\"\n choices = []\n path = pathlib.Path(self.foldername)\n for dir in path.iterdir():\n choices.append(Choice(str(dir)))\n choices.append(Choice('exit'))\n folder_chose_question = questionary.select('Folder in the directory. 
Chose one to move to s3', qmark='❓', choices=choices)\n folder_type_answer = folder_chose_question.ask()\n return folder_type_answer\n\n def interact(self):\n \"\"\"Interacts with the user by providing various options\"\"\"\n while True:\n answer = self.get_folder_choice()\n if answer == 'exit':\n break\n else:\n with self.msg_printer.loading(f'Uploading {answer} to s3'):\n folder_name = answer\n base_folder_name = pathlib.Path(answer).name\n self.s3_util.upload_folder(folder_name=answer, base_folder_name=base_folder_name)\n self.msg_printer.good(f'Moved folder {answer} to s3')\n deletion_answer = self.ask_deletion()\n if deletion_answer == 'yes':\n folder_path = pathlib.Path(answer)\n shutil.rmtree(folder_path)\n\n def ask_deletion() -> str:\n \"\"\"Since this is deletion, we want confirmation, just to be sure whether to keep the deleted folder locally or to remove it Returns ------- str An yes or no answer to the question\"\"\"\n deletion_question = questionary.rawselect('Do you also want to delete the file locally. Caution! File will be removed locally', qmark='❓', choices=[Choice('yes'), Choice('no')])\n deletion_answer = deletion_question.ask()\n return deletion_answer\n", "source": "the_stack_v2_python_sparse", "source_path": "sciwing/cli/s3_mv_cli.py", "source_repo": "abhinavkashyap/sciwing", "split": "val", "star_events_count": 58} {"blob_id": "e955820313eabe3d29d4c56fc56d7e425b7eeb83", "bodies": ["super().__init__()\nif isinstance(output_size, int):\n output_size = (output_size, output_size)\nassert len(output_size) == 2\nassert isinstance(output_size[0], int) and isinstance(output_size[1], int)\nself.output_size = output_size\nif pooler_type == 'ROIAlign':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False) for scale in scales))\nelif pooler_type == 'ROIAlignV2':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True) for scale in scales))\nelif pooler_type == 'ROIPool':\n self.level_poolers = nn.ModuleList((RoIPool(output_size, spatial_scale=scale) for scale in scales))\nelif pooler_type == 'ROIAlignRotated':\n self.level_poolers = nn.ModuleList((ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio) for scale in scales))\nelse:\n raise ValueError('Unknown pooler type: {}'.format(pooler_type))\nmin_level = -math.log2(scales[0])\nmax_level = -math.log2(scales[-1])\nassert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level)), 'Featuremap stride is not power of 2!'\nself.min_level = int(min_level)\nself.max_level = int(max_level)\nassert len(scales) == self.max_level - self.min_level + 1, '[ROIPooler] Sizes of input featuremaps do not form a pyramid!'\nassert 0 <= self.min_level and self.min_level <= self.max_level\nself.canonical_level = canonical_level\nassert canonical_box_size > 0\nself.canonical_box_size = canonical_box_size", "num_level_assignments = len(self.level_poolers)\nif not is_fx_tracing():\n torch._assert(isinstance(x, list) and isinstance(box_lists, list), 'Arguments to pooler must be lists')\nassert_fx_safe(len(x) == num_level_assignments, 'unequal value, num_level_assignments={}, but x is list of {} Tensors'.format(num_level_assignments, len(x)))\nassert_fx_safe(len(box_lists) == x[0].size(0), 'unequal value, x[0] batch dim 0 is {}, but box_list has length {}'.format(x[0].size(0), len(box_lists)))\nif len(box_lists) == 0:\n return _create_zeros(None, x[0].shape[1], 
*self.output_size, x[0])\npooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists)\nif num_level_assignments == 1:\n return self.level_poolers[0](x[0], pooler_fmt_boxes)\nlevel_assignments = assign_boxes_to_levels(box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level)\nnum_channels = x[0].shape[1]\noutput_size = self.output_size[0]\noutput = _create_zeros(pooler_fmt_boxes, num_channels, output_size, output_size, x[0])\nfor level, pooler in enumerate(self.level_poolers):\n inds = nonzero_tuple(level_assignments == level)[0]\n pooler_fmt_boxes_level = pooler_fmt_boxes[inds]\n output.index_put_((inds,), pooler(x[level], pooler_fmt_boxes_level))\nreturn output"], "bodies_text": "<|body_start_0|>\n super().__init__()\n if isinstance(output_size, int):\n output_size = (output_size, output_size)\n assert len(output_size) == 2\n assert isinstance(output_size[0], int) and isinstance(output_size[1], int)\n self.output_size = output_size\n if pooler_type == 'ROIAlign':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False) for scale in scales))\n elif pooler_type == 'ROIAlignV2':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True) for scale in scales))\n elif pooler_type == 'ROIPool':\n self.level_poolers = nn.ModuleList((RoIPool(output_size, spatial_scale=scale) for scale in scales))\n elif pooler_type == 'ROIAlignRotated':\n self.level_poolers = nn.ModuleList((ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio) for scale in scales))\n else:\n raise ValueError('Unknown pooler type: {}'.format(pooler_type))\n min_level = -math.log2(scales[0])\n max_level = -math.log2(scales[-1])\n assert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level)), 'Featuremap stride is not power of 2!'\n self.min_level = int(min_level)\n self.max_level = int(max_level)\n assert len(scales) == self.max_level - self.min_level + 1, '[ROIPooler] Sizes of input featuremaps do not form a pyramid!'\n assert 0 <= self.min_level and self.min_level <= self.max_level\n self.canonical_level = canonical_level\n assert canonical_box_size > 0\n self.canonical_box_size = canonical_box_size\n<|end_body_0|>\n\n<|body_start_1|>\n num_level_assignments = len(self.level_poolers)\n if not is_fx_tracing():\n torch._assert(isinstance(x, list) and isinstance(box_lists, list), 'Arguments to pooler must be lists')\n assert_fx_safe(len(x) == num_level_assignments, 'unequal value, num_level_assignments={}, but x is list of {} Tensors'.format(num_level_assignments, len(x)))\n assert_fx_safe(len(box_lists) == x[0].size(0), 'unequal value, x[0] batch dim 0 is {}, but box_list has length {}'.format(x[0].size(0), len(box_lists)))\n if len(box_lists) == 0:\n return _create_zeros(None, x[0].shape[1], *self.output_size, x[0])\n pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists)\n if num_level_assignments == 1:\n return self.level_poolers[0](x[0], pooler_fmt_boxes)\n level_assignments = assign_boxes_to_levels(box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level)\n num_channels = x[0].shape[1]\n output_size = self.output_size[0]\n output = _create_zeros(pooler_fmt_boxes, num_channels, output_size, output_size, x[0])\n for level, pooler in enumerate(self.level_poolers):\n inds = nonzero_tuple(level_assignments == level)[0]\n pooler_fmt_boxes_level = pooler_fmt_boxes[inds]\n 
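[Editor's note] The per-level loop in ROIPooler's `forward` gathers the boxes assigned to each pyramid level, pools them against that level's feature map, and scatters the results back with `index_put_` so output rows keep the original box order. The gather/scatter skeleton in isolation, with a constant fill standing in for the real pooling op:

import torch

num_boxes, num_levels, feat_dim = 8, 3, 4
level_assignments = torch.randint(0, num_levels, (num_boxes,))
output = torch.zeros(num_boxes, feat_dim)

for level in range(num_levels):
    # Indices of the boxes routed to this level (gather step).
    inds = torch.nonzero(level_assignments == level, as_tuple=True)[0]
    # Stand-in for pooler(x[level], boxes_at_this_level).
    pooled = torch.full((len(inds), feat_dim), float(level))
    # Scatter results back into the rows matching the original box order.
    output.index_put_((inds,), pooled)

assert torch.equal(output[:, 0], level_assignments.float())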
output.index_put_((inds,), pooler(x[level], pooler_fmt_boxes_level))\n return output\n<|end_body_1|>\n", "class_docstring": "Region of interest feature map pooler that supports pooling from one or more feature maps.", "class_name": "ROIPooler", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ROIPooler:\n \"\"\"Region of interest feature map pooler that supports pooling from one or more feature maps.\"\"\"\n\n def __init__(self, output_size, scales, sampling_ratio, pooler_type, canonical_box_size=224, canonical_level=4):\n \"\"\"Args: output_size (int, tuple[int] or list[int]): output size of the pooled region, e.g., 14 x 14. If tuple or list is given, the length must be 2. scales (list[float]): The scale for each low-level pooling op relative to the input image. For a feature map with stride s relative to the input image, scale is defined as 1/s. The stride must be power of 2. When there are multiple scales, they must form a pyramid, i.e. they must be a monotically decreasing geometric sequence with a factor of 1/2. sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. pooler_type (string): Name of the type of pooling operation that should be applied. For instance, \"ROIPool\" or \"ROIAlignV2\". can\"\"\"\n <|body_0|>\n\n def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]):\n \"\"\"Args: x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those used to construct this module. box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. The box coordinates are defined on the original image and will be scaled by the `scales` argument of :class:`ROIPooler`. 
Returns: Tensor: A tensor of shape (M, C, output_size, output_size) where M is the total number of boxes aggregated over all N batch images and C is the number of channels in `x`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n if isinstance(output_size, int):\n output_size = (output_size, output_size)\n assert len(output_size) == 2\n assert isinstance(output_size[0], int) and isinstance(output_size[1], int)\n self.output_size = output_size\n if pooler_type == 'ROIAlign':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False) for scale in scales))\n elif pooler_type == 'ROIAlignV2':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True) for scale in scales))\n elif pooler_type == 'ROIPool':\n self.level_poolers = nn.ModuleList((RoIPool(output_size, spatial_scale=scale) for scale in scales))\n elif pooler_type == 'ROIAlignRotated':\n self.level_poolers = nn.ModuleList((ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio) for scale in scales))\n else:\n raise ValueError('Unknown pooler type: {}'.format(pooler_type))\n min_level = -math.log2(scales[0])\n max_level = -math.log2(scales[-1])\n assert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level)), 'Featuremap stride is not power of 2!'\n self.min_level = int(min_level)\n self.max_level = int(max_level)\n assert len(scales) == self.max_level - self.min_level + 1, '[ROIPooler] Sizes of input featuremaps do not form a pyramid!'\n assert 0 <= self.min_level and self.min_level <= self.max_level\n self.canonical_level = canonical_level\n assert canonical_box_size > 0\n self.canonical_box_size = canonical_box_size\n<|end_body_0|>\n\n<|body_start_1|>\n num_level_assignments = len(self.level_poolers)\n if not is_fx_tracing():\n torch._assert(isinstance(x, list) and isinstance(box_lists, list), 'Arguments to pooler must be lists')\n assert_fx_safe(len(x) == num_level_assignments, 'unequal value, num_level_assignments={}, but x is list of {} Tensors'.format(num_level_assignments, len(x)))\n assert_fx_safe(len(box_lists) == x[0].size(0), 'unequal value, x[0] batch dim 0 is {}, but box_list has length {}'.format(x[0].size(0), len(box_lists)))\n if len(box_lists) == 0:\n return _create_zeros(None, x[0].shape[1], *self.output_size, x[0])\n pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists)\n if num_level_assignments == 1:\n return self.level_poolers[0](x[0], pooler_fmt_boxes)\n level_assignments = assign_boxes_to_levels(box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level)\n num_channels = x[0].shape[1]\n output_size = self.output_size[0]\n output = _create_zeros(pooler_fmt_boxes, num_channels, output_size, output_size, x[0])\n for level, pooler in enumerate(self.level_poolers):\n inds = nonzero_tuple(level_assignments == level)[0]\n pooler_fmt_boxes_level = pooler_fmt_boxes[inds]\n output.index_put_((inds,), pooler(x[level], pooler_fmt_boxes_level))\n return output\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000112", "length_bytes": 11509, "license_type": "permissive", "methods": [{"docstring": "Args: output_size (int, tuple[int] or list[int]): output size of the pooled region, e.g., 14 x 14. If tuple or list is given, the length must be 2. scales (list[float]): The scale for each low-level pooling op relative to the input image. 
For a feature map with stride s relative to the input image, scale is defined as 1/s. The stride must be power of 2. When there are multiple scales, they must form a pyramid, i.e. they must be a monotically decreasing geometric sequence with a factor of 1/2. sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. pooler_type (string): Name of the type of pooling operation that should be applied. For instance, \"ROIPool\" or \"ROIAlignV2\". can", "name": "__init__", "signature": "def __init__(self, output_size, scales, sampling_ratio, pooler_type, canonical_box_size=224, canonical_level=4)"}, {"docstring": "Args: x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those used to construct this module. box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. The box coordinates are defined on the original image and will be scaled by the `scales` argument of :class:`ROIPooler`. Returns: Tensor: A tensor of shape (M, C, output_size, output_size) where M is the total number of boxes aggregated over all N batch images and C is the number of channels in `x`.", "name": "forward", "signature": "def forward(self, x: List[torch.Tensor], box_lists: List[Boxes])"}], "n_methods": 2, "prompt": "Implement the Python class `ROIPooler` described below.\n\nClass description:\nRegion of interest feature map pooler that supports pooling from one or more feature maps.\n\nMethod signatures and docstrings:\n- def __init__(self, output_size, scales, sampling_ratio, pooler_type, canonical_box_size=224, canonical_level=4): Args: output_size (int, tuple[int] or list[int]): output size of the pooled region, e.g., 14 x 14. If tuple or list is given, the length must be 2. scales (list[float]): The scale for each low-level pooling op relative to the input image. For a feature map with stride s relative to the input image, scale is defined as 1/s. The stride must be power of 2. When there are multiple scales, they must form a pyramid, i.e. they must be a monotically decreasing geometric sequence with a factor of 1/2. sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. pooler_type (string): Name of the type of pooling operation that should be applied. For instance, \"ROIPool\" or \"ROIAlignV2\". can\n- def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]): Args: x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those used to construct this module. box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. The box coordinates are defined on the original image and will be scaled by the `scales` argument of :class:`ROIPooler`. Returns: Tensor: A tensor of shape (M, C, output_size, output_size) where M is the total number of boxes aggregated over all N batch images and C is the number of channels in `x`.", "prompted_full_text": "Implement the Python class `ROIPooler` described below.\n\nClass description:\nRegion of interest feature map pooler that supports pooling from one or more feature maps.\n\nMethod signatures and docstrings:\n- def __init__(self, output_size, scales, sampling_ratio, pooler_type, canonical_box_size=224, canonical_level=4): Args: output_size (int, tuple[int] or list[int]): output size of the pooled region, e.g., 14 x 14. If tuple or list is given, the length must be 2. scales (list[float]): The scale for each low-level pooling op relative to the input image. 
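[Editor's note] `assign_boxes_to_levels` itself is outside this record; in detectron2 it implements the FPN paper's heuristic, mapping a box of area A to floor(canonical_level + log2(sqrt(A) / canonical_box_size)), clamped to [min_level, max_level]. The sketch below is an assumption-labeled scalar reconstruction, not the verbatim helper:

import math

def assign_level(box_area, min_level=2, max_level=5,
                 canonical_box_size=224, canonical_level=4):
    # A box of side canonical_box_size lands on canonical_level; every 2x
    # change in box side moves it one pyramid level up or down.
    level = math.floor(canonical_level +
                       math.log2(math.sqrt(box_area) / canonical_box_size + 1e-8))
    return min(max(level, min_level), max_level)

print(assign_level(224 * 224))  # -> 4, the canonical level
print(assign_level(112 * 112))  # -> 3, half the side means one level finer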
For a feature map with stride s relative to the input image, scale is defined as 1/s. The stride must be power of 2. When there are multiple scales, they must form a pyramid, i.e. they must be a monotically decreasing geometric sequence with a factor of 1/2. sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. pooler_type (string): Name of the type of pooling operation that should be applied. For instance, \"ROIPool\" or \"ROIAlignV2\". can\n- def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]): Args: x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those used to construct this module. box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. The box coordinates are defined on the original image and will be scaled by the `scales` argument of :class:`ROIPooler`. Returns: Tensor: A tensor of shape (M, C, output_size, output_size) where M is the total number of boxes aggregated over all N batch images and C is the number of channels in `x`.\n\n<|skeleton|>\nclass ROIPooler:\n \"\"\"Region of interest feature map pooler that supports pooling from one or more feature maps.\"\"\"\n\n def __init__(self, output_size, scales, sampling_ratio, pooler_type, canonical_box_size=224, canonical_level=4):\n \"\"\"Args: output_size (int, tuple[int] or list[int]): output size of the pooled region, e.g., 14 x 14. If tuple or list is given, the length must be 2. scales (list[float]): The scale for each low-level pooling op relative to the input image. For a feature map with stride s relative to the input image, scale is defined as 1/s. The stride must be power of 2. When there are multiple scales, they must form a pyramid, i.e. they must be a monotically decreasing geometric sequence with a factor of 1/2. sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. pooler_type (string): Name of the type of pooling operation that should be applied. For instance, \"ROIPool\" or \"ROIAlignV2\". can\"\"\"\n <|body_0|>\n\n def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]):\n \"\"\"Args: x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those used to construct this module. box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. The box coordinates are defined on the original image and will be scaled by the `scales` argument of :class:`ROIPooler`. 
Returns: Tensor: A tensor of shape (M, C, output_size, output_size) where M is the total number of boxes aggregated over all N batch images and C is the number of channels in `x`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n if isinstance(output_size, int):\n output_size = (output_size, output_size)\n assert len(output_size) == 2\n assert isinstance(output_size[0], int) and isinstance(output_size[1], int)\n self.output_size = output_size\n if pooler_type == 'ROIAlign':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False) for scale in scales))\n elif pooler_type == 'ROIAlignV2':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True) for scale in scales))\n elif pooler_type == 'ROIPool':\n self.level_poolers = nn.ModuleList((RoIPool(output_size, spatial_scale=scale) for scale in scales))\n elif pooler_type == 'ROIAlignRotated':\n self.level_poolers = nn.ModuleList((ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio) for scale in scales))\n else:\n raise ValueError('Unknown pooler type: {}'.format(pooler_type))\n min_level = -math.log2(scales[0])\n max_level = -math.log2(scales[-1])\n assert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level)), 'Featuremap stride is not power of 2!'\n self.min_level = int(min_level)\n self.max_level = int(max_level)\n assert len(scales) == self.max_level - self.min_level + 1, '[ROIPooler] Sizes of input featuremaps do not form a pyramid!'\n assert 0 <= self.min_level and self.min_level <= self.max_level\n self.canonical_level = canonical_level\n assert canonical_box_size > 0\n self.canonical_box_size = canonical_box_size\n<|end_body_0|>\n\n<|body_start_1|>\n num_level_assignments = len(self.level_poolers)\n if not is_fx_tracing():\n torch._assert(isinstance(x, list) and isinstance(box_lists, list), 'Arguments to pooler must be lists')\n assert_fx_safe(len(x) == num_level_assignments, 'unequal value, num_level_assignments={}, but x is list of {} Tensors'.format(num_level_assignments, len(x)))\n assert_fx_safe(len(box_lists) == x[0].size(0), 'unequal value, x[0] batch dim 0 is {}, but box_list has length {}'.format(x[0].size(0), len(box_lists)))\n if len(box_lists) == 0:\n return _create_zeros(None, x[0].shape[1], *self.output_size, x[0])\n pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists)\n if num_level_assignments == 1:\n return self.level_poolers[0](x[0], pooler_fmt_boxes)\n level_assignments = assign_boxes_to_levels(box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level)\n num_channels = x[0].shape[1]\n output_size = self.output_size[0]\n output = _create_zeros(pooler_fmt_boxes, num_channels, output_size, output_size, x[0])\n for level, pooler in enumerate(self.level_poolers):\n inds = nonzero_tuple(level_assignments == level)[0]\n pooler_fmt_boxes_level = pooler_fmt_boxes[inds]\n output.index_put_((inds,), pooler(x[level], pooler_fmt_boxes_level))\n return output\n<|end_body_1|>\n", "revision_id": "80307d2d5e06f06a8a677cc2653f23a4c56402ac", "skeleton": "<|skeleton|>\nclass ROIPooler:\n \"\"\"Region of interest feature map pooler that supports pooling from one or more feature maps.\"\"\"\n\n def __init__(self, output_size, scales, sampling_ratio, pooler_type, canonical_box_size=224, canonical_level=4):\n \"\"\"Args: output_size (int, tuple[int] or list[int]): output size of 
the pooled region, e.g., 14 x 14. If tuple or list is given, the length must be 2. scales (list[float]): The scale for each low-level pooling op relative to the input image. For a feature map with stride s relative to the input image, scale is defined as 1/s. The stride must be power of 2. When there are multiple scales, they must form a pyramid, i.e. they must be a monotically decreasing geometric sequence with a factor of 1/2. sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. pooler_type (string): Name of the type of pooling operation that should be applied. For instance, \"ROIPool\" or \"ROIAlignV2\". can\"\"\"\n <|body_0|>\n\n def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]):\n \"\"\"Args: x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those used to construct this module. box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. The box coordinates are defined on the original image and will be scaled by the `scales` argument of :class:`ROIPooler`. Returns: Tensor: A tensor of shape (M, C, output_size, output_size) where M is the total number of boxes aggregated over all N batch images and C is the number of channels in `x`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ROIPooler:\n \"\"\"Region of interest feature map pooler that supports pooling from one or more feature maps.\"\"\"\n\n def __init__(self, output_size, scales, sampling_ratio, pooler_type, canonical_box_size=224, canonical_level=4):\n \"\"\"Args: output_size (int, tuple[int] or list[int]): output size of the pooled region, e.g., 14 x 14. If tuple or list is given, the length must be 2. scales (list[float]): The scale for each low-level pooling op relative to the input image. For a feature map with stride s relative to the input image, scale is defined as 1/s. The stride must be power of 2. When there are multiple scales, they must form a pyramid, i.e. they must be a monotically decreasing geometric sequence with a factor of 1/2. sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. pooler_type (string): Name of the type of pooling operation that should be applied. For instance, \"ROIPool\" or \"ROIAlignV2\". 
can\"\"\"\n super().__init__()\n if isinstance(output_size, int):\n output_size = (output_size, output_size)\n assert len(output_size) == 2\n assert isinstance(output_size[0], int) and isinstance(output_size[1], int)\n self.output_size = output_size\n if pooler_type == 'ROIAlign':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False) for scale in scales))\n elif pooler_type == 'ROIAlignV2':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True) for scale in scales))\n elif pooler_type == 'ROIPool':\n self.level_poolers = nn.ModuleList((RoIPool(output_size, spatial_scale=scale) for scale in scales))\n elif pooler_type == 'ROIAlignRotated':\n self.level_poolers = nn.ModuleList((ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio) for scale in scales))\n else:\n raise ValueError('Unknown pooler type: {}'.format(pooler_type))\n min_level = -math.log2(scales[0])\n max_level = -math.log2(scales[-1])\n assert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level)), 'Featuremap stride is not power of 2!'\n self.min_level = int(min_level)\n self.max_level = int(max_level)\n assert len(scales) == self.max_level - self.min_level + 1, '[ROIPooler] Sizes of input featuremaps do not form a pyramid!'\n assert 0 <= self.min_level and self.min_level <= self.max_level\n self.canonical_level = canonical_level\n assert canonical_box_size > 0\n self.canonical_box_size = canonical_box_size\n\n def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]):\n \"\"\"Args: x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those used to construct this module. box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. The box coordinates are defined on the original image and will be scaled by the `scales` argument of :class:`ROIPooler`. 
Returns: Tensor: A tensor of shape (M, C, output_size, output_size) where M is the total number of boxes aggregated over all N batch images and C is the number of channels in `x`.\"\"\"\n num_level_assignments = len(self.level_poolers)\n if not is_fx_tracing():\n torch._assert(isinstance(x, list) and isinstance(box_lists, list), 'Arguments to pooler must be lists')\n assert_fx_safe(len(x) == num_level_assignments, 'unequal value, num_level_assignments={}, but x is list of {} Tensors'.format(num_level_assignments, len(x)))\n assert_fx_safe(len(box_lists) == x[0].size(0), 'unequal value, x[0] batch dim 0 is {}, but box_list has length {}'.format(x[0].size(0), len(box_lists)))\n if len(box_lists) == 0:\n return _create_zeros(None, x[0].shape[1], *self.output_size, x[0])\n pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists)\n if num_level_assignments == 1:\n return self.level_poolers[0](x[0], pooler_fmt_boxes)\n level_assignments = assign_boxes_to_levels(box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level)\n num_channels = x[0].shape[1]\n output_size = self.output_size[0]\n output = _create_zeros(pooler_fmt_boxes, num_channels, output_size, output_size, x[0])\n for level, pooler in enumerate(self.level_poolers):\n inds = nonzero_tuple(level_assignments == level)[0]\n pooler_fmt_boxes_level = pooler_fmt_boxes[inds]\n output.index_put_((inds,), pooler(x[level], pooler_fmt_boxes_level))\n return output\n", "source": "the_stack_v2_python_sparse", "source_path": "detectron2/modeling/poolers.py", "source_repo": "facebookresearch/detectron2", "split": "val", "star_events_count": 27469} {"blob_id": "fbbb0d6a9f0ee144b78aecc0f1a0cd0bad3c45ad", "bodies": ["data = self._current_data\nkwargs = super(ClusterSerializer, self).data_info()\nif kwargs and self._detail:\n pass\nreturn kwargs", "data = self._current_data\nhosts = data.host\nbest_host = None\nbest_perform = 0\nfor host in hosts:\n host_serializer = HostSystemSerializer(host).data\n free_mem_mb = host_serializer.get('free_mem_mb')\n if free_mem_mb > best_perform:\n best_perform = free_mem_mb\n best_host = host\nreturn best_host"], "bodies_text": "<|body_start_0|>\n data = self._current_data\n kwargs = super(ClusterSerializer, self).data_info()\n if kwargs and self._detail:\n pass\n return kwargs\n<|end_body_0|>\n\n<|body_start_1|>\n data = self._current_data\n hosts = data.host\n best_host = None\n best_perform = 0\n for host in hosts:\n host_serializer = HostSystemSerializer(host).data\n free_mem_mb = host_serializer.get('free_mem_mb')\n if free_mem_mb > best_perform:\n best_perform = free_mem_mb\n best_host = host\n return best_host\n<|end_body_1|>\n", "class_docstring": "集群数据处理", "class_name": "ClusterSerializer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ClusterSerializer:\n \"\"\"集群数据处理\"\"\"\n\n def data_info(self):\n \"\"\"* 返回字段 ** moid ** name\"\"\"\n <|body_0|>\n\n def best_host_of_mb(self):\n \"\"\"内存最优主机\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = self._current_data\n kwargs = super(ClusterSerializer, self).data_info()\n if kwargs and self._detail:\n pass\n return kwargs\n<|end_body_0|>\n\n<|body_start_1|>\n data = self._current_data\n hosts = data.host\n best_host = None\n best_perform = 0\n for host in hosts:\n host_serializer = HostSystemSerializer(host).data\n free_mem_mb = host_serializer.get('free_mem_mb')\n if free_mem_mb > best_perform:\n best_perform = free_mem_mb\n best_host = host\n 
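[Editor's note] `best_host_of_mb` in the ClusterSerializer record above is a hand-rolled arg-max over hosts by free memory, which also skips hosts reporting zero free MB because the running best starts at 0. With plain dicts standing in for the serialized hosts, the built-in `max` with a key function expresses the same selection:

hosts = [
    {'name': 'host-a', 'free_mem_mb': 2048},
    {'name': 'host-b', 'free_mem_mb': 8192},
    {'name': 'host-c', 'free_mem_mb': 4096},
]
# Mirror the loop's implicit "free_mem_mb > 0" filter, then take the arg-max.
candidates = [h for h in hosts if h['free_mem_mb'] > 0]
best_host = max(candidates, key=lambda h: h['free_mem_mb'], default=None)
print(best_host['name'])  # host-b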
return best_host\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000113", "length_bytes": 12928, "license_type": "no_license", "methods": [{"docstring": "* 返回字段 ** moid ** name", "name": "data_info", "signature": "def data_info(self)"}, {"docstring": "内存最优主机", "name": "best_host_of_mb", "signature": "def best_host_of_mb(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000211", "prompt": "Implement the Python class `ClusterSerializer` described below.\n\nClass description:\n集群数据处理\n\nMethod signatures and docstrings:\n- def data_info(self): * 返回字段 ** moid ** name\n- def best_host_of_mb(self): 内存最优主机", "prompted_full_text": "Implement the Python class `ClusterSerializer` described below.\n\nClass description:\n集群数据处理\n\nMethod signatures and docstrings:\n- def data_info(self): * 返回字段 ** moid ** name\n- def best_host_of_mb(self): 内存最优主机\n\n<|skeleton|>\nclass ClusterSerializer:\n \"\"\"集群数据处理\"\"\"\n\n def data_info(self):\n \"\"\"* 返回字段 ** moid ** name\"\"\"\n <|body_0|>\n\n def best_host_of_mb(self):\n \"\"\"内存最优主机\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = self._current_data\n kwargs = super(ClusterSerializer, self).data_info()\n if kwargs and self._detail:\n pass\n return kwargs\n<|end_body_0|>\n\n<|body_start_1|>\n data = self._current_data\n hosts = data.host\n best_host = None\n best_perform = 0\n for host in hosts:\n host_serializer = HostSystemSerializer(host).data\n free_mem_mb = host_serializer.get('free_mem_mb')\n if free_mem_mb > best_perform:\n best_perform = free_mem_mb\n best_host = host\n return best_host\n<|end_body_1|>\n", "revision_id": "639f11a91ee6e8b72883300cbf297ef4c0494d52", "skeleton": "<|skeleton|>\nclass ClusterSerializer:\n \"\"\"集群数据处理\"\"\"\n\n def data_info(self):\n \"\"\"* 返回字段 ** moid ** name\"\"\"\n <|body_0|>\n\n def best_host_of_mb(self):\n \"\"\"内存最优主机\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ClusterSerializer:\n \"\"\"集群数据处理\"\"\"\n\n def data_info(self):\n \"\"\"* 返回字段 ** moid ** name\"\"\"\n data = self._current_data\n kwargs = super(ClusterSerializer, self).data_info()\n if kwargs and self._detail:\n pass\n return kwargs\n\n def best_host_of_mb(self):\n \"\"\"内存最优主机\"\"\"\n data = self._current_data\n hosts = data.host\n best_host = None\n best_perform = 0\n for host in hosts:\n host_serializer = HostSystemSerializer(host).data\n free_mem_mb = host_serializer.get('free_mem_mb')\n if free_mem_mb > best_perform:\n best_perform = free_mem_mb\n best_host = host\n return best_host\n", "source": "the_stack_v2_python_sparse", "source_path": "ivmware/serializers.py", "source_repo": "caijb007/itmsp", "split": "val", "star_events_count": 0} {"blob_id": "5d0bbc4cfa1588273c832d2ecd83a09a22d122c4", "bodies": ["Parametre.__init__(self, 'carte', 'map')\nself.schema = ''\nself.aide_courte = \"affiche la carte de l'étendue\"\nself.aide_longue = \"Cette commande permet d'afficher un contexte représentant la carte (tronquée) de l'étendue. Les obstacles et les liens peuvent être édités simplement ici. 
Le contexte en lui-même propose beaucoup d'options et peut être difficile à manipuler pour commencer (certaines étendues sont bien plus grandes que la carte de base et il faut apprendre à naviguer dedans) mais de l'aide est mise à disposition pour vous aider à comprendre les différentes possibilités d'édition.\"", "cle = dic_masques['cle'].cle\nif cle not in type(self).importeur.salle.etendues.keys():\n personnage << \"|err|Cette clé {} n'existe pas.|ff|\".format(repr(cle))\n return\netendue = type(self).importeur.salle.etendues[cle]\nif not personnage.salle.coords.valide:\n personnage << \"|err|La salle où vous vous trouvez n'a pas de coordonnées valides.|ff|\"\n return\nx = int(personnage.salle.coords.x) - 15\ny = int(personnage.salle.coords.y) - 8\ncontexte = CarteEtendue(personnage.instance_connexion, x, y)\ncontexte.etendue = etendue\npersonnage.contexte_actuel.migrer_contexte(contexte)"], "bodies_text": "<|body_start_0|>\n Parametre.__init__(self, 'carte', 'map')\n self.schema = ''\n self.aide_courte = \"affiche la carte de l'étendue\"\n self.aide_longue = \"Cette commande permet d'afficher un contexte représentant la carte (tronquée) de l'étendue. Les obstacles et les liens peuvent être édités simplement ici. Le contexte en lui-même propose beaucoup d'options et peut être difficile à manipuler pour commencer (certaines étendues sont bien plus grandes que la carte de base et il faut apprendre à naviguer dedans) mais de l'aide est mise à disposition pour vous aider à comprendre les différentes possibilités d'édition.\"\n<|end_body_0|>\n\n<|body_start_1|>\n cle = dic_masques['cle'].cle\n if cle not in type(self).importeur.salle.etendues.keys():\n personnage << \"|err|Cette clé {} n'existe pas.|ff|\".format(repr(cle))\n return\n etendue = type(self).importeur.salle.etendues[cle]\n if not personnage.salle.coords.valide:\n personnage << \"|err|La salle où vous vous trouvez n'a pas de coordonnées valides.|ff|\"\n return\n x = int(personnage.salle.coords.x) - 15\n y = int(personnage.salle.coords.y) - 8\n contexte = CarteEtendue(personnage.instance_connexion, x, y)\n contexte.etendue = etendue\n personnage.contexte_actuel.migrer_contexte(contexte)\n<|end_body_1|>\n", "class_docstring": "Commande 'etendue carte'.", "class_name": "PrmCarte", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PrmCarte:\n \"\"\"Commande 'etendue carte'.\"\"\"\n\n def __init__(self):\n \"\"\"Constructeur du paramètre\"\"\"\n <|body_0|>\n\n def interpreter(self, personnage, dic_masques):\n \"\"\"Interprétation du paramètre\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Parametre.__init__(self, 'carte', 'map')\n self.schema = ''\n self.aide_courte = \"affiche la carte de l'étendue\"\n self.aide_longue = \"Cette commande permet d'afficher un contexte représentant la carte (tronquée) de l'étendue. Les obstacles et les liens peuvent être édités simplement ici. 
Le contexte en lui-même propose beaucoup d'options et peut être difficile à manipuler pour commencer (certaines étendues sont bien plus grandes que la carte de base et il faut apprendre à naviguer dedans) mais de l'aide est mise à disposition pour vous aider à comprendre les différentes possibilités d'édition.\"\n<|end_body_0|>\n\n<|body_start_1|>\n cle = dic_masques['cle'].cle\n if cle not in type(self).importeur.salle.etendues.keys():\n personnage << \"|err|Cette clé {} n'existe pas.|ff|\".format(repr(cle))\n return\n etendue = type(self).importeur.salle.etendues[cle]\n if not personnage.salle.coords.valide:\n personnage << \"|err|La salle où vous vous trouvez n'a pas de coordonnées valides.|ff|\"\n return\n x = int(personnage.salle.coords.x) - 15\n y = int(personnage.salle.coords.y) - 8\n contexte = CarteEtendue(personnage.instance_connexion, x, y)\n contexte.etendue = etendue\n personnage.contexte_actuel.migrer_contexte(contexte)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000114", "length_bytes": 3590, "license_type": "permissive", "methods": [{"docstring": "Constructeur du paramètre", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Interprétation du paramètre", "name": "interpreter", "signature": "def interpreter(self, personnage, dic_masques)"}], "n_methods": 2, "prompt": "Implement the Python class `PrmCarte` described below.\n\nClass description:\nCommande 'etendue carte'.\n\nMethod signatures and docstrings:\n- def __init__(self): Constructeur du paramètre\n- def interpreter(self, personnage, dic_masques): Interprétation du paramètre", "prompted_full_text": "Implement the Python class `PrmCarte` described below.\n\nClass description:\nCommande 'etendue carte'.\n\nMethod signatures and docstrings:\n- def __init__(self): Constructeur du paramètre\n- def interpreter(self, personnage, dic_masques): Interprétation du paramètre\n\n<|skeleton|>\nclass PrmCarte:\n \"\"\"Commande 'etendue carte'.\"\"\"\n\n def __init__(self):\n \"\"\"Constructeur du paramètre\"\"\"\n <|body_0|>\n\n def interpreter(self, personnage, dic_masques):\n \"\"\"Interprétation du paramètre\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Parametre.__init__(self, 'carte', 'map')\n self.schema = ''\n self.aide_courte = \"affiche la carte de l'étendue\"\n self.aide_longue = \"Cette commande permet d'afficher un contexte représentant la carte (tronquée) de l'étendue. Les obstacles et les liens peuvent être édités simplement ici. 
Le contexte en lui-même propose beaucoup d'options et peut être difficile à manipuler pour commencer (certaines étendues sont bien plus grandes que la carte de base et il faut apprendre à naviguer dedans) mais de l'aide est mise à disposition pour vous aider à comprendre les différentes possibilités d'édition.\"\n<|end_body_0|>\n\n<|body_start_1|>\n cle = dic_masques['cle'].cle\n if cle not in type(self).importeur.salle.etendues.keys():\n personnage << \"|err|Cette clé {} n'existe pas.|ff|\".format(repr(cle))\n return\n etendue = type(self).importeur.salle.etendues[cle]\n if not personnage.salle.coords.valide:\n personnage << \"|err|La salle où vous vous trouvez n'a pas de coordonnées valides.|ff|\"\n return\n x = int(personnage.salle.coords.x) - 15\n y = int(personnage.salle.coords.y) - 8\n contexte = CarteEtendue(personnage.instance_connexion, x, y)\n contexte.etendue = etendue\n personnage.contexte_actuel.migrer_contexte(contexte)\n<|end_body_1|>\n", "revision_id": "7e93bff08cdf891352efba587e89c40f3b4a2301", "skeleton": "<|skeleton|>\nclass PrmCarte:\n \"\"\"Commande 'etendue carte'.\"\"\"\n\n def __init__(self):\n \"\"\"Constructeur du paramètre\"\"\"\n <|body_0|>\n\n def interpreter(self, personnage, dic_masques):\n \"\"\"Interprétation du paramètre\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PrmCarte:\n \"\"\"Commande 'etendue carte'.\"\"\"\n\n def __init__(self):\n \"\"\"Constructeur du paramètre\"\"\"\n Parametre.__init__(self, 'carte', 'map')\n self.schema = ''\n self.aide_courte = \"affiche la carte de l'étendue\"\n self.aide_longue = \"Cette commande permet d'afficher un contexte représentant la carte (tronquée) de l'étendue. Les obstacles et les liens peuvent être édités simplement ici. 
Le contexte en lui-même propose beaucoup d'options et peut être difficile à manipuler pour commencer (certaines étendues sont bien plus grandes que la carte de base et il faut apprendre à naviguer dedans) mais de l'aide est mise à disposition pour vous aider à comprendre les différentes possibilités d'édition.\"\n\n def interpreter(self, personnage, dic_masques):\n \"\"\"Interprétation du paramètre\"\"\"\n cle = dic_masques['cle'].cle\n if cle not in type(self).importeur.salle.etendues.keys():\n personnage << \"|err|Cette clé {} n'existe pas.|ff|\".format(repr(cle))\n return\n etendue = type(self).importeur.salle.etendues[cle]\n if not personnage.salle.coords.valide:\n personnage << \"|err|La salle où vous vous trouvez n'a pas de coordonnées valides.|ff|\"\n return\n x = int(personnage.salle.coords.x) - 15\n y = int(personnage.salle.coords.y) - 8\n contexte = CarteEtendue(personnage.instance_connexion, x, y)\n contexte.etendue = etendue\n personnage.contexte_actuel.migrer_contexte(contexte)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/primaires/salle/commandes/etendue/carte.py", "source_repo": "vincent-lg/tsunami", "split": "val", "star_events_count": 5} {"blob_id": "2775a516ff89b9063062c9217b4ab619a03f763f", "bodies": ["super().__init__()\nself.num_classes = num_classes\nself.num_grid = num_grid\nself.num_bbox = num_bbox\nself.image_shapes = image_shapes\nself.num_threshold = num_threshold", "dtype = yolo_bbox.dtype\ndevice = yolo_bbox.device\nH, W = image_shape\ngrid_idx = torch.arange(self.num_grid, dtype=dtype, device=device).unsqueeze(1)\nx_grid = grid_idx.repeat(self.num_grid, 1).flatten()\ny_grid = grid_idx.repeat(1, self.num_grid).flatten()\nx0 = ((yolo_bbox[:, 0] + x_grid) / self.num_grid - yolo_bbox[:, 2] / 2).unsqueeze(1) * W\ny0 = ((yolo_bbox[:, 1] + y_grid) / self.num_grid - yolo_bbox[:, 3] / 2).unsqueeze(1) * H\nx1 = ((yolo_bbox[:, 0] + x_grid) / self.num_grid + yolo_bbox[:, 2] / 2).unsqueeze(1) * W\ny1 = ((yolo_bbox[:, 1] + y_grid) / self.num_grid + yolo_bbox[:, 3] / 2).unsqueeze(1) * H\nreturn torch.cat((x0, y0, x1, y1, yolo_bbox[:, 4].unsqueeze(1)), dim=1)", "device = class_logits.device\nnum_classes = class_logits.shape[-1]\npred_probs = F.softmax(class_logits, -1)\nall_masks = []\nall_boxes = []\nimage_shape = image_shapes\nfor pred_boxes, probs in zip(proposals, pred_probs):\n boxes = self.boxes_decode(pred_boxes, image_shape)\n reserve_mask = boxes[:, -1] > 0.1\n boxes = boxes[reserve_mask]\n probs = probs[reserve_mask]\n boxes, scores = (boxes[:, :4], boxes[:, -1])\n boxes = detection_utils.clip_boxes_to_image(boxes, image_shape)\n labels = torch.argmax(probs, dim=-1)\n boxes = boxes.reshape(-1, 4)\n scores = scores.flatten()\n labels = labels.flatten()\n keep_mask = detection_utils.batched_nms(boxes, scores, labels, self.num_threshold)\n boxes, scores = (boxes[keep_mask], scores[keep_mask])\n mask_dict = {'reserve_mask': reserve_mask, 'keep_mask': keep_mask}\n all_boxes.append(boxes)\n all_masks.append(mask_dict)\nreturn (all_boxes, all_masks)", "N = x.shape[0]\nx = x.view(N, -1, self.num_classes + 5 * self.num_bbox)\nclass_logits = x[..., :self.num_classes]\nyolo_bboxes = x[..., self.num_classes:].view(-1, self.num_bbox, 5)\nyolo_bboxes = yolo_bboxes[range(N * self.num_grid ** 2), yolo_bboxes[..., -1].max(-1)[1]].reshape(N, -1, 5)\npred_boxes, pred_masks = self.postprocess_detections(class_logits, yolo_bboxes, self.image_shapes)\nreturn (class_logits, yolo_bboxes, pred_boxes, pred_masks)"], "bodies_text": "<|body_start_0|>\n super().__init__()\n 
self.num_classes = num_classes\n self.num_grid = num_grid\n self.num_bbox = num_bbox\n self.image_shapes = image_shapes\n self.num_threshold = num_threshold\n<|end_body_0|>\n\n<|body_start_1|>\n dtype = yolo_bbox.dtype\n device = yolo_bbox.device\n H, W = image_shape\n grid_idx = torch.arange(self.num_grid, dtype=dtype, device=device).unsqueeze(1)\n x_grid = grid_idx.repeat(self.num_grid, 1).flatten()\n y_grid = grid_idx.repeat(1, self.num_grid).flatten()\n x0 = ((yolo_bbox[:, 0] + x_grid) / self.num_grid - yolo_bbox[:, 2] / 2).unsqueeze(1) * W\n y0 = ((yolo_bbox[:, 1] + y_grid) / self.num_grid - yolo_bbox[:, 3] / 2).unsqueeze(1) * H\n x1 = ((yolo_bbox[:, 0] + x_grid) / self.num_grid + yolo_bbox[:, 2] / 2).unsqueeze(1) * W\n y1 = ((yolo_bbox[:, 1] + y_grid) / self.num_grid + yolo_bbox[:, 3] / 2).unsqueeze(1) * H\n return torch.cat((x0, y0, x1, y1, yolo_bbox[:, 4].unsqueeze(1)), dim=1)\n<|end_body_1|>\n\n<|body_start_2|>\n device = class_logits.device\n num_classes = class_logits.shape[-1]\n pred_probs = F.softmax(class_logits, -1)\n all_masks = []\n all_boxes = []\n image_shape = image_shapes\n for pred_boxes, probs in zip(proposals, pred_probs):\n boxes = self.boxes_decode(pred_boxes, image_shape)\n reserve_mask = boxes[:, -1] > 0.1\n boxes = boxes[reserve_mask]\n probs = probs[reserve_mask]\n boxes, scores = (boxes[:, :4], boxes[:, -1])\n boxes = detection_utils.clip_boxes_to_image(boxes, image_shape)\n labels = torch.argmax(probs, dim=-1)\n boxes = boxes.reshape(-1, 4)\n scores = scores.flatten()\n labels = labels.flatten()\n keep_mask = detection_utils.batched_nms(boxes, scores, labels, self.num_threshold)\n boxes, scores = (boxes[keep_mask], scores[keep_mask])\n mask_dict = {'reserve_mask': reserve_mask, 'keep_mask': keep_mask}\n all_boxes.append(boxes)\n all_masks.append(mask_dict)\n return (all_boxes, all_masks)\n<|end_body_2|>\n\n<|body_start_3|>\n N = x.shape[0]\n x = x.view(N, -1, self.num_classes + 5 * self.num_bbox)\n class_logits = x[..., :self.num_classes]\n yolo_bboxes = x[..., self.num_classes:].view(-1, self.num_bbox, 5)\n yolo_bboxes = yolo_bboxes[range(N * self.num_grid ** 2), yolo_bboxes[..., -1].max(-1)[1]].reshape(N, -1, 5)\n pred_boxes, pred_masks = self.postprocess_detections(class_logits, yolo_bboxes, self.image_shapes)\n return (class_logits, yolo_bboxes, pred_boxes, pred_masks)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "YOLOv1Postprocess", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass YOLOv1Postprocess:\n\n def __init__(self, num_classes, num_grid, num_bbox, image_shapes, num_threshold):\n \"\"\"Constructor for YOLOv1Postprocess\"\"\"\n <|body_0|>\n\n def boxes_decode(self, yolo_bbox, image_shape):\n \"\"\"Args: yolo_bbox: (Tensor[num_grid**2, 5]) image_shape: Tuple(height, weight) Returns: transformed_boxes: (Tensor[N, num_grid**2, 5])\"\"\"\n <|body_1|>\n\n def postprocess_detections(self, class_logits, proposals, image_shapes):\n \"\"\"Args: class_logits: (Tensor[N, num_grid**2, classes]), proposals: (Tensor[N, num_grid**2, 5]) image_shapes: (Tuple[height, width]): size of the image Returns:\"\"\"\n <|body_2|>\n\n def forward(self, x):\n \"\"\"Args: x: Tensor(N, out_features) out_features = num_grid**2 * (num_classes + num_bbox * 5) Returns: all_boxes: List(Tensor) all_scores: List(Tensor) all_masks: List(Dict(Tensor))\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.num_classes = num_classes\n self.num_grid = num_grid\n self.num_bbox = 
num_bbox\n self.image_shapes = image_shapes\n self.num_threshold = num_threshold\n<|end_body_0|>\n\n<|body_start_1|>\n dtype = yolo_bbox.dtype\n device = yolo_bbox.device\n H, W = image_shape\n grid_idx = torch.arange(self.num_grid, dtype=dtype, device=device).unsqueeze(1)\n x_grid = grid_idx.repeat(self.num_grid, 1).flatten()\n y_grid = grid_idx.repeat(1, self.num_grid).flatten()\n x0 = ((yolo_bbox[:, 0] + x_grid) / self.num_grid - yolo_bbox[:, 2] / 2).unsqueeze(1) * W\n y0 = ((yolo_bbox[:, 1] + y_grid) / self.num_grid - yolo_bbox[:, 3] / 2).unsqueeze(1) * H\n x1 = ((yolo_bbox[:, 0] + x_grid) / self.num_grid + yolo_bbox[:, 2] / 2).unsqueeze(1) * W\n y1 = ((yolo_bbox[:, 1] + y_grid) / self.num_grid + yolo_bbox[:, 3] / 2).unsqueeze(1) * H\n return torch.cat((x0, y0, x1, y1, yolo_bbox[:, 4].unsqueeze(1)), dim=1)\n<|end_body_1|>\n\n<|body_start_2|>\n device = class_logits.device\n num_classes = class_logits.shape[-1]\n pred_probs = F.softmax(class_logits, -1)\n all_masks = []\n all_boxes = []\n image_shape = image_shapes\n for pred_boxes, probs in zip(proposals, pred_probs):\n boxes = self.boxes_decode(pred_boxes, image_shape)\n reserve_mask = boxes[:, -1] > 0.1\n boxes = boxes[reserve_mask]\n probs = probs[reserve_mask]\n boxes, scores = (boxes[:, :4], boxes[:, -1])\n boxes = detection_utils.clip_boxes_to_image(boxes, image_shape)\n labels = torch.argmax(probs, dim=-1)\n boxes = boxes.reshape(-1, 4)\n scores = scores.flatten()\n labels = labels.flatten()\n keep_mask = detection_utils.batched_nms(boxes, scores, labels, self.num_threshold)\n boxes, scores = (boxes[keep_mask], scores[keep_mask])\n mask_dict = {'reserve_mask': reserve_mask, 'keep_mask': keep_mask}\n all_boxes.append(boxes)\n all_masks.append(mask_dict)\n return (all_boxes, all_masks)\n<|end_body_2|>\n\n<|body_start_3|>\n N = x.shape[0]\n x = x.view(N, -1, self.num_classes + 5 * self.num_bbox)\n class_logits = x[..., :self.num_classes]\n yolo_bboxes = x[..., self.num_classes:].view(-1, self.num_bbox, 5)\n yolo_bboxes = yolo_bboxes[range(N * self.num_grid ** 2), yolo_bboxes[..., -1].max(-1)[1]].reshape(N, -1, 5)\n pred_boxes, pred_masks = self.postprocess_detections(class_logits, yolo_bboxes, self.image_shapes)\n return (class_logits, yolo_bboxes, pred_boxes, pred_masks)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000115", "length_bytes": 10177, "license_type": "permissive", "methods": [{"docstring": "Constructor for YOLOv1Postprocess", "name": "__init__", "signature": "def __init__(self, num_classes, num_grid, num_bbox, image_shapes, num_threshold)"}, {"docstring": "Args: yolo_bbox: (Tensor[num_grid**2, 5]) image_shape: Tuple(height, weight) Returns: transformed_boxes: (Tensor[N, num_grid**2, 5])", "name": "boxes_decode", "signature": "def boxes_decode(self, yolo_bbox, image_shape)"}, {"docstring": "Args: class_logits: (Tensor[N, num_grid**2, classes]), proposals: (Tensor[N, num_grid**2, 5]) image_shapes: (Tuple[height, width]): size of the image Returns:", "name": "postprocess_detections", "signature": "def postprocess_detections(self, class_logits, proposals, image_shapes)"}, {"docstring": "Args: x: Tensor(N, out_features) out_features = num_grid**2 * (num_classes + num_bbox * 5) Returns: all_boxes: List(Tensor) all_scores: List(Tensor) all_masks: List(Dict(Tensor))", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_000372", "prompt": "Implement the Python class `YOLOv1Postprocess` described below.\n\nClass description:\nImplement the 
YOLOv1Postprocess class.\n\nMethod signatures and docstrings:\n- def __init__(self, num_classes, num_grid, num_bbox, image_shapes, num_threshold): Constructor for YOLOv1Postprocess\n- def boxes_decode(self, yolo_bbox, image_shape): Args: yolo_bbox: (Tensor[num_grid**2, 5]) image_shape: Tuple(height, weight) Returns: transformed_boxes: (Tensor[N, num_grid**2, 5])\n- def postprocess_detections(self, class_logits, proposals, image_shapes): Args: class_logits: (Tensor[N, num_grid**2, classes]), proposals: (Tensor[N, num_grid**2, 5]) image_shapes: (Tuple[height, width]): size of the image Returns:\n- def forward(self, x): Args: x: Tensor(N, out_features) out_features = num_grid**2 * (num_classes + num_bbox * 5) Returns: all_boxes: List(Tensor) all_scores: List(Tensor) all_masks: List(Dict(Tensor))", "prompted_full_text": "Implement the Python class `YOLOv1Postprocess` described below.\n\nClass description:\nImplement the YOLOv1Postprocess class.\n\nMethod signatures and docstrings:\n- def __init__(self, num_classes, num_grid, num_bbox, image_shapes, num_threshold): Constructor for YOLOv1Postprocess\n- def boxes_decode(self, yolo_bbox, image_shape): Args: yolo_bbox: (Tensor[num_grid**2, 5]) image_shape: Tuple(height, weight) Returns: transformed_boxes: (Tensor[N, num_grid**2, 5])\n- def postprocess_detections(self, class_logits, proposals, image_shapes): Args: class_logits: (Tensor[N, num_grid**2, classes]), proposals: (Tensor[N, num_grid**2, 5]) image_shapes: (Tuple[height, width]): size of the image Returns:\n- def forward(self, x): Args: x: Tensor(N, out_features) out_features = num_grid**2 * (num_classes + num_bbox * 5) Returns: all_boxes: List(Tensor) all_scores: List(Tensor) all_masks: List(Dict(Tensor))\n\n<|skeleton|>\nclass YOLOv1Postprocess:\n\n def __init__(self, num_classes, num_grid, num_bbox, image_shapes, num_threshold):\n \"\"\"Constructor for YOLOv1Postprocess\"\"\"\n <|body_0|>\n\n def boxes_decode(self, yolo_bbox, image_shape):\n \"\"\"Args: yolo_bbox: (Tensor[num_grid**2, 5]) image_shape: Tuple(height, weight) Returns: transformed_boxes: (Tensor[N, num_grid**2, 5])\"\"\"\n <|body_1|>\n\n def postprocess_detections(self, class_logits, proposals, image_shapes):\n \"\"\"Args: class_logits: (Tensor[N, num_grid**2, classes]), proposals: (Tensor[N, num_grid**2, 5]) image_shapes: (Tuple[height, width]): size of the image Returns:\"\"\"\n <|body_2|>\n\n def forward(self, x):\n \"\"\"Args: x: Tensor(N, out_features) out_features = num_grid**2 * (num_classes + num_bbox * 5) Returns: all_boxes: List(Tensor) all_scores: List(Tensor) all_masks: List(Dict(Tensor))\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.num_classes = num_classes\n self.num_grid = num_grid\n self.num_bbox = num_bbox\n self.image_shapes = image_shapes\n self.num_threshold = num_threshold\n<|end_body_0|>\n\n<|body_start_1|>\n dtype = yolo_bbox.dtype\n device = yolo_bbox.device\n H, W = image_shape\n grid_idx = torch.arange(self.num_grid, dtype=dtype, device=device).unsqueeze(1)\n x_grid = grid_idx.repeat(self.num_grid, 1).flatten()\n y_grid = grid_idx.repeat(1, self.num_grid).flatten()\n x0 = ((yolo_bbox[:, 0] + x_grid) / self.num_grid - yolo_bbox[:, 2] / 2).unsqueeze(1) * W\n y0 = ((yolo_bbox[:, 1] + y_grid) / self.num_grid - yolo_bbox[:, 3] / 2).unsqueeze(1) * H\n x1 = ((yolo_bbox[:, 0] + x_grid) / self.num_grid + yolo_bbox[:, 2] / 2).unsqueeze(1) * W\n y1 = ((yolo_bbox[:, 1] + y_grid) / self.num_grid + yolo_bbox[:, 3] / 2).unsqueeze(1) * H\n return torch.cat((x0, y0, x1, y1, 
yolo_bbox[:, 4].unsqueeze(1)), dim=1)\n<|end_body_1|>\n\n<|body_start_2|>\n device = class_logits.device\n num_classes = class_logits.shape[-1]\n pred_probs = F.softmax(class_logits, -1)\n all_masks = []\n all_boxes = []\n image_shape = image_shapes\n for pred_boxes, probs in zip(proposals, pred_probs):\n boxes = self.boxes_decode(pred_boxes, image_shape)\n reserve_mask = boxes[:, -1] > 0.1\n boxes = boxes[reserve_mask]\n probs = probs[reserve_mask]\n boxes, scores = (boxes[:, :4], boxes[:, -1])\n boxes = detection_utils.clip_boxes_to_image(boxes, image_shape)\n labels = torch.argmax(probs, dim=-1)\n boxes = boxes.reshape(-1, 4)\n scores = scores.flatten()\n labels = labels.flatten()\n keep_mask = detection_utils.batched_nms(boxes, scores, labels, self.num_threshold)\n boxes, scores = (boxes[keep_mask], scores[keep_mask])\n mask_dict = {'reserve_mask': reserve_mask, 'keep_mask': keep_mask}\n all_boxes.append(boxes)\n all_masks.append(mask_dict)\n return (all_boxes, all_masks)\n<|end_body_2|>\n\n<|body_start_3|>\n N = x.shape[0]\n x = x.view(N, -1, self.num_classes + 5 * self.num_bbox)\n class_logits = x[..., :self.num_classes]\n yolo_bboxes = x[..., self.num_classes:].view(-1, self.num_bbox, 5)\n yolo_bboxes = yolo_bboxes[range(N * self.num_grid ** 2), yolo_bboxes[..., -1].max(-1)[1]].reshape(N, -1, 5)\n pred_boxes, pred_masks = self.postprocess_detections(class_logits, yolo_bboxes, self.image_shapes)\n return (class_logits, yolo_bboxes, pred_boxes, pred_masks)\n<|end_body_3|>\n", "revision_id": "667488e41878d7f0376142a7ae9e1b43c0edd68a", "skeleton": "<|skeleton|>\nclass YOLOv1Postprocess:\n\n def __init__(self, num_classes, num_grid, num_bbox, image_shapes, num_threshold):\n \"\"\"Constructor for YOLOv1Postprocess\"\"\"\n <|body_0|>\n\n def boxes_decode(self, yolo_bbox, image_shape):\n \"\"\"Args: yolo_bbox: (Tensor[num_grid**2, 5]) image_shape: Tuple(height, weight) Returns: transformed_boxes: (Tensor[N, num_grid**2, 5])\"\"\"\n <|body_1|>\n\n def postprocess_detections(self, class_logits, proposals, image_shapes):\n \"\"\"Args: class_logits: (Tensor[N, num_grid**2, classes]), proposals: (Tensor[N, num_grid**2, 5]) image_shapes: (Tuple[height, width]): size of the image Returns:\"\"\"\n <|body_2|>\n\n def forward(self, x):\n \"\"\"Args: x: Tensor(N, out_features) out_features = num_grid**2 * (num_classes + num_bbox * 5) Returns: all_boxes: List(Tensor) all_scores: List(Tensor) all_masks: List(Dict(Tensor))\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class YOLOv1Postprocess:\n def __init__(self, num_classes, num_grid, num_bbox, image_shapes, num_threshold):\n \"\"\"Constructor for YOLOv1Postprocess\"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.num_grid = num_grid\n self.num_bbox = num_bbox\n self.image_shapes = image_shapes\n self.num_threshold = num_threshold\n\n def boxes_decode(self, yolo_bbox, image_shape):\n \"\"\"Args: yolo_bbox: (Tensor[num_grid**2, 5]) image_shape: Tuple(height, weight) Returns: transformed_boxes: (Tensor[N, num_grid**2, 5])\"\"\"\n dtype = yolo_bbox.dtype\n device = yolo_bbox.device\n H, W = image_shape\n grid_idx = torch.arange(self.num_grid, dtype=dtype, device=device).unsqueeze(1)\n x_grid = grid_idx.repeat(self.num_grid, 1).flatten()\n y_grid = grid_idx.repeat(1, self.num_grid).flatten()\n x0 = ((yolo_bbox[:, 0] + x_grid) / self.num_grid - yolo_bbox[:, 2] / 2).unsqueeze(1) * W\n y0 = ((yolo_bbox[:, 1] + y_grid) / 
self.num_grid - yolo_bbox[:, 3] / 2).unsqueeze(1) * H\n        x1 = ((yolo_bbox[:, 0] + x_grid) / self.num_grid + yolo_bbox[:, 2] / 2).unsqueeze(1) * W\n        y1 = ((yolo_bbox[:, 1] + y_grid) / self.num_grid + yolo_bbox[:, 3] / 2).unsqueeze(1) * H\n        return torch.cat((x0, y0, x1, y1, yolo_bbox[:, 4].unsqueeze(1)), dim=1)\n\n    def postprocess_detections(self, class_logits, proposals, image_shapes):\n        \"\"\"Args: class_logits: (Tensor[N, num_grid**2, classes]), proposals: (Tensor[N, num_grid**2, 5]) image_shapes: (Tuple[height, width]): size of the image Returns:\"\"\"\n        device = class_logits.device\n        num_classes = class_logits.shape[-1]\n        pred_probs = F.softmax(class_logits, -1)\n        all_masks = []\n        all_boxes = []\n        image_shape = image_shapes\n        for pred_boxes, probs in zip(proposals, pred_probs):\n            boxes = self.boxes_decode(pred_boxes, image_shape)\n            reserve_mask = boxes[:, -1] > 0.1\n            boxes = boxes[reserve_mask]\n            probs = probs[reserve_mask]\n            boxes, scores = (boxes[:, :4], boxes[:, -1])\n            boxes = detection_utils.clip_boxes_to_image(boxes, image_shape)\n            labels = torch.argmax(probs, dim=-1)\n            boxes = boxes.reshape(-1, 4)\n            scores = scores.flatten()\n            labels = labels.flatten()\n            keep_mask = detection_utils.batched_nms(boxes, scores, labels, self.num_threshold)\n            boxes, scores = (boxes[keep_mask], scores[keep_mask])\n            mask_dict = {'reserve_mask': reserve_mask, 'keep_mask': keep_mask}\n            all_boxes.append(boxes)\n            all_masks.append(mask_dict)\n        return (all_boxes, all_masks)\n\n    def forward(self, x):\n        \"\"\"Args: x: Tensor(N, out_features) out_features = num_grid**2 * (num_classes + num_bbox * 5) Returns: all_boxes: List(Tensor) all_scores: List(Tensor) all_masks: List(Dict(Tensor))\"\"\"\n        N = x.shape[0]\n        x = x.view(N, -1, self.num_classes + 5 * self.num_bbox)\n        class_logits = x[..., :self.num_classes]\n        yolo_bboxes = x[..., self.num_classes:].view(-1, self.num_bbox, 5)\n        yolo_bboxes = yolo_bboxes[range(N * self.num_grid ** 2), yolo_bboxes[..., -1].max(-1)[1]].reshape(N, -1, 5)\n        pred_boxes, pred_masks = self.postprocess_detections(class_logits, yolo_bboxes, self.image_shapes)\n        return (class_logits, yolo_bboxes, pred_boxes, pred_masks)\n", "source": "the_stack_v2_python_sparse", "source_path": "cvmodels/models/yolo_v1.py", "source_repo": "welkin-feng/ComputerVision", "split": "val", "star_events_count": 3} {"blob_id": "fc172eb3354166984cc3c6c08ee8c531dfccc707", "bodies": ["task = kwargs.pop('task', None)\nsuper(self.__class__, self).__init__(*args, **kwargs)\nself.fields['sub_tasks'].queryset = Task.objects.filter(parent=task) if task else Task.objects.none()", "sub_tasks = self.cleaned_data['sub_tasks']\nfor sub_task in sub_tasks:\n    if task.parent == sub_task:\n        continue\n    sub_task.parent = task\n    sub_task.save()"], "bodies_text": "<|body_start_0|>\n    task = kwargs.pop('task', None)\n    super(self.__class__, self).__init__(*args, **kwargs)\n    self.fields['sub_tasks'].queryset = Task.objects.filter(parent=task) if task else Task.objects.none()\n<|end_body_0|>\n\n<|body_start_1|>\n    sub_tasks = self.cleaned_data['sub_tasks']\n    for sub_task in sub_tasks:\n        if task.parent == sub_task:\n            continue\n        sub_task.parent = task\n        sub_task.save()\n<|end_body_1|>\n", "class_docstring": "Form representing sub task", "class_name": "SubTaskForm", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SubTaskForm:\n    \"\"\"Form representing sub task\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"choice field values will be filled during form load\"\"\"\n
        <|body_0|>\n\n    def save(self, task):\n        \"\"\"save the sub task for a task\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    task = kwargs.pop('task', None)\n    super(self.__class__, self).__init__(*args, **kwargs)\n    self.fields['sub_tasks'].queryset = Task.objects.filter(parent=task) if task else Task.objects.none()\n<|end_body_0|>\n\n<|body_start_1|>\n    sub_tasks = self.cleaned_data['sub_tasks']\n    for sub_task in sub_tasks:\n        if task.parent == sub_task:\n            continue\n        sub_task.parent = task\n        sub_task.save()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000116", "length_bytes": 5361, "license_type": "no_license", "methods": [{"docstring": "choice field values will be filled during form load", "name": "__init__", "signature": "def __init__(self, *args, **kwargs)"}, {"docstring": "save the sub task for a task", "name": "save", "signature": "def save(self, task)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000056", "prompt": "Implement the Python class `SubTaskForm` described below.\n\nClass description:\nForm representing sub task\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): choice field values will be filled during form load\n- def save(self, task): save the sub task for a task", "prompted_full_text": "Implement the Python class `SubTaskForm` described below.\n\nClass description:\nForm representing sub task\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): choice field values will be filled during form load\n- def save(self, task): save the sub task for a task\n\n<|skeleton|>\nclass SubTaskForm:\n    \"\"\"Form representing sub task\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"choice field values will be filled during form load\"\"\"\n        <|body_0|>\n\n    def save(self, task):\n        \"\"\"save the sub task for a task\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    task = kwargs.pop('task', None)\n    super(self.__class__, self).__init__(*args, **kwargs)\n    self.fields['sub_tasks'].queryset = Task.objects.filter(parent=task) if task else Task.objects.none()\n<|end_body_0|>\n\n<|body_start_1|>\n    sub_tasks = self.cleaned_data['sub_tasks']\n    for sub_task in sub_tasks:\n        if task.parent == sub_task:\n            continue\n        sub_task.parent = task\n        sub_task.save()\n<|end_body_1|>\n", "revision_id": "7a337e0e3a20180b9564de68ab22620dc9aa1a36", "skeleton": "<|skeleton|>\nclass SubTaskForm:\n    \"\"\"Form representing sub task\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"choice field values will be filled during form load\"\"\"\n        <|body_0|>\n\n    def save(self, task):\n        \"\"\"save the sub task for a task\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SubTaskForm:\n    \"\"\"Form representing sub task\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"choice field values will be filled during form load\"\"\"\n        task = kwargs.pop('task', None)\n        super(self.__class__, self).__init__(*args, **kwargs)\n        self.fields['sub_tasks'].queryset = Task.objects.filter(parent=task) if task else Task.objects.none()\n\n    def save(self, task):\n        \"\"\"save the sub task for a task\"\"\"\n        sub_tasks = self.cleaned_data['sub_tasks']\n        for sub_task in sub_tasks:\n            if task.parent == sub_task:\n                continue\n            sub_task.parent = task\n            sub_task.save()\n", "source": "the_stack_v2_python_sparse", "source_path": "project_management/tasks/forms.py", "source_repo": "raveena17/ILASM", "split": "val", 
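The SubTaskForm record above captures a common Django form pattern: pop a custom keyword argument ('task') out of **kwargs before delegating to the base constructor, then use it to scope a field's queryset. Worth flagging: the archived super(self.__class__, self).__init__(...) call recurses forever if the class is ever subclassed, because self.__class__ then names the subclass; a zero-argument super() avoids this. A minimal, framework-free sketch of the kwarg-popping pattern follows — BaseForm and the tuple-valued queryset are illustrative stand-ins, not the record's Django API:

    from types import SimpleNamespace

    class BaseForm:
        # Stand-in for the framework base class (e.g. django.forms.Form),
        # which would reject an unexpected 'task' keyword argument.
        def __init__(self, *args, **kwargs):
            self.fields = {'sub_tasks': SimpleNamespace(queryset=())}

    class SubTaskForm(BaseForm):
        def __init__(self, *args, **kwargs):
            # Pop the extra kwarg *before* delegating, as in the record,
            # so the base constructor never sees it.
            task = kwargs.pop('task', None)
            super().__init__(*args, **kwargs)  # plain super(): subclass-safe
            self.fields['sub_tasks'].queryset = (('child-of-%s' % task,) if task else ())

    form = SubTaskForm(task='T1')
    print(form.fields['sub_tasks'].queryset)  # ('child-of-T1',)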
"star_events_count": 0} {"blob_id": "7020c10ae6df9a131d61ab910d907159c820a7ff", "bodies": ["if Flavour.objects.filter(flavour=value.lower()):\n raise serializers.ValidationError('There already exist such flavour')\nreturn value", "ret = super().to_representation(instance)\nret['flavour'] = ret['flavour'].lower()\nreturn ret"], "bodies_text": "<|body_start_0|>\n if Flavour.objects.filter(flavour=value.lower()):\n raise serializers.ValidationError('There already exist such flavour')\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n ret = super().to_representation(instance)\n ret['flavour'] = ret['flavour'].lower()\n return ret\n<|end_body_1|>\n", "class_docstring": "", "class_name": "AddFlavourSerializers", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AddFlavourSerializers:\n\n def validate_flavour(self, value):\n \"\"\"Check the duplicate\"\"\"\n <|body_0|>\n\n def to_representation(self, instance):\n \"\"\"Convert `flavour` to lowercase.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if Flavour.objects.filter(flavour=value.lower()):\n raise serializers.ValidationError('There already exist such flavour')\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n ret = super().to_representation(instance)\n ret['flavour'] = ret['flavour'].lower()\n return ret\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000117", "length_bytes": 4598, "license_type": "permissive", "methods": [{"docstring": "Check the duplicate", "name": "validate_flavour", "signature": "def validate_flavour(self, value)"}, {"docstring": "Convert `flavour` to lowercase.", "name": "to_representation", "signature": "def to_representation(self, instance)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003648", "prompt": "Implement the Python class `AddFlavourSerializers` described below.\n\nClass description:\nImplement the AddFlavourSerializers class.\n\nMethod signatures and docstrings:\n- def validate_flavour(self, value): Check the duplicate\n- def to_representation(self, instance): Convert `flavour` to lowercase.", "prompted_full_text": "Implement the Python class `AddFlavourSerializers` described below.\n\nClass description:\nImplement the AddFlavourSerializers class.\n\nMethod signatures and docstrings:\n- def validate_flavour(self, value): Check the duplicate\n- def to_representation(self, instance): Convert `flavour` to lowercase.\n\n<|skeleton|>\nclass AddFlavourSerializers:\n\n def validate_flavour(self, value):\n \"\"\"Check the duplicate\"\"\"\n <|body_0|>\n\n def to_representation(self, instance):\n \"\"\"Convert `flavour` to lowercase.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if Flavour.objects.filter(flavour=value.lower()):\n raise serializers.ValidationError('There already exist such flavour')\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n ret = super().to_representation(instance)\n ret['flavour'] = ret['flavour'].lower()\n return ret\n<|end_body_1|>\n", "revision_id": "6a935bb77db3996dcf14b71deed8d7ca5c8f0fa3", "skeleton": "<|skeleton|>\nclass AddFlavourSerializers:\n\n def validate_flavour(self, value):\n \"\"\"Check the duplicate\"\"\"\n <|body_0|>\n\n def to_representation(self, instance):\n \"\"\"Convert `flavour` to lowercase.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AddFlavourSerializers:\n def validate_flavour(self, value):\n \"\"\"Check 
the duplicate\"\"\"\n if Flavour.objects.filter(flavour=value.lower()):\n raise serializers.ValidationError('There already exist such flavour')\n return value\n\n def to_representation(self, instance):\n \"\"\"Convert `flavour` to lowercase.\"\"\"\n ret = super().to_representation(instance)\n ret['flavour'] = ret['flavour'].lower()\n return ret\n", "source": "the_stack_v2_python_sparse", "source_path": "drf_api/serializers.py", "source_repo": "destro6984/LynxWasp", "split": "val", "star_events_count": 0} {"blob_id": "deddc093edcbd0ecbe5e5a821330de0d03642b86", "bodies": ["if not kwargs['reservation'].can_change(request.user) and (not kwargs['reservation'].can_change_end_time(request.user)):\n return redirect('my_reservations')\nreturn super().dispatch(request, *args, **kwargs)", "reservation = kwargs['reservation']\nif reservation.machine != form.cleaned_data['machine']:\n return redirect('my_reservations')\nreservation.comment = form.cleaned_data['comment']\nreservation.start_time = form.cleaned_data['start_time']\nreservation.end_time = form.cleaned_data['end_time']\nif reservation.event:\n reservation.event = form.cleaned_data['event']\nif reservation.special:\n reservation.special_text = form.cleaned_data['special_text']\nreturn self.validate_and_save(reservation, form)"], "bodies_text": "<|body_start_0|>\n if not kwargs['reservation'].can_change(request.user) and (not kwargs['reservation'].can_change_end_time(request.user)):\n return redirect('my_reservations')\n return super().dispatch(request, *args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n reservation = kwargs['reservation']\n if reservation.machine != form.cleaned_data['machine']:\n return redirect('my_reservations')\n reservation.comment = form.cleaned_data['comment']\n reservation.start_time = form.cleaned_data['start_time']\n reservation.end_time = form.cleaned_data['end_time']\n if reservation.event:\n reservation.event = form.cleaned_data['event']\n if reservation.special:\n reservation.special_text = form.cleaned_data['special_text']\n return self.validate_and_save(reservation, form)\n<|end_body_1|>\n", "class_docstring": "View for changing a reservation (Cannot be UpdateView due to the abstract inheritance of reservations)", "class_name": "ChangeReservationView", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ChangeReservationView:\n \"\"\"View for changing a reservation (Cannot be UpdateView due to the abstract inheritance of reservations)\"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"Redirects the user to it's reservation page if the given reservation cannot be changed :param request: The HTTP request\"\"\"\n <|body_0|>\n\n def form_valid(self, form, **kwargs):\n \"\"\"Handles updating the reservation if the form is valid, otherwise render the form view with an error code :param form: The valid form :return HTTP Response\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not kwargs['reservation'].can_change(request.user) and (not kwargs['reservation'].can_change_end_time(request.user)):\n return redirect('my_reservations')\n return super().dispatch(request, *args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n reservation = kwargs['reservation']\n if reservation.machine != form.cleaned_data['machine']:\n return redirect('my_reservations')\n reservation.comment = form.cleaned_data['comment']\n reservation.start_time = form.cleaned_data['start_time']\n reservation.end_time = form.cleaned_data['end_time']\n if 
reservation.event:\n reservation.event = form.cleaned_data['event']\n if reservation.special:\n reservation.special_text = form.cleaned_data['special_text']\n return self.validate_and_save(reservation, form)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000118", "length_bytes": 12808, "license_type": "permissive", "methods": [{"docstring": "Redirects the user to it's reservation page if the given reservation cannot be changed :param request: The HTTP request", "name": "dispatch", "signature": "def dispatch(self, request, *args, **kwargs)"}, {"docstring": "Handles updating the reservation if the form is valid, otherwise render the form view with an error code :param form: The valid form :return HTTP Response", "name": "form_valid", "signature": "def form_valid(self, form, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002086", "prompt": "Implement the Python class `ChangeReservationView` described below.\n\nClass description:\nView for changing a reservation (Cannot be UpdateView due to the abstract inheritance of reservations)\n\nMethod signatures and docstrings:\n- def dispatch(self, request, *args, **kwargs): Redirects the user to it's reservation page if the given reservation cannot be changed :param request: The HTTP request\n- def form_valid(self, form, **kwargs): Handles updating the reservation if the form is valid, otherwise render the form view with an error code :param form: The valid form :return HTTP Response", "prompted_full_text": "Implement the Python class `ChangeReservationView` described below.\n\nClass description:\nView for changing a reservation (Cannot be UpdateView due to the abstract inheritance of reservations)\n\nMethod signatures and docstrings:\n- def dispatch(self, request, *args, **kwargs): Redirects the user to it's reservation page if the given reservation cannot be changed :param request: The HTTP request\n- def form_valid(self, form, **kwargs): Handles updating the reservation if the form is valid, otherwise render the form view with an error code :param form: The valid form :return HTTP Response\n\n<|skeleton|>\nclass ChangeReservationView:\n \"\"\"View for changing a reservation (Cannot be UpdateView due to the abstract inheritance of reservations)\"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"Redirects the user to it's reservation page if the given reservation cannot be changed :param request: The HTTP request\"\"\"\n <|body_0|>\n\n def form_valid(self, form, **kwargs):\n \"\"\"Handles updating the reservation if the form is valid, otherwise render the form view with an error code :param form: The valid form :return HTTP Response\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not kwargs['reservation'].can_change(request.user) and (not kwargs['reservation'].can_change_end_time(request.user)):\n return redirect('my_reservations')\n return super().dispatch(request, *args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n reservation = kwargs['reservation']\n if reservation.machine != form.cleaned_data['machine']:\n return redirect('my_reservations')\n reservation.comment = form.cleaned_data['comment']\n reservation.start_time = form.cleaned_data['start_time']\n reservation.end_time = form.cleaned_data['end_time']\n if reservation.event:\n reservation.event = form.cleaned_data['event']\n if reservation.special:\n reservation.special_text = form.cleaned_data['special_text']\n return self.validate_and_save(reservation, form)\n<|end_body_1|>\n", "revision_id": 
"1d190a86e3277315804bfcc0b8f9abd4f9c1d780", "skeleton": "<|skeleton|>\nclass ChangeReservationView:\n \"\"\"View for changing a reservation (Cannot be UpdateView due to the abstract inheritance of reservations)\"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"Redirects the user to it's reservation page if the given reservation cannot be changed :param request: The HTTP request\"\"\"\n <|body_0|>\n\n def form_valid(self, form, **kwargs):\n \"\"\"Handles updating the reservation if the form is valid, otherwise render the form view with an error code :param form: The valid form :return HTTP Response\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ChangeReservationView:\n \"\"\"View for changing a reservation (Cannot be UpdateView due to the abstract inheritance of reservations)\"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"Redirects the user to it's reservation page if the given reservation cannot be changed :param request: The HTTP request\"\"\"\n if not kwargs['reservation'].can_change(request.user) and (not kwargs['reservation'].can_change_end_time(request.user)):\n return redirect('my_reservations')\n return super().dispatch(request, *args, **kwargs)\n\n def form_valid(self, form, **kwargs):\n \"\"\"Handles updating the reservation if the form is valid, otherwise render the form view with an error code :param form: The valid form :return HTTP Response\"\"\"\n reservation = kwargs['reservation']\n if reservation.machine != form.cleaned_data['machine']:\n return redirect('my_reservations')\n reservation.comment = form.cleaned_data['comment']\n reservation.start_time = form.cleaned_data['start_time']\n reservation.end_time = form.cleaned_data['end_time']\n if reservation.event:\n reservation.event = form.cleaned_data['event']\n if reservation.special:\n reservation.special_text = form.cleaned_data['special_text']\n return self.validate_and_save(reservation, form)\n", "source": "the_stack_v2_python_sparse", "source_path": "make_queue/views/reservation/reservation.py", "source_repo": "mahoyen/web", "split": "val", "star_events_count": 0} {"blob_id": "cf827b6645b25cdf3f34dd200355da1b2f8396c5", "bodies": ["self.agents = agents\nself.cluster_source_type = cluster_source_type\nself.host_name = host_name\nself.host_type = host_type\nself.id = id\nself.is_proxy_host = is_proxy_host\nself.memory_size_bytes = memory_size_bytes\nself.name = name\nself.networking_info = networking_info\nself.num_processors = num_processors\nself.os_name = os_name\nself.mtype = mtype\nself.vcs_version = vcs_version\nself.volumes = volumes\nself.vsswriters = vsswriters", "if dictionary is None:\n return None\nagents = None\nif dictionary.get('agents') != None:\n agents = list()\n for structure in dictionary.get('agents'):\n agents.append(cohesity_management_sdk.models.agent_information.AgentInformation.from_dictionary(structure))\ncluster_source_type = dictionary.get('clusterSourceType')\nhost_name = dictionary.get('hostName')\nhost_type = dictionary.get('hostType')\nid = cohesity_management_sdk.models.universal_id.UniversalId.from_dictionary(dictionary.get('id')) if dictionary.get('id') else None\nis_proxy_host = dictionary.get('isProxyHost')\nmemory_size_bytes = dictionary.get('memorySizeBytes')\nname = dictionary.get('name')\nnetworking_info = cohesity_management_sdk.models.networking_information.NetworkingInformation.from_dictionary(dictionary.get('networkingInfo')) if 
dictionary.get('networkingInfo') else None\nnum_processors = dictionary.get('numProcessors')\nos_name = dictionary.get('osName')\nmtype = dictionary.get('type')\nvcs_version = dictionary.get('vcsVersion')\nvolumes = None\nif dictionary.get('volumes') != None:\n volumes = list()\n for structure in dictionary.get('volumes'):\n volumes.append(cohesity_management_sdk.models.physical_volume.PhysicalVolume.from_dictionary(structure))\nvsswriters = None\nif dictionary.get('vsswriters') != None:\n vsswriters = list()\n for structure in dictionary.get('vsswriters'):\n vsswriters.append(cohesity_management_sdk.models.vss_writer.VssWriter.from_dictionary(structure))\nreturn cls(agents, cluster_source_type, host_name, host_type, id, is_proxy_host, memory_size_bytes, name, networking_info, num_processors, os_name, mtype, vcs_version, volumes, vsswriters)"], "bodies_text": "<|body_start_0|>\n self.agents = agents\n self.cluster_source_type = cluster_source_type\n self.host_name = host_name\n self.host_type = host_type\n self.id = id\n self.is_proxy_host = is_proxy_host\n self.memory_size_bytes = memory_size_bytes\n self.name = name\n self.networking_info = networking_info\n self.num_processors = num_processors\n self.os_name = os_name\n self.mtype = mtype\n self.vcs_version = vcs_version\n self.volumes = volumes\n self.vsswriters = vsswriters\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n agents = None\n if dictionary.get('agents') != None:\n agents = list()\n for structure in dictionary.get('agents'):\n agents.append(cohesity_management_sdk.models.agent_information.AgentInformation.from_dictionary(structure))\n cluster_source_type = dictionary.get('clusterSourceType')\n host_name = dictionary.get('hostName')\n host_type = dictionary.get('hostType')\n id = cohesity_management_sdk.models.universal_id.UniversalId.from_dictionary(dictionary.get('id')) if dictionary.get('id') else None\n is_proxy_host = dictionary.get('isProxyHost')\n memory_size_bytes = dictionary.get('memorySizeBytes')\n name = dictionary.get('name')\n networking_info = cohesity_management_sdk.models.networking_information.NetworkingInformation.from_dictionary(dictionary.get('networkingInfo')) if dictionary.get('networkingInfo') else None\n num_processors = dictionary.get('numProcessors')\n os_name = dictionary.get('osName')\n mtype = dictionary.get('type')\n vcs_version = dictionary.get('vcsVersion')\n volumes = None\n if dictionary.get('volumes') != None:\n volumes = list()\n for structure in dictionary.get('volumes'):\n volumes.append(cohesity_management_sdk.models.physical_volume.PhysicalVolume.from_dictionary(structure))\n vsswriters = None\n if dictionary.get('vsswriters') != None:\n vsswriters = list()\n for structure in dictionary.get('vsswriters'):\n vsswriters.append(cohesity_management_sdk.models.vss_writer.VssWriter.from_dictionary(structure))\n return cls(agents, cluster_source_type, host_name, host_type, id, is_proxy_host, memory_size_bytes, name, networking_info, num_processors, os_name, mtype, vcs_version, volumes, vsswriters)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'PhysicalProtectionSource' model. Specifies a Protection Source in a Physical environment. Attributes: agents (list of AgentInformation): Array of Agents on the Physical Protection Source. Specifiles the agents running on the Physical Protection Source and the status information. cluster_source_type (string): Specifies the type of cluster resource this source represents. 
host_name (string): Specifies the hostname. host_type (HostTypeEnum): Specifies the environment type for the host. 'kLinux' indicates the Linux operating system. 'kWindows' indicates the Microsoft Windows operating system. 'kAix' indicates the IBM AIX operating system. 'kSolaris' indicates the Oracle So", "class_name": "PhysicalProtectionSource", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PhysicalProtectionSource:\n \"\"\"Implementation of the 'PhysicalProtectionSource' model. Specifies a Protection Source in a Physical environment. Attributes: agents (list of AgentInformation): Array of Agents on the Physical Protection Source. Specifiles the agents running on the Physical Protection Source and the status information. cluster_source_type (string): Specifies the type of cluster resource this source represents. host_name (string): Specifies the hostname. host_type (HostTypeEnum): Specifies the environment type for the host. 'kLinux' indicates the Linux operating system. 'kWindows' indicates the Microsoft Windows operating system. 'kAix' indicates the IBM AIX operating system. 'kSolaris' indicates the Oracle So\"\"\"\n\n def __init__(self, agents=None, cluster_source_type=None, host_name=None, host_type=None, id=None, is_proxy_host=None, memory_size_bytes=None, name=None, networking_info=None, num_processors=None, os_name=None, mtype=None, vcs_version=None, volumes=None, vsswriters=None):\n \"\"\"Constructor for the PhysicalProtectionSource class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.agents = agents\n self.cluster_source_type = cluster_source_type\n self.host_name = host_name\n self.host_type = host_type\n self.id = id\n self.is_proxy_host = is_proxy_host\n self.memory_size_bytes = memory_size_bytes\n self.name = name\n self.networking_info = networking_info\n self.num_processors = num_processors\n self.os_name = os_name\n self.mtype = mtype\n self.vcs_version = vcs_version\n self.volumes = volumes\n self.vsswriters = vsswriters\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n agents = None\n if dictionary.get('agents') != None:\n agents = list()\n for structure in dictionary.get('agents'):\n agents.append(cohesity_management_sdk.models.agent_information.AgentInformation.from_dictionary(structure))\n cluster_source_type = dictionary.get('clusterSourceType')\n host_name = dictionary.get('hostName')\n host_type = dictionary.get('hostType')\n id = cohesity_management_sdk.models.universal_id.UniversalId.from_dictionary(dictionary.get('id')) if dictionary.get('id') else None\n is_proxy_host = dictionary.get('isProxyHost')\n memory_size_bytes = dictionary.get('memorySizeBytes')\n name = dictionary.get('name')\n networking_info = cohesity_management_sdk.models.networking_information.NetworkingInformation.from_dictionary(dictionary.get('networkingInfo')) if dictionary.get('networkingInfo') else None\n num_processors = dictionary.get('numProcessors')\n os_name = dictionary.get('osName')\n mtype = dictionary.get('type')\n vcs_version = dictionary.get('vcsVersion')\n volumes = None\n if dictionary.get('volumes') != None:\n volumes = list()\n for structure in dictionary.get('volumes'):\n volumes.append(cohesity_management_sdk.models.physical_volume.PhysicalVolume.from_dictionary(structure))\n vsswriters = None\n if dictionary.get('vsswriters') != None:\n vsswriters = list()\n for structure in dictionary.get('vsswriters'):\n vsswriters.append(cohesity_management_sdk.models.vss_writer.VssWriter.from_dictionary(structure))\n return cls(agents, cluster_source_type, host_name, host_type, id, is_proxy_host, memory_size_bytes, name, networking_info, num_processors, os_name, mtype, vcs_version, volumes, vsswriters)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000119", "length_bytes": 7793, "license_type": "permissive", "methods": [{"docstring": "Constructor for the PhysicalProtectionSource class", "name": "__init__", "signature": "def __init__(self, agents=None, cluster_source_type=None, host_name=None, host_type=None, id=None, is_proxy_host=None, memory_size_bytes=None, name=None, networking_info=None, num_processors=None, os_name=None, mtype=None, vcs_version=None, volumes=None, vsswriters=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `PhysicalProtectionSource` described below.\n\nClass description:\nImplementation of the 'PhysicalProtectionSource' model. Specifies a Protection Source in a Physical environment. Attributes: agents (list of AgentInformation): Array of Agents on the Physical Protection Source. 
Specifiles the agents running on the Physical Protection Source and the status information. cluster_source_type (string): Specifies the type of cluster resource this source represents. host_name (string): Specifies the hostname. host_type (HostTypeEnum): Specifies the environment type for the host. 'kLinux' indicates the Linux operating system. 'kWindows' indicates the Microsoft Windows operating system. 'kAix' indicates the IBM AIX operating system. 'kSolaris' indicates the Oracle So\n\nMethod signatures and docstrings:\n- def __init__(self, agents=None, cluster_source_type=None, host_name=None, host_type=None, id=None, is_proxy_host=None, memory_size_bytes=None, name=None, networking_info=None, num_processors=None, os_name=None, mtype=None, vcs_version=None, volumes=None, vsswriters=None): Constructor for the PhysicalProtectionSource class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `PhysicalProtectionSource` described below.\n\nClass description:\nImplementation of the 'PhysicalProtectionSource' model. Specifies a Protection Source in a Physical environment. Attributes: agents (list of AgentInformation): Array of Agents on the Physical Protection Source. Specifiles the agents running on the Physical Protection Source and the status information. cluster_source_type (string): Specifies the type of cluster resource this source represents. host_name (string): Specifies the hostname. host_type (HostTypeEnum): Specifies the environment type for the host. 'kLinux' indicates the Linux operating system. 'kWindows' indicates the Microsoft Windows operating system. 'kAix' indicates the IBM AIX operating system. 'kSolaris' indicates the Oracle So\n\nMethod signatures and docstrings:\n- def __init__(self, agents=None, cluster_source_type=None, host_name=None, host_type=None, id=None, is_proxy_host=None, memory_size_bytes=None, name=None, networking_info=None, num_processors=None, os_name=None, mtype=None, vcs_version=None, volumes=None, vsswriters=None): Constructor for the PhysicalProtectionSource class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass PhysicalProtectionSource:\n \"\"\"Implementation of the 'PhysicalProtectionSource' model. Specifies a Protection Source in a Physical environment. Attributes: agents (list of AgentInformation): Array of Agents on the Physical Protection Source. Specifiles the agents running on the Physical Protection Source and the status information. cluster_source_type (string): Specifies the type of cluster resource this source represents. host_name (string): Specifies the hostname. host_type (HostTypeEnum): Specifies the environment type for the host. 'kLinux' indicates the Linux operating system. 'kWindows' indicates the Microsoft Windows operating system. 'kAix' indicates the IBM AIX operating system. 
'kSolaris' indicates the Oracle So\"\"\"\n\n def __init__(self, agents=None, cluster_source_type=None, host_name=None, host_type=None, id=None, is_proxy_host=None, memory_size_bytes=None, name=None, networking_info=None, num_processors=None, os_name=None, mtype=None, vcs_version=None, volumes=None, vsswriters=None):\n \"\"\"Constructor for the PhysicalProtectionSource class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.agents = agents\n self.cluster_source_type = cluster_source_type\n self.host_name = host_name\n self.host_type = host_type\n self.id = id\n self.is_proxy_host = is_proxy_host\n self.memory_size_bytes = memory_size_bytes\n self.name = name\n self.networking_info = networking_info\n self.num_processors = num_processors\n self.os_name = os_name\n self.mtype = mtype\n self.vcs_version = vcs_version\n self.volumes = volumes\n self.vsswriters = vsswriters\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n agents = None\n if dictionary.get('agents') != None:\n agents = list()\n for structure in dictionary.get('agents'):\n agents.append(cohesity_management_sdk.models.agent_information.AgentInformation.from_dictionary(structure))\n cluster_source_type = dictionary.get('clusterSourceType')\n host_name = dictionary.get('hostName')\n host_type = dictionary.get('hostType')\n id = cohesity_management_sdk.models.universal_id.UniversalId.from_dictionary(dictionary.get('id')) if dictionary.get('id') else None\n is_proxy_host = dictionary.get('isProxyHost')\n memory_size_bytes = dictionary.get('memorySizeBytes')\n name = dictionary.get('name')\n networking_info = cohesity_management_sdk.models.networking_information.NetworkingInformation.from_dictionary(dictionary.get('networkingInfo')) if dictionary.get('networkingInfo') else None\n num_processors = dictionary.get('numProcessors')\n os_name = dictionary.get('osName')\n mtype = dictionary.get('type')\n vcs_version = dictionary.get('vcsVersion')\n volumes = None\n if dictionary.get('volumes') != None:\n volumes = list()\n for structure in dictionary.get('volumes'):\n volumes.append(cohesity_management_sdk.models.physical_volume.PhysicalVolume.from_dictionary(structure))\n vsswriters = None\n if dictionary.get('vsswriters') != None:\n vsswriters = list()\n for structure in dictionary.get('vsswriters'):\n vsswriters.append(cohesity_management_sdk.models.vss_writer.VssWriter.from_dictionary(structure))\n return cls(agents, cluster_source_type, host_name, host_type, id, is_proxy_host, memory_size_bytes, name, networking_info, num_processors, os_name, mtype, vcs_version, volumes, vsswriters)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass PhysicalProtectionSource:\n \"\"\"Implementation of the 'PhysicalProtectionSource' model. Specifies a Protection Source in a Physical environment. Attributes: agents (list of AgentInformation): Array of Agents on the Physical Protection Source. Specifiles the agents running on the Physical Protection Source and the status information. cluster_source_type (string): Specifies the type of cluster resource this source represents. 
host_name (string): Specifies the hostname. host_type (HostTypeEnum): Specifies the environment type for the host. 'kLinux' indicates the Linux operating system. 'kWindows' indicates the Microsoft Windows operating system. 'kAix' indicates the IBM AIX operating system. 'kSolaris' indicates the Oracle So\"\"\"\n\n def __init__(self, agents=None, cluster_source_type=None, host_name=None, host_type=None, id=None, is_proxy_host=None, memory_size_bytes=None, name=None, networking_info=None, num_processors=None, os_name=None, mtype=None, vcs_version=None, volumes=None, vsswriters=None):\n \"\"\"Constructor for the PhysicalProtectionSource class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PhysicalProtectionSource:\n \"\"\"Implementation of the 'PhysicalProtectionSource' model. Specifies a Protection Source in a Physical environment. Attributes: agents (list of AgentInformation): Array of Agents on the Physical Protection Source. Specifiles the agents running on the Physical Protection Source and the status information. cluster_source_type (string): Specifies the type of cluster resource this source represents. host_name (string): Specifies the hostname. host_type (HostTypeEnum): Specifies the environment type for the host. 'kLinux' indicates the Linux operating system. 'kWindows' indicates the Microsoft Windows operating system. 'kAix' indicates the IBM AIX operating system. 'kSolaris' indicates the Oracle So\"\"\"\n\n def __init__(self, agents=None, cluster_source_type=None, host_name=None, host_type=None, id=None, is_proxy_host=None, memory_size_bytes=None, name=None, networking_info=None, num_processors=None, os_name=None, mtype=None, vcs_version=None, volumes=None, vsswriters=None):\n \"\"\"Constructor for the PhysicalProtectionSource class\"\"\"\n self.agents = agents\n self.cluster_source_type = cluster_source_type\n self.host_name = host_name\n self.host_type = host_type\n self.id = id\n self.is_proxy_host = is_proxy_host\n self.memory_size_bytes = memory_size_bytes\n self.name = name\n self.networking_info = networking_info\n self.num_processors = num_processors\n self.os_name = os_name\n self.mtype = mtype\n self.vcs_version = vcs_version\n self.volumes = volumes\n self.vsswriters = vsswriters\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n agents = None\n if dictionary.get('agents') != None:\n agents = list()\n for structure in dictionary.get('agents'):\n agents.append(cohesity_management_sdk.models.agent_information.AgentInformation.from_dictionary(structure))\n cluster_source_type = dictionary.get('clusterSourceType')\n host_name = dictionary.get('hostName')\n host_type = dictionary.get('hostType')\n id = cohesity_management_sdk.models.universal_id.UniversalId.from_dictionary(dictionary.get('id')) if dictionary.get('id') else None\n is_proxy_host = dictionary.get('isProxyHost')\n memory_size_bytes = dictionary.get('memorySizeBytes')\n name = dictionary.get('name')\n networking_info = cohesity_management_sdk.models.networking_information.NetworkingInformation.from_dictionary(dictionary.get('networkingInfo')) if dictionary.get('networkingInfo') else None\n num_processors = dictionary.get('numProcessors')\n os_name = dictionary.get('osName')\n mtype = dictionary.get('type')\n vcs_version = dictionary.get('vcsVersion')\n volumes = None\n if dictionary.get('volumes') != None:\n volumes = list()\n for structure in dictionary.get('volumes'):\n volumes.append(cohesity_management_sdk.models.physical_volume.PhysicalVolume.from_dictionary(structure))\n vsswriters = None\n if dictionary.get('vsswriters') != None:\n vsswriters = list()\n for structure in dictionary.get('vsswriters'):\n vsswriters.append(cohesity_management_sdk.models.vss_writer.VssWriter.from_dictionary(structure))\n return cls(agents, cluster_source_type, host_name, host_type, id, is_proxy_host, memory_size_bytes, name, networking_info, num_processors, os_name, mtype, vcs_version, volumes, vsswriters)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/physical_protection_source.py", "source_repo": "cohesity/management-sdk-python", "split": "val", "star_events_count": 24} {"blob_id": "f9459571eb87e957df89eb1796c9a42d77506896", "bodies": ["base.Layer.__init__(self, **kwargs)\nself._num_output = self.spec.get('num_output', 0)\nif self._num_output <= 0:\n raise base.InvalidLayerError('Incorrect or unspecified num_output for %s' % self.name)\nself._reg = self.spec.get('reg', None)\nself._filler = self.spec.get('filler', None)\nself._weight = base.Blob(filler=self._filler)\nself._has_bias = self.spec.get('bias', True)\nif self._has_bias:\n self._bias_filler = self.spec.get('bias_filler', None)\n self._bias = base.Blob(filler=self._bias_filler)\n self._param = [self._weight, self._bias]\nelse:\n self._param = [self._weight]", "features = bottom[0].data()\noutput = top[0].init_data(features.shape[:-1] + (self._num_output,), features.dtype, setdata=False)\nif not self._weight.has_data():\n self._weight.init_data((features.shape[-1], self._num_output), features.dtype)\nif self._has_bias and (not self._bias.has_data()):\n self._bias.init_data(self._num_output, features.dtype)\nweight = self._weight.data()\nblasdot.dot_lastdim(features, weight, out=output)\nif self._has_bias:\n output += self._bias.data()", "top_diff = top[0].diff()\nfeatures = bottom[0].data()\nweight_diff = self._weight.init_diff(setzero=False)\nblasdot.dot_firstdims(features, top_diff, out=weight_diff)\nif self._has_bias:\n bias_diff = self._bias.init_diff(setzero=False)\n bias_diff[:] = top_diff.reshape(np.prod(top_diff.shape[:-1]), top_diff.shape[-1]).sum(0)\nif propagate_down:\n bottom_diff = bottom[0].init_diff(setzero=False)\n blasdot.dot_lastdim(top_diff, 
self._weight.data().T, out=bottom_diff)\nif self._reg is not None:\n return self._reg.reg(self._weight)\nelse:\n return 0.0", "self._weight.update()\nif self._has_bias:\n self._bias.update()"], "bodies_text": "<|body_start_0|>\n base.Layer.__init__(self, **kwargs)\n self._num_output = self.spec.get('num_output', 0)\n if self._num_output <= 0:\n raise base.InvalidLayerError('Incorrect or unspecified num_output for %s' % self.name)\n self._reg = self.spec.get('reg', None)\n self._filler = self.spec.get('filler', None)\n self._weight = base.Blob(filler=self._filler)\n self._has_bias = self.spec.get('bias', True)\n if self._has_bias:\n self._bias_filler = self.spec.get('bias_filler', None)\n self._bias = base.Blob(filler=self._bias_filler)\n self._param = [self._weight, self._bias]\n else:\n self._param = [self._weight]\n<|end_body_0|>\n\n<|body_start_1|>\n features = bottom[0].data()\n output = top[0].init_data(features.shape[:-1] + (self._num_output,), features.dtype, setdata=False)\n if not self._weight.has_data():\n self._weight.init_data((features.shape[-1], self._num_output), features.dtype)\n if self._has_bias and (not self._bias.has_data()):\n self._bias.init_data(self._num_output, features.dtype)\n weight = self._weight.data()\n blasdot.dot_lastdim(features, weight, out=output)\n if self._has_bias:\n output += self._bias.data()\n<|end_body_1|>\n\n<|body_start_2|>\n top_diff = top[0].diff()\n features = bottom[0].data()\n weight_diff = self._weight.init_diff(setzero=False)\n blasdot.dot_firstdims(features, top_diff, out=weight_diff)\n if self._has_bias:\n bias_diff = self._bias.init_diff(setzero=False)\n bias_diff[:] = top_diff.reshape(np.prod(top_diff.shape[:-1]), top_diff.shape[-1]).sum(0)\n if propagate_down:\n bottom_diff = bottom[0].init_diff(setzero=False)\n blasdot.dot_lastdim(top_diff, self._weight.data().T, out=bottom_diff)\n if self._reg is not None:\n return self._reg.reg(self._weight)\n else:\n return 0.0\n<|end_body_2|>\n\n<|body_start_3|>\n self._weight.update()\n if self._has_bias:\n self._bias.update()\n<|end_body_3|>\n", "class_docstring": "A layer that implements the inner product.", "class_name": "InnerProductLayer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InnerProductLayer:\n \"\"\"A layer that implements the inner product.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initializes an inner product layer. kwargs: num_output: the number of outputs. reg: the regularizer to be used to add regularization terms. should be a decaf.base.Regularizer instance. Default None. filler: a filler to initialize the weights. Should be a decaf.base.Filler instance. Default None. bias_filler: a filler to initialize the bias. bias: if True, the inner product will contain a bias term. 
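The forward and backward bodies of `InnerProductLayer` compute y = xW (+ b) over the last input dimension and the matching gradients through the `blasdot` helpers. A plain-NumPy sketch of the same math for the two-dimensional case, assuming `dot_lastdim` and `dot_firstdims` reduce to ordinary matrix products there (function names below are illustrative, not part of decaf):

    import numpy as np

    def inner_product_forward(x, w, b=None):
        # x: (batch, in_dim), w: (in_dim, out_dim) -> (batch, out_dim)
        y = x @ w
        if b is not None:
            y = y + b
        return y

    def inner_product_backward(x, w, dy):
        # Gradients matching the layer's backward pass above.
        dw = x.T @ dy          # blasdot.dot_firstdims(features, top_diff)
        db = dy.sum(axis=0)    # bias gradient: sum over the batch dimension
        dx = dy @ w.T          # blasdot.dot_lastdim(top_diff, weight.T)
        return dx, dw, db

    rng = np.random.default_rng(0)
    x = rng.standard_normal((4, 3))
    w = rng.standard_normal((3, 2))
    dy = rng.standard_normal((4, 2))
    dx, dw, db = inner_product_backward(x, w, dy)
    assert dx.shape == x.shape and dw.shape == w.shape and db.shape == (2,)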
Default True.\"\"\"\n <|body_0|>\n\n def forward(self, bottom, top):\n \"\"\"Computes the forward pass.\"\"\"\n <|body_1|>\n\n def backward(self, bottom, top, propagate_down):\n \"\"\"Computes the backward pass.\"\"\"\n <|body_2|>\n\n def update(self):\n \"\"\"Updates the parameters.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n base.Layer.__init__(self, **kwargs)\n self._num_output = self.spec.get('num_output', 0)\n if self._num_output <= 0:\n raise base.InvalidLayerError('Incorrect or unspecified num_output for %s' % self.name)\n self._reg = self.spec.get('reg', None)\n self._filler = self.spec.get('filler', None)\n self._weight = base.Blob(filler=self._filler)\n self._has_bias = self.spec.get('bias', True)\n if self._has_bias:\n self._bias_filler = self.spec.get('bias_filler', None)\n self._bias = base.Blob(filler=self._bias_filler)\n self._param = [self._weight, self._bias]\n else:\n self._param = [self._weight]\n<|end_body_0|>\n\n<|body_start_1|>\n features = bottom[0].data()\n output = top[0].init_data(features.shape[:-1] + (self._num_output,), features.dtype, setdata=False)\n if not self._weight.has_data():\n self._weight.init_data((features.shape[-1], self._num_output), features.dtype)\n if self._has_bias and (not self._bias.has_data()):\n self._bias.init_data(self._num_output, features.dtype)\n weight = self._weight.data()\n blasdot.dot_lastdim(features, weight, out=output)\n if self._has_bias:\n output += self._bias.data()\n<|end_body_1|>\n\n<|body_start_2|>\n top_diff = top[0].diff()\n features = bottom[0].data()\n weight_diff = self._weight.init_diff(setzero=False)\n blasdot.dot_firstdims(features, top_diff, out=weight_diff)\n if self._has_bias:\n bias_diff = self._bias.init_diff(setzero=False)\n bias_diff[:] = top_diff.reshape(np.prod(top_diff.shape[:-1]), top_diff.shape[-1]).sum(0)\n if propagate_down:\n bottom_diff = bottom[0].init_diff(setzero=False)\n blasdot.dot_lastdim(top_diff, self._weight.data().T, out=bottom_diff)\n if self._reg is not None:\n return self._reg.reg(self._weight)\n else:\n return 0.0\n<|end_body_2|>\n\n<|body_start_3|>\n self._weight.update()\n if self._has_bias:\n self._bias.update()\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000120", "length_bytes": 3377, "license_type": "no_license", "methods": [{"docstring": "Initializes an inner product layer. kwargs: num_output: the number of outputs. reg: the regularizer to be used to add regularization terms. should be a decaf.base.Regularizer instance. Default None. filler: a filler to initialize the weights. Should be a decaf.base.Filler instance. Default None. bias_filler: a filler to initialize the bias. bias: if True, the inner product will contain a bias term. Default True.", "name": "__init__", "signature": "def __init__(self, **kwargs)"}, {"docstring": "Computes the forward pass.", "name": "forward", "signature": "def forward(self, bottom, top)"}, {"docstring": "Computes the backward pass.", "name": "backward", "signature": "def backward(self, bottom, top, propagate_down)"}, {"docstring": "Updates the parameters.", "name": "update", "signature": "def update(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_006439", "prompt": "Implement the Python class `InnerProductLayer` described below.\n\nClass description:\nA layer that implements the inner product.\n\nMethod signatures and docstrings:\n- def __init__(self, **kwargs): Initializes an inner product layer. kwargs: num_output: the number of outputs. 
reg: the regularizer to be used to add regularization terms. should be a decaf.base.Regularizer instance. Default None. filler: a filler to initialize the weights. Should be a decaf.base.Filler instance. Default None. bias_filler: a filler to initialize the bias. bias: if True, the inner product will contain a bias term. Default True.\n- def forward(self, bottom, top): Computes the forward pass.\n- def backward(self, bottom, top, propagate_down): Computes the backward pass.\n- def update(self): Updates the parameters.", "prompted_full_text": "Implement the Python class `InnerProductLayer` described below.\n\nClass description:\nA layer that implements the inner product.\n\nMethod signatures and docstrings:\n- def __init__(self, **kwargs): Initializes an inner product layer. kwargs: num_output: the number of outputs. reg: the regularizer to be used to add regularization terms. should be a decaf.base.Regularizer instance. Default None. filler: a filler to initialize the weights. Should be a decaf.base.Filler instance. Default None. bias_filler: a filler to initialize the bias. bias: if True, the inner product will contain a bias term. Default True.\n- def forward(self, bottom, top): Computes the forward pass.\n- def backward(self, bottom, top, propagate_down): Computes the backward pass.\n- def update(self): Updates the parameters.\n\n<|skeleton|>\nclass InnerProductLayer:\n \"\"\"A layer that implements the inner product.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initializes an inner product layer. kwargs: num_output: the number of outputs. reg: the regularizer to be used to add regularization terms. should be a decaf.base.Regularizer instance. Default None. filler: a filler to initialize the weights. Should be a decaf.base.Filler instance. Default None. bias_filler: a filler to initialize the bias. bias: if True, the inner product will contain a bias term. 
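`backward` above returns `self._reg.reg(self._weight)` as the layer's regularization loss, or 0.0 when no regularizer is configured. The `decaf.base.Regularizer` interface itself is not included in this record; a hypothetical L2 regularizer compatible with that call shape, assuming the blob exposes `data()` and `diff()` arrays the way the layer code uses them, could look like:

    import numpy as np

    class L2Regularizer:
        """Hypothetical regularizer: returns 0.5 * weight * ||W||^2 and
        accumulates its gradient into the blob's diff (assumed contract)."""

        def __init__(self, weight=1e-4):
            self._weight = weight

        def reg(self, blob):
            data = blob.data()
            blob.diff()[...] += self._weight * data  # d(penalty)/dW
            return 0.5 * self._weight * float(np.sum(data * data))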
Default True.\"\"\"\n <|body_0|>\n\n def forward(self, bottom, top):\n \"\"\"Computes the forward pass.\"\"\"\n <|body_1|>\n\n def backward(self, bottom, top, propagate_down):\n \"\"\"Computes the backward pass.\"\"\"\n <|body_2|>\n\n def update(self):\n \"\"\"Updates the parameters.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n base.Layer.__init__(self, **kwargs)\n self._num_output = self.spec.get('num_output', 0)\n if self._num_output <= 0:\n raise base.InvalidLayerError('Incorrect or unspecified num_output for %s' % self.name)\n self._reg = self.spec.get('reg', None)\n self._filler = self.spec.get('filler', None)\n self._weight = base.Blob(filler=self._filler)\n self._has_bias = self.spec.get('bias', True)\n if self._has_bias:\n self._bias_filler = self.spec.get('bias_filler', None)\n self._bias = base.Blob(filler=self._bias_filler)\n self._param = [self._weight, self._bias]\n else:\n self._param = [self._weight]\n<|end_body_0|>\n\n<|body_start_1|>\n features = bottom[0].data()\n output = top[0].init_data(features.shape[:-1] + (self._num_output,), features.dtype, setdata=False)\n if not self._weight.has_data():\n self._weight.init_data((features.shape[-1], self._num_output), features.dtype)\n if self._has_bias and (not self._bias.has_data()):\n self._bias.init_data(self._num_output, features.dtype)\n weight = self._weight.data()\n blasdot.dot_lastdim(features, weight, out=output)\n if self._has_bias:\n output += self._bias.data()\n<|end_body_1|>\n\n<|body_start_2|>\n top_diff = top[0].diff()\n features = bottom[0].data()\n weight_diff = self._weight.init_diff(setzero=False)\n blasdot.dot_firstdims(features, top_diff, out=weight_diff)\n if self._has_bias:\n bias_diff = self._bias.init_diff(setzero=False)\n bias_diff[:] = top_diff.reshape(np.prod(top_diff.shape[:-1]), top_diff.shape[-1]).sum(0)\n if propagate_down:\n bottom_diff = bottom[0].init_diff(setzero=False)\n blasdot.dot_lastdim(top_diff, self._weight.data().T, out=bottom_diff)\n if self._reg is not None:\n return self._reg.reg(self._weight)\n else:\n return 0.0\n<|end_body_2|>\n\n<|body_start_3|>\n self._weight.update()\n if self._has_bias:\n self._bias.update()\n<|end_body_3|>\n", "revision_id": "6fa4cdfbd0d0b8d486d7146bf1e32edd3662fec4", "skeleton": "<|skeleton|>\nclass InnerProductLayer:\n \"\"\"A layer that implements the inner product.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initializes an inner product layer. kwargs: num_output: the number of outputs. reg: the regularizer to be used to add regularization terms. should be a decaf.base.Regularizer instance. Default None. filler: a filler to initialize the weights. Should be a decaf.base.Filler instance. Default None. bias_filler: a filler to initialize the bias. bias: if True, the inner product will contain a bias term. Default True.\"\"\"\n <|body_0|>\n\n def forward(self, bottom, top):\n \"\"\"Computes the forward pass.\"\"\"\n <|body_1|>\n\n def backward(self, bottom, top, propagate_down):\n \"\"\"Computes the backward pass.\"\"\"\n <|body_2|>\n\n def update(self):\n \"\"\"Updates the parameters.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class InnerProductLayer:\n \"\"\"A layer that implements the inner product.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initializes an inner product layer. kwargs: num_output: the number of outputs. reg: the regularizer to be used to add regularization terms. 
should be a decaf.base.Regularizer instance. Default None. filler: a filler to initialize the weights. Should be a decaf.base.Filler instance. Default None. bias_filler: a filler to initialize the bias. bias: if True, the inner product will contain a bias term. Default True.\"\"\"\n base.Layer.__init__(self, **kwargs)\n self._num_output = self.spec.get('num_output', 0)\n if self._num_output <= 0:\n raise base.InvalidLayerError('Incorrect or unspecified num_output for %s' % self.name)\n self._reg = self.spec.get('reg', None)\n self._filler = self.spec.get('filler', None)\n self._weight = base.Blob(filler=self._filler)\n self._has_bias = self.spec.get('bias', True)\n if self._has_bias:\n self._bias_filler = self.spec.get('bias_filler', None)\n self._bias = base.Blob(filler=self._bias_filler)\n self._param = [self._weight, self._bias]\n else:\n self._param = [self._weight]\n\n def forward(self, bottom, top):\n \"\"\"Computes the forward pass.\"\"\"\n features = bottom[0].data()\n output = top[0].init_data(features.shape[:-1] + (self._num_output,), features.dtype, setdata=False)\n if not self._weight.has_data():\n self._weight.init_data((features.shape[-1], self._num_output), features.dtype)\n if self._has_bias and (not self._bias.has_data()):\n self._bias.init_data(self._num_output, features.dtype)\n weight = self._weight.data()\n blasdot.dot_lastdim(features, weight, out=output)\n if self._has_bias:\n output += self._bias.data()\n\n def backward(self, bottom, top, propagate_down):\n \"\"\"Computes the backward pass.\"\"\"\n top_diff = top[0].diff()\n features = bottom[0].data()\n weight_diff = self._weight.init_diff(setzero=False)\n blasdot.dot_firstdims(features, top_diff, out=weight_diff)\n if self._has_bias:\n bias_diff = self._bias.init_diff(setzero=False)\n bias_diff[:] = top_diff.reshape(np.prod(top_diff.shape[:-1]), top_diff.shape[-1]).sum(0)\n if propagate_down:\n bottom_diff = bottom[0].init_diff(setzero=False)\n blasdot.dot_lastdim(top_diff, self._weight.data().T, out=bottom_diff)\n if self._reg is not None:\n return self._reg.reg(self._weight)\n else:\n return 0.0\n\n def update(self):\n \"\"\"Updates the parameters.\"\"\"\n self._weight.update()\n if self._has_bias:\n self._bias.update()\n", "source": "the_stack_v2_python_sparse", "source_path": "decaf/layers/innerproduct.py", "source_repo": "UCBAIR/decaf-release", "split": "val", "star_events_count": 62} {"blob_id": "fe6133b057ca146dfd8c8446bb3a4c85bb58d49e", "bodies": ["self.upper_num = 320\nself.x = np.random.randint(-1, self.upper_num, size=(6000, 200)).astype('int64')\nself.out = count(self.x, self.upper_num)\nself.place = paddle.CUDAPlace(0)", "paddle.enable_static()\nwith paddle.static.program_guard(paddle.static.Program()):\n x = paddle.static.data('x', self.x.shape, dtype='int64')\n out = utils._number_count(x, self.upper_num)\n exe = paddle.static.Executor(self.place)\n res = exe.run(feed={'x': self.x}, fetch_list=[out])\n assert np.allclose(res, self.out)\nprint('test_MoE_number_count_static mode passed!')", "paddle.disable_static()\nx = paddle.to_tensor(self.x)\nout = utils._number_count(x, self.upper_num)\nassert np.allclose(out.numpy(), self.out)\nprint('test_MoE_number_count_dygraph passed!')"], "bodies_text": "<|body_start_0|>\n self.upper_num = 320\n self.x = np.random.randint(-1, self.upper_num, size=(6000, 200)).astype('int64')\n self.out = count(self.x, self.upper_num)\n self.place = paddle.CUDAPlace(0)\n<|end_body_0|>\n\n<|body_start_1|>\n paddle.enable_static()\n with 
paddle.static.program_guard(paddle.static.Program()):\n x = paddle.static.data('x', self.x.shape, dtype='int64')\n out = utils._number_count(x, self.upper_num)\n exe = paddle.static.Executor(self.place)\n res = exe.run(feed={'x': self.x}, fetch_list=[out])\n assert np.allclose(res, self.out)\n print('test_MoE_number_count_static mode passed!')\n<|end_body_1|>\n\n<|body_start_2|>\n paddle.disable_static()\n x = paddle.to_tensor(self.x)\n out = utils._number_count(x, self.upper_num)\n assert np.allclose(out.numpy(), self.out)\n print('test_MoE_number_count_dygraph passed!')\n<|end_body_2|>\n", "class_docstring": "TestNumberCountAPI", "class_name": "TestNumberCountAPI", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestNumberCountAPI:\n \"\"\"TestNumberCountAPI\"\"\"\n\n def setUp(self):\n \"\"\"setUp\"\"\"\n <|body_0|>\n\n def test_MoE_number_count_static(self):\n \"\"\"test_MoE_number_count_static\"\"\"\n <|body_1|>\n\n def test_MoE_number_count_dygraph(self):\n \"\"\"test_MoE_number_count_dygraph\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.upper_num = 320\n self.x = np.random.randint(-1, self.upper_num, size=(6000, 200)).astype('int64')\n self.out = count(self.x, self.upper_num)\n self.place = paddle.CUDAPlace(0)\n<|end_body_0|>\n\n<|body_start_1|>\n paddle.enable_static()\n with paddle.static.program_guard(paddle.static.Program()):\n x = paddle.static.data('x', self.x.shape, dtype='int64')\n out = utils._number_count(x, self.upper_num)\n exe = paddle.static.Executor(self.place)\n res = exe.run(feed={'x': self.x}, fetch_list=[out])\n assert np.allclose(res, self.out)\n print('test_MoE_number_count_static mode passed!')\n<|end_body_1|>\n\n<|body_start_2|>\n paddle.disable_static()\n x = paddle.to_tensor(self.x)\n out = utils._number_count(x, self.upper_num)\n assert np.allclose(out.numpy(), self.out)\n print('test_MoE_number_count_dygraph passed!')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000121", "length_bytes": 2365, "license_type": "no_license", "methods": [{"docstring": "setUp", "name": "setUp", "signature": "def setUp(self)"}, {"docstring": "test_MoE_number_count_static", "name": "test_MoE_number_count_static", "signature": "def test_MoE_number_count_static(self)"}, {"docstring": "test_MoE_number_count_dygraph", "name": "test_MoE_number_count_dygraph", "signature": "def test_MoE_number_count_dygraph(self)"}], "n_methods": 3, "prompt": "Implement the Python class `TestNumberCountAPI` described below.\n\nClass description:\nTestNumberCountAPI\n\nMethod signatures and docstrings:\n- def setUp(self): setUp\n- def test_MoE_number_count_static(self): test_MoE_number_count_static\n- def test_MoE_number_count_dygraph(self): test_MoE_number_count_dygraph", "prompted_full_text": "Implement the Python class `TestNumberCountAPI` described below.\n\nClass description:\nTestNumberCountAPI\n\nMethod signatures and docstrings:\n- def setUp(self): setUp\n- def test_MoE_number_count_static(self): test_MoE_number_count_static\n- def test_MoE_number_count_dygraph(self): test_MoE_number_count_dygraph\n\n<|skeleton|>\nclass TestNumberCountAPI:\n \"\"\"TestNumberCountAPI\"\"\"\n\n def setUp(self):\n \"\"\"setUp\"\"\"\n <|body_0|>\n\n def test_MoE_number_count_static(self):\n \"\"\"test_MoE_number_count_static\"\"\"\n <|body_1|>\n\n def test_MoE_number_count_dygraph(self):\n \"\"\"test_MoE_number_count_dygraph\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.upper_num = 320\n self.x = 
np.random.randint(-1, self.upper_num, size=(6000, 200)).astype('int64')\n self.out = count(self.x, self.upper_num)\n self.place = paddle.CUDAPlace(0)\n<|end_body_0|>\n\n<|body_start_1|>\n paddle.enable_static()\n with paddle.static.program_guard(paddle.static.Program()):\n x = paddle.static.data('x', self.x.shape, dtype='int64')\n out = utils._number_count(x, self.upper_num)\n exe = paddle.static.Executor(self.place)\n res = exe.run(feed={'x': self.x}, fetch_list=[out])\n assert np.allclose(res, self.out)\n print('test_MoE_number_count_static mode passed!')\n<|end_body_1|>\n\n<|body_start_2|>\n paddle.disable_static()\n x = paddle.to_tensor(self.x)\n out = utils._number_count(x, self.upper_num)\n assert np.allclose(out.numpy(), self.out)\n print('test_MoE_number_count_dygraph passed!')\n<|end_body_2|>\n", "revision_id": "bd3790ce72a2a26611b5eda3901651b5a809348f", "skeleton": "<|skeleton|>\nclass TestNumberCountAPI:\n \"\"\"TestNumberCountAPI\"\"\"\n\n def setUp(self):\n \"\"\"setUp\"\"\"\n <|body_0|>\n\n def test_MoE_number_count_static(self):\n \"\"\"test_MoE_number_count_static\"\"\"\n <|body_1|>\n\n def test_MoE_number_count_dygraph(self):\n \"\"\"test_MoE_number_count_dygraph\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestNumberCountAPI:\n \"\"\"TestNumberCountAPI\"\"\"\n\n def setUp(self):\n \"\"\"setUp\"\"\"\n self.upper_num = 320\n self.x = np.random.randint(-1, self.upper_num, size=(6000, 200)).astype('int64')\n self.out = count(self.x, self.upper_num)\n self.place = paddle.CUDAPlace(0)\n\n def test_MoE_number_count_static(self):\n \"\"\"test_MoE_number_count_static\"\"\"\n paddle.enable_static()\n with paddle.static.program_guard(paddle.static.Program()):\n x = paddle.static.data('x', self.x.shape, dtype='int64')\n out = utils._number_count(x, self.upper_num)\n exe = paddle.static.Executor(self.place)\n res = exe.run(feed={'x': self.x}, fetch_list=[out])\n assert np.allclose(res, self.out)\n print('test_MoE_number_count_static mode passed!')\n\n def test_MoE_number_count_dygraph(self):\n \"\"\"test_MoE_number_count_dygraph\"\"\"\n paddle.disable_static()\n x = paddle.to_tensor(self.x)\n out = utils._number_count(x, self.upper_num)\n assert np.allclose(out.numpy(), self.out)\n print('test_MoE_number_count_dygraph passed!')\n", "source": "the_stack_v2_python_sparse", "source_path": "distributed/CE_API/case/dist_MoE_number_count.py", "source_repo": "PaddlePaddle/PaddleTest", "split": "val", "star_events_count": 42} {"blob_id": "8140b2ff8fe6b4357ed90f12e8e9f61482997883", "bodies": ["if EVENT_ALL_EVENTS in event_types:\n event_types = EVENTS\nself._event_types = {camelcase(evt): evt for evt in event_types}\nself._custom_attributes = custom_attributes\nself._scan_interval = scan_interval\nself._async_see = async_see\nself._api = api\nself._hass = hass\nself._max_accuracy = max_accuracy\nself._skip_accuracy_on = skip_accuracy_on\nself._devices: list[DeviceModel] = []\nself._positions: list[PositionModel] = []\nself._geofences: list[GeofenceModel] = []", "try:\n await self._api.get_server()\nexcept TraccarAuthenticationException:\n _LOGGER.error('Authentication for Traccar failed')\n return False\nexcept TraccarConnectionException as exception:\n _LOGGER.error('Connection with Traccar failed - %s', exception)\n return False\nawait self._async_update()\nasync_track_time_interval(self._hass, self._async_update, self._scan_interval, cancel_on_shutdown=True)\nreturn 
True", "_LOGGER.debug('Updating device data')\ntry:\n self._devices, self._positions, self._geofences = await asyncio.gather(self._api.get_devices(), self._api.get_positions(), self._api.get_geofences())\nexcept TraccarException as ex:\n _LOGGER.error('Error while updating device data: %s', ex)\n return\nself._hass.async_create_task(self.import_device_data())\nif self._event_types:\n self._hass.async_create_task(self.import_events())", "for position in self._positions:\n device = next((dev for dev in self._devices if dev.id == position.device_id), None)\n if not device:\n continue\n attr = {ATTR_TRACKER: 'traccar', ATTR_ADDRESS: position.address, ATTR_SPEED: position.speed, ATTR_ALTITUDE: position.altitude, ATTR_MOTION: position.attributes.get('motion', False), ATTR_TRACCAR_ID: device.id, ATTR_GEOFENCE: next((geofence.name for geofence in self._geofences if geofence.id in (device.geofence_ids or [])), None), ATTR_CATEGORY: device.category, ATTR_STATUS: device.status}\n skip_accuracy_filter = False\n for custom_attr in self._custom_attributes:\n if device.attributes.get(custom_attr) is not None:\n attr[custom_attr] = position.attributes[custom_attr]\n if custom_attr in self._skip_accuracy_on:\n skip_accuracy_filter = True\n if position.attributes.get(custom_attr) is not None:\n attr[custom_attr] = position.attributes[custom_attr]\n if custom_attr in self._skip_accuracy_on:\n skip_accuracy_filter = True\n accuracy = position.accuracy or 0.0\n if not skip_accuracy_filter and self._max_accuracy > 0 and (accuracy > self._max_accuracy):\n _LOGGER.debug('Excluded position by accuracy filter: %f (%s)', accuracy, attr[ATTR_TRACCAR_ID])\n continue\n await self._async_see(dev_id=slugify(device.name), gps=(position.latitude, position.longitude), gps_accuracy=accuracy, battery=position.attributes.get('batteryLevel', -1), attributes=attr)", "start_intervel = dt_util.utcnow().replace(tzinfo=None)\nevents = await self._api.get_reports_events(devices=[device.id for device in self._devices], start_time=start_intervel, end_time=start_intervel - self._scan_interval, event_types=self._event_types.keys())\nif events is not None:\n for event in events:\n self._hass.bus.async_fire(f'traccar_{self._event_types.get(event.type)}', {'device_traccar_id': event.device_id, 'device_name': next((dev.name for dev in self._devices if dev.id == event.device_id), None), 'type': event.type, 'serverTime': event.event_time, 'attributes': event.attributes})"], "bodies_text": "<|body_start_0|>\n if EVENT_ALL_EVENTS in event_types:\n event_types = EVENTS\n self._event_types = {camelcase(evt): evt for evt in event_types}\n self._custom_attributes = custom_attributes\n self._scan_interval = scan_interval\n self._async_see = async_see\n self._api = api\n self._hass = hass\n self._max_accuracy = max_accuracy\n self._skip_accuracy_on = skip_accuracy_on\n self._devices: list[DeviceModel] = []\n self._positions: list[PositionModel] = []\n self._geofences: list[GeofenceModel] = []\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n await self._api.get_server()\n except TraccarAuthenticationException:\n _LOGGER.error('Authentication for Traccar failed')\n return False\n except TraccarConnectionException as exception:\n _LOGGER.error('Connection with Traccar failed - %s', exception)\n return False\n await self._async_update()\n async_track_time_interval(self._hass, self._async_update, self._scan_interval, cancel_on_shutdown=True)\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n _LOGGER.debug('Updating device data')\n try:\n self._devices, 
self._positions, self._geofences = await asyncio.gather(self._api.get_devices(), self._api.get_positions(), self._api.get_geofences())\n except TraccarException as ex:\n _LOGGER.error('Error while updating device data: %s', ex)\n return\n self._hass.async_create_task(self.import_device_data())\n if self._event_types:\n self._hass.async_create_task(self.import_events())\n<|end_body_2|>\n\n<|body_start_3|>\n for position in self._positions:\n device = next((dev for dev in self._devices if dev.id == position.device_id), None)\n if not device:\n continue\n attr = {ATTR_TRACKER: 'traccar', ATTR_ADDRESS: position.address, ATTR_SPEED: position.speed, ATTR_ALTITUDE: position.altitude, ATTR_MOTION: position.attributes.get('motion', False), ATTR_TRACCAR_ID: device.id, ATTR_GEOFENCE: next((geofence.name for geofence in self._geofences if geofence.id in (device.geofence_ids or [])), None), ATTR_CATEGORY: device.category, ATTR_STATUS: device.status}\n skip_accuracy_filter = False\n for custom_attr in self._custom_attributes:\n if device.attributes.get(custom_attr) is not None:\n attr[custom_attr] = position.attributes[custom_attr]\n if custom_attr in self._skip_accuracy_on:\n skip_accuracy_filter = True\n if position.attributes.get(custom_attr) is not None:\n attr[custom_attr] = position.attributes[custom_attr]\n if custom_attr in self._skip_accuracy_on:\n skip_accuracy_filter = True\n accuracy = position.accuracy or 0.0\n if not skip_accuracy_filter and self._max_accuracy > 0 and (accuracy > self._max_accuracy):\n _LOGGER.debug('Excluded position by accuracy filter: %f (%s)', accuracy, attr[ATTR_TRACCAR_ID])\n continue\n await self._async_see(dev_id=slugify(device.name), gps=(position.latitude, position.longitude), gps_accuracy=accuracy, battery=position.attributes.get('batteryLevel', -1), attributes=attr)\n<|end_body_3|>\n\n<|body_start_4|>\n start_intervel = dt_util.utcnow().replace(tzinfo=None)\n events = await self._api.get_reports_events(devices=[device.id for device in self._devices], start_time=start_intervel, end_time=start_intervel - self._scan_interval, event_types=self._event_types.keys())\n if events is not None:\n for event in events:\n self._hass.bus.async_fire(f'traccar_{self._event_types.get(event.type)}', {'device_traccar_id': event.device_id, 'device_name': next((dev.name for dev in self._devices if dev.id == event.device_id), None), 'type': event.type, 'serverTime': event.event_time, 'attributes': event.attributes})\n<|end_body_4|>\n", "class_docstring": "Define an object to retrieve Traccar data.", "class_name": "TraccarScanner", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TraccarScanner:\n \"\"\"Define an object to retrieve Traccar data.\"\"\"\n\n def __init__(self, api: ApiClient, hass: HomeAssistant, async_see: AsyncSeeCallback, scan_interval: timedelta, max_accuracy: int, skip_accuracy_on: bool, custom_attributes: list[str], event_types: list[str]) -> None:\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n async def async_init(self):\n \"\"\"Further initialize connection to Traccar.\"\"\"\n <|body_1|>\n\n async def _async_update(self, now=None):\n \"\"\"Update info from Traccar.\"\"\"\n <|body_2|>\n\n async def import_device_data(self):\n \"\"\"Import device data from Traccar.\"\"\"\n <|body_3|>\n\n async def import_events(self):\n \"\"\"Import events from Traccar.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if EVENT_ALL_EVENTS in event_types:\n event_types = EVENTS\n self._event_types = 
{camelcase(evt): evt for evt in event_types}\n self._custom_attributes = custom_attributes\n self._scan_interval = scan_interval\n self._async_see = async_see\n self._api = api\n self._hass = hass\n self._max_accuracy = max_accuracy\n self._skip_accuracy_on = skip_accuracy_on\n self._devices: list[DeviceModel] = []\n self._positions: list[PositionModel] = []\n self._geofences: list[GeofenceModel] = []\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n await self._api.get_server()\n except TraccarAuthenticationException:\n _LOGGER.error('Authentication for Traccar failed')\n return False\n except TraccarConnectionException as exception:\n _LOGGER.error('Connection with Traccar failed - %s', exception)\n return False\n await self._async_update()\n async_track_time_interval(self._hass, self._async_update, self._scan_interval, cancel_on_shutdown=True)\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n _LOGGER.debug('Updating device data')\n try:\n self._devices, self._positions, self._geofences = await asyncio.gather(self._api.get_devices(), self._api.get_positions(), self._api.get_geofences())\n except TraccarException as ex:\n _LOGGER.error('Error while updating device data: %s', ex)\n return\n self._hass.async_create_task(self.import_device_data())\n if self._event_types:\n self._hass.async_create_task(self.import_events())\n<|end_body_2|>\n\n<|body_start_3|>\n for position in self._positions:\n device = next((dev for dev in self._devices if dev.id == position.device_id), None)\n if not device:\n continue\n attr = {ATTR_TRACKER: 'traccar', ATTR_ADDRESS: position.address, ATTR_SPEED: position.speed, ATTR_ALTITUDE: position.altitude, ATTR_MOTION: position.attributes.get('motion', False), ATTR_TRACCAR_ID: device.id, ATTR_GEOFENCE: next((geofence.name for geofence in self._geofences if geofence.id in (device.geofence_ids or [])), None), ATTR_CATEGORY: device.category, ATTR_STATUS: device.status}\n skip_accuracy_filter = False\n for custom_attr in self._custom_attributes:\n if device.attributes.get(custom_attr) is not None:\n attr[custom_attr] = position.attributes[custom_attr]\n if custom_attr in self._skip_accuracy_on:\n skip_accuracy_filter = True\n if position.attributes.get(custom_attr) is not None:\n attr[custom_attr] = position.attributes[custom_attr]\n if custom_attr in self._skip_accuracy_on:\n skip_accuracy_filter = True\n accuracy = position.accuracy or 0.0\n if not skip_accuracy_filter and self._max_accuracy > 0 and (accuracy > self._max_accuracy):\n _LOGGER.debug('Excluded position by accuracy filter: %f (%s)', accuracy, attr[ATTR_TRACCAR_ID])\n continue\n await self._async_see(dev_id=slugify(device.name), gps=(position.latitude, position.longitude), gps_accuracy=accuracy, battery=position.attributes.get('batteryLevel', -1), attributes=attr)\n<|end_body_3|>\n\n<|body_start_4|>\n start_intervel = dt_util.utcnow().replace(tzinfo=None)\n events = await self._api.get_reports_events(devices=[device.id for device in self._devices], start_time=start_intervel, end_time=start_intervel - self._scan_interval, event_types=self._event_types.keys())\n if events is not None:\n for event in events:\n self._hass.bus.async_fire(f'traccar_{self._event_types.get(event.type)}', {'device_traccar_id': event.device_id, 'device_name': next((dev.name for dev in self._devices if dev.id == event.device_id), None), 'type': event.type, 'serverTime': event.event_time, 'attributes': event.attributes})\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000122", "length_bytes": 15131, "license_type": 
"permissive", "methods": [{"docstring": "Initialize.", "name": "__init__", "signature": "def __init__(self, api: ApiClient, hass: HomeAssistant, async_see: AsyncSeeCallback, scan_interval: timedelta, max_accuracy: int, skip_accuracy_on: bool, custom_attributes: list[str], event_types: list[str]) -> None"}, {"docstring": "Further initialize connection to Traccar.", "name": "async_init", "signature": "async def async_init(self)"}, {"docstring": "Update info from Traccar.", "name": "_async_update", "signature": "async def _async_update(self, now=None)"}, {"docstring": "Import device data from Traccar.", "name": "import_device_data", "signature": "async def import_device_data(self)"}, {"docstring": "Import events from Traccar.", "name": "import_events", "signature": "async def import_events(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_002924", "prompt": "Implement the Python class `TraccarScanner` described below.\n\nClass description:\nDefine an object to retrieve Traccar data.\n\nMethod signatures and docstrings:\n- def __init__(self, api: ApiClient, hass: HomeAssistant, async_see: AsyncSeeCallback, scan_interval: timedelta, max_accuracy: int, skip_accuracy_on: bool, custom_attributes: list[str], event_types: list[str]) -> None: Initialize.\n- async def async_init(self): Further initialize connection to Traccar.\n- async def _async_update(self, now=None): Update info from Traccar.\n- async def import_device_data(self): Import device data from Traccar.\n- async def import_events(self): Import events from Traccar.", "prompted_full_text": "Implement the Python class `TraccarScanner` described below.\n\nClass description:\nDefine an object to retrieve Traccar data.\n\nMethod signatures and docstrings:\n- def __init__(self, api: ApiClient, hass: HomeAssistant, async_see: AsyncSeeCallback, scan_interval: timedelta, max_accuracy: int, skip_accuracy_on: bool, custom_attributes: list[str], event_types: list[str]) -> None: Initialize.\n- async def async_init(self): Further initialize connection to Traccar.\n- async def _async_update(self, now=None): Update info from Traccar.\n- async def import_device_data(self): Import device data from Traccar.\n- async def import_events(self): Import events from Traccar.\n\n<|skeleton|>\nclass TraccarScanner:\n \"\"\"Define an object to retrieve Traccar data.\"\"\"\n\n def __init__(self, api: ApiClient, hass: HomeAssistant, async_see: AsyncSeeCallback, scan_interval: timedelta, max_accuracy: int, skip_accuracy_on: bool, custom_attributes: list[str], event_types: list[str]) -> None:\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n async def async_init(self):\n \"\"\"Further initialize connection to Traccar.\"\"\"\n <|body_1|>\n\n async def _async_update(self, now=None):\n \"\"\"Update info from Traccar.\"\"\"\n <|body_2|>\n\n async def import_device_data(self):\n \"\"\"Import device data from Traccar.\"\"\"\n <|body_3|>\n\n async def import_events(self):\n \"\"\"Import events from Traccar.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if EVENT_ALL_EVENTS in event_types:\n event_types = EVENTS\n self._event_types = {camelcase(evt): evt for evt in event_types}\n self._custom_attributes = custom_attributes\n self._scan_interval = scan_interval\n self._async_see = async_see\n self._api = api\n self._hass = hass\n self._max_accuracy = max_accuracy\n self._skip_accuracy_on = skip_accuracy_on\n self._devices: list[DeviceModel] = []\n self._positions: list[PositionModel] = []\n self._geofences: list[GeofenceModel] = 
[]\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n await self._api.get_server()\n except TraccarAuthenticationException:\n _LOGGER.error('Authentication for Traccar failed')\n return False\n except TraccarConnectionException as exception:\n _LOGGER.error('Connection with Traccar failed - %s', exception)\n return False\n await self._async_update()\n async_track_time_interval(self._hass, self._async_update, self._scan_interval, cancel_on_shutdown=True)\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n _LOGGER.debug('Updating device data')\n try:\n self._devices, self._positions, self._geofences = await asyncio.gather(self._api.get_devices(), self._api.get_positions(), self._api.get_geofences())\n except TraccarException as ex:\n _LOGGER.error('Error while updating device data: %s', ex)\n return\n self._hass.async_create_task(self.import_device_data())\n if self._event_types:\n self._hass.async_create_task(self.import_events())\n<|end_body_2|>\n\n<|body_start_3|>\n for position in self._positions:\n device = next((dev for dev in self._devices if dev.id == position.device_id), None)\n if not device:\n continue\n attr = {ATTR_TRACKER: 'traccar', ATTR_ADDRESS: position.address, ATTR_SPEED: position.speed, ATTR_ALTITUDE: position.altitude, ATTR_MOTION: position.attributes.get('motion', False), ATTR_TRACCAR_ID: device.id, ATTR_GEOFENCE: next((geofence.name for geofence in self._geofences if geofence.id in (device.geofence_ids or [])), None), ATTR_CATEGORY: device.category, ATTR_STATUS: device.status}\n skip_accuracy_filter = False\n for custom_attr in self._custom_attributes:\n if device.attributes.get(custom_attr) is not None:\n attr[custom_attr] = position.attributes[custom_attr]\n if custom_attr in self._skip_accuracy_on:\n skip_accuracy_filter = True\n if position.attributes.get(custom_attr) is not None:\n attr[custom_attr] = position.attributes[custom_attr]\n if custom_attr in self._skip_accuracy_on:\n skip_accuracy_filter = True\n accuracy = position.accuracy or 0.0\n if not skip_accuracy_filter and self._max_accuracy > 0 and (accuracy > self._max_accuracy):\n _LOGGER.debug('Excluded position by accuracy filter: %f (%s)', accuracy, attr[ATTR_TRACCAR_ID])\n continue\n await self._async_see(dev_id=slugify(device.name), gps=(position.latitude, position.longitude), gps_accuracy=accuracy, battery=position.attributes.get('batteryLevel', -1), attributes=attr)\n<|end_body_3|>\n\n<|body_start_4|>\n start_intervel = dt_util.utcnow().replace(tzinfo=None)\n events = await self._api.get_reports_events(devices=[device.id for device in self._devices], start_time=start_intervel, end_time=start_intervel - self._scan_interval, event_types=self._event_types.keys())\n if events is not None:\n for event in events:\n self._hass.bus.async_fire(f'traccar_{self._event_types.get(event.type)}', {'device_traccar_id': event.device_id, 'device_name': next((dev.name for dev in self._devices if dev.id == event.device_id), None), 'type': event.type, 'serverTime': event.event_time, 'attributes': event.attributes})\n<|end_body_4|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass TraccarScanner:\n \"\"\"Define an object to retrieve Traccar data.\"\"\"\n\n def __init__(self, api: ApiClient, hass: HomeAssistant, async_see: AsyncSeeCallback, scan_interval: timedelta, max_accuracy: int, skip_accuracy_on: bool, custom_attributes: list[str], event_types: list[str]) -> None:\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n async def async_init(self):\n \"\"\"Further initialize connection to 
Traccar.\"\"\"\n <|body_1|>\n\n async def _async_update(self, now=None):\n \"\"\"Update info from Traccar.\"\"\"\n <|body_2|>\n\n async def import_device_data(self):\n \"\"\"Import device data from Traccar.\"\"\"\n <|body_3|>\n\n async def import_events(self):\n \"\"\"Import events from Traccar.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TraccarScanner:\n \"\"\"Define an object to retrieve Traccar data.\"\"\"\n\n def __init__(self, api: ApiClient, hass: HomeAssistant, async_see: AsyncSeeCallback, scan_interval: timedelta, max_accuracy: int, skip_accuracy_on: bool, custom_attributes: list[str], event_types: list[str]) -> None:\n \"\"\"Initialize.\"\"\"\n if EVENT_ALL_EVENTS in event_types:\n event_types = EVENTS\n self._event_types = {camelcase(evt): evt for evt in event_types}\n self._custom_attributes = custom_attributes\n self._scan_interval = scan_interval\n self._async_see = async_see\n self._api = api\n self._hass = hass\n self._max_accuracy = max_accuracy\n self._skip_accuracy_on = skip_accuracy_on\n self._devices: list[DeviceModel] = []\n self._positions: list[PositionModel] = []\n self._geofences: list[GeofenceModel] = []\n\n async def async_init(self):\n \"\"\"Further initialize connection to Traccar.\"\"\"\n try:\n await self._api.get_server()\n except TraccarAuthenticationException:\n _LOGGER.error('Authentication for Traccar failed')\n return False\n except TraccarConnectionException as exception:\n _LOGGER.error('Connection with Traccar failed - %s', exception)\n return False\n await self._async_update()\n async_track_time_interval(self._hass, self._async_update, self._scan_interval, cancel_on_shutdown=True)\n return True\n\n async def _async_update(self, now=None):\n \"\"\"Update info from Traccar.\"\"\"\n _LOGGER.debug('Updating device data')\n try:\n self._devices, self._positions, self._geofences = await asyncio.gather(self._api.get_devices(), self._api.get_positions(), self._api.get_geofences())\n except TraccarException as ex:\n _LOGGER.error('Error while updating device data: %s', ex)\n return\n self._hass.async_create_task(self.import_device_data())\n if self._event_types:\n self._hass.async_create_task(self.import_events())\n\n async def import_device_data(self):\n \"\"\"Import device data from Traccar.\"\"\"\n for position in self._positions:\n device = next((dev for dev in self._devices if dev.id == position.device_id), None)\n if not device:\n continue\n attr = {ATTR_TRACKER: 'traccar', ATTR_ADDRESS: position.address, ATTR_SPEED: position.speed, ATTR_ALTITUDE: position.altitude, ATTR_MOTION: position.attributes.get('motion', False), ATTR_TRACCAR_ID: device.id, ATTR_GEOFENCE: next((geofence.name for geofence in self._geofences if geofence.id in (device.geofence_ids or [])), None), ATTR_CATEGORY: device.category, ATTR_STATUS: device.status}\n skip_accuracy_filter = False\n for custom_attr in self._custom_attributes:\n if device.attributes.get(custom_attr) is not None:\n attr[custom_attr] = position.attributes[custom_attr]\n if custom_attr in self._skip_accuracy_on:\n skip_accuracy_filter = True\n if position.attributes.get(custom_attr) is not None:\n attr[custom_attr] = position.attributes[custom_attr]\n if custom_attr in self._skip_accuracy_on:\n skip_accuracy_filter = True\n accuracy = position.accuracy or 0.0\n if not skip_accuracy_filter and self._max_accuracy > 0 and (accuracy > self._max_accuracy):\n _LOGGER.debug('Excluded position by 
accuracy filter: %f (%s)', accuracy, attr[ATTR_TRACCAR_ID])\n continue\n await self._async_see(dev_id=slugify(device.name), gps=(position.latitude, position.longitude), gps_accuracy=accuracy, battery=position.attributes.get('batteryLevel', -1), attributes=attr)\n\n async def import_events(self):\n \"\"\"Import events from Traccar.\"\"\"\n start_intervel = dt_util.utcnow().replace(tzinfo=None)\n events = await self._api.get_reports_events(devices=[device.id for device in self._devices], start_time=start_intervel, end_time=start_intervel - self._scan_interval, event_types=self._event_types.keys())\n if events is not None:\n for event in events:\n self._hass.bus.async_fire(f'traccar_{self._event_types.get(event.type)}', {'device_traccar_id': event.device_id, 'device_name': next((dev.name for dev in self._devices if dev.id == event.device_id), None), 'type': event.type, 'serverTime': event.event_time, 'attributes': event.attributes})\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/traccar/device_tracker.py", "source_repo": "home-assistant/core", "split": "val", "star_events_count": 35501} {"blob_id": "5719de02c8b56e9c1a4c5b8efa338146b0461852", "bodies": ["super(Downsample, self).__init__()\nself.apply_batchnorm = apply_batchnorm\ninitializer = tf.random_normal_initializer(0, 0.02)\nself.conv1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=(size, size), strides=(2, 2), padding='same', kernel_initializer=initializer, use_bias=False)\nif self.apply_batchnorm:\n self.batch_norm = tf.keras.layers.BatchNormalization()", "x = self.conv1(x)\nif self.apply_batchnorm:\n x = self.batch_norm(x, training=training)\nx = tf.nn.leaky_relu(x)\nreturn x"], "bodies_text": "<|body_start_0|>\n super(Downsample, self).__init__()\n self.apply_batchnorm = apply_batchnorm\n initializer = tf.random_normal_initializer(0, 0.02)\n self.conv1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=(size, size), strides=(2, 2), padding='same', kernel_initializer=initializer, use_bias=False)\n if self.apply_batchnorm:\n self.batch_norm = tf.keras.layers.BatchNormalization()\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.conv1(x)\n if self.apply_batchnorm:\n x = self.batch_norm(x, training=training)\n x = tf.nn.leaky_relu(x)\n return x\n<|end_body_1|>\n", "class_docstring": "Use convolution layer to downsample", "class_name": "Downsample", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Downsample:\n \"\"\"Use convolution layer to downsample\"\"\"\n\n def __init__(self, filters, size, apply_batchnorm=True):\n \"\"\"The construct function. Args: filters: The convolution filters number. size: The convolution filter size. apply_batchnorm If use batch normalization:\"\"\"\n <|body_0|>\n\n def call(self, x, training):\n \"\"\"Calls the model on new inputs. Args: x: The input. training: If training. 
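`_async_update` in the `TraccarScanner` record above fetches devices, positions, and geofences concurrently through a single `asyncio.gather` and unpacks the results in call order. A standalone sketch of that fan-out pattern, with dummy coroutines standing in for the Traccar API calls:

    import asyncio

    async def get_devices():
        await asyncio.sleep(0.01)  # stands in for an HTTP round-trip
        return [{'id': 1, 'name': 'car'}]

    async def get_positions():
        await asyncio.sleep(0.01)
        return [{'deviceId': 1, 'latitude': 52.5, 'longitude': 13.4}]

    async def get_geofences():
        await asyncio.sleep(0.01)
        return []

    async def update():
        # gather preserves argument order, so positional unpacking is safe
        devices, positions, geofences = await asyncio.gather(
            get_devices(), get_positions(), get_geofences())
        return devices, positions, geofences

    devices, positions, geofences = asyncio.run(update())
    assert devices[0]['id'] == positions[0]['deviceId']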
Returns: output.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Downsample, self).__init__()\n self.apply_batchnorm = apply_batchnorm\n initializer = tf.random_normal_initializer(0, 0.02)\n self.conv1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=(size, size), strides=(2, 2), padding='same', kernel_initializer=initializer, use_bias=False)\n if self.apply_batchnorm:\n self.batch_norm = tf.keras.layers.BatchNormalization()\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.conv1(x)\n if self.apply_batchnorm:\n x = self.batch_norm(x, training=training)\n x = tf.nn.leaky_relu(x)\n return x\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000123", "length_bytes": 20044, "license_type": "no_license", "methods": [{"docstring": "The construct function. Args: filters: The convolution filters number. size: The convolution filter size. apply_batchnorm If use batch normalization:", "name": "__init__", "signature": "def __init__(self, filters, size, apply_batchnorm=True)"}, {"docstring": "Calls the model on new inputs. Args: x: The input. training: If training. Returns: output.", "name": "call", "signature": "def call(self, x, training)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001943", "prompt": "Implement the Python class `Downsample` described below.\n\nClass description:\nUse convolution layer to downsample\n\nMethod signatures and docstrings:\n- def __init__(self, filters, size, apply_batchnorm=True): The construct function. Args: filters: The convolution filters number. size: The convolution filter size. apply_batchnorm If use batch normalization:\n- def call(self, x, training): Calls the model on new inputs. Args: x: The input. training: If training. Returns: output.", "prompted_full_text": "Implement the Python class `Downsample` described below.\n\nClass description:\nUse convolution layer to downsample\n\nMethod signatures and docstrings:\n- def __init__(self, filters, size, apply_batchnorm=True): The construct function. Args: filters: The convolution filters number. size: The convolution filter size. apply_batchnorm If use batch normalization:\n- def call(self, x, training): Calls the model on new inputs. Args: x: The input. training: If training. Returns: output.\n\n<|skeleton|>\nclass Downsample:\n \"\"\"Use convolution layer to downsample\"\"\"\n\n def __init__(self, filters, size, apply_batchnorm=True):\n \"\"\"The construct function. Args: filters: The convolution filters number. size: The convolution filter size. apply_batchnorm If use batch normalization:\"\"\"\n <|body_0|>\n\n def call(self, x, training):\n \"\"\"Calls the model on new inputs. Args: x: The input. training: If training. 
Returns: output.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Downsample, self).__init__()\n self.apply_batchnorm = apply_batchnorm\n initializer = tf.random_normal_initializer(0, 0.02)\n self.conv1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=(size, size), strides=(2, 2), padding='same', kernel_initializer=initializer, use_bias=False)\n if self.apply_batchnorm:\n self.batch_norm = tf.keras.layers.BatchNormalization()\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.conv1(x)\n if self.apply_batchnorm:\n x = self.batch_norm(x, training=training)\n x = tf.nn.leaky_relu(x)\n return x\n<|end_body_1|>\n", "revision_id": "d1b70b2a954f4665b628ba252b03c1a74b95559f", "skeleton": "<|skeleton|>\nclass Downsample:\n \"\"\"Use convolution layer to downsample\"\"\"\n\n def __init__(self, filters, size, apply_batchnorm=True):\n \"\"\"The construct function. Args: filters: The convolution filters number. size: The convolution filter size. apply_batchnorm If use batch normalization:\"\"\"\n <|body_0|>\n\n def call(self, x, training):\n \"\"\"Calls the model on new inputs. Args: x: The input. training: If training. Returns: output.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Downsample:\n \"\"\"Use convolution layer to downsample\"\"\"\n\n def __init__(self, filters, size, apply_batchnorm=True):\n \"\"\"The construct function. Args: filters: The convolution filters number. size: The convolution filter size. apply_batchnorm If use batch normalization:\"\"\"\n super(Downsample, self).__init__()\n self.apply_batchnorm = apply_batchnorm\n initializer = tf.random_normal_initializer(0, 0.02)\n self.conv1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=(size, size), strides=(2, 2), padding='same', kernel_initializer=initializer, use_bias=False)\n if self.apply_batchnorm:\n self.batch_norm = tf.keras.layers.BatchNormalization()\n\n def call(self, x, training):\n \"\"\"Calls the model on new inputs. Args: x: The input. training: If training. 
Returns: output.\"\"\"\n x = self.conv1(x)\n if self.apply_batchnorm:\n x = self.batch_norm(x, training=training)\n x = tf.nn.leaky_relu(x)\n return x\n", "source": "the_stack_v2_python_sparse", "source_path": "NeuralNetworks-tensorflow/generation_network_model/GAN/pix2pix.py", "source_repo": "zhaocc1106/machine_learn", "split": "val", "star_events_count": 15} {"blob_id": "005ca7e6d893d461da61771e1136a4cfe5640463", "bodies": ["if context is None:\n context = {}\npartner_id_obj = self.browse(cr, uid, partner_id)\nlocations = self.pool.get('stock.location').search(cr, uid, [('partner_id', '=', partner_id)])\nif not locations:\n partner_location_id = self.pool.get('stock.location').create(cr, uid, vals={'location_id': partner_id_obj.property_stock_customer and partner_id_obj.property_stock_customer.id or False, 'name': partner_id_obj.name, 'usage': 'customer', 'partner_id': partner_id, 'company_id': self.pool.get('res.users').browse(cr, uid, uid).company_id.id}, context=context)\n self.write(cr, uid, partner_id, vals={'property_stock_customer': partner_location_id}, context=context)\nreturn True", "if context is None:\n context = {}\npartner_id = super(res_partner, self).create(cr, uid, vals, context=context)\nif vals.get('create_customer_location', False):\n self._set_partner_customer_location(cr, uid, partner_id, context=context)\nreturn partner_id", "if context is None:\n context = {}\nres = super(res_partner, self).write(cr, uid, ids, vals, context=context)\nif vals.get('create_customer_location', False):\n if isinstance(ids, (int, long)):\n ids = [ids]\n for partner_id in ids:\n self._set_partner_customer_location(cr, uid, partner_id, context=context)\nreturn res"], "bodies_text": "<|body_start_0|>\n if context is None:\n context = {}\n partner_id_obj = self.browse(cr, uid, partner_id)\n locations = self.pool.get('stock.location').search(cr, uid, [('partner_id', '=', partner_id)])\n if not locations:\n partner_location_id = self.pool.get('stock.location').create(cr, uid, vals={'location_id': partner_id_obj.property_stock_customer and partner_id_obj.property_stock_customer.id or False, 'name': partner_id_obj.name, 'usage': 'customer', 'partner_id': partner_id, 'company_id': self.pool.get('res.users').browse(cr, uid, uid).company_id.id}, context=context)\n self.write(cr, uid, partner_id, vals={'property_stock_customer': partner_location_id}, context=context)\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if context is None:\n context = {}\n partner_id = super(res_partner, self).create(cr, uid, vals, context=context)\n if vals.get('create_customer_location', False):\n self._set_partner_customer_location(cr, uid, partner_id, context=context)\n return partner_id\n<|end_body_1|>\n\n<|body_start_2|>\n if context is None:\n context = {}\n res = super(res_partner, self).write(cr, uid, ids, vals, context=context)\n if vals.get('create_customer_location', False):\n if isinstance(ids, (int, long)):\n ids = [ids]\n for partner_id in ids:\n self._set_partner_customer_location(cr, uid, partner_id, context=context)\n return res\n<|end_body_2|>\n", "class_docstring": "inherit res.partner for adds the functionally that customer has a location", "class_name": "res_partner", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass res_partner:\n \"\"\"inherit res.partner for adds the functionally that customer has a location\"\"\"\n\n def _set_partner_customer_location(self, cr, uid, partner_id, context=None):\n \"\"\"creates customer location 
for partner in arguments\"\"\"\n <|body_0|>\n\n def create(self, cr, uid, vals, context=None):\n \"\"\"Check to create customer location\"\"\"\n <|body_1|>\n\n def write(self, cr, uid, ids, vals, context=None):\n \"\"\"Check to create customer location\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if context is None:\n context = {}\n partner_id_obj = self.browse(cr, uid, partner_id)\n locations = self.pool.get('stock.location').search(cr, uid, [('partner_id', '=', partner_id)])\n if not locations:\n partner_location_id = self.pool.get('stock.location').create(cr, uid, vals={'location_id': partner_id_obj.property_stock_customer and partner_id_obj.property_stock_customer.id or False, 'name': partner_id_obj.name, 'usage': 'customer', 'partner_id': partner_id, 'company_id': self.pool.get('res.users').browse(cr, uid, uid).company_id.id}, context=context)\n self.write(cr, uid, partner_id, vals={'property_stock_customer': partner_location_id}, context=context)\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if context is None:\n context = {}\n partner_id = super(res_partner, self).create(cr, uid, vals, context=context)\n if vals.get('create_customer_location', False):\n self._set_partner_customer_location(cr, uid, partner_id, context=context)\n return partner_id\n<|end_body_1|>\n\n<|body_start_2|>\n if context is None:\n context = {}\n res = super(res_partner, self).write(cr, uid, ids, vals, context=context)\n if vals.get('create_customer_location', False):\n if isinstance(ids, (int, long)):\n ids = [ids]\n for partner_id in ids:\n self._set_partner_customer_location(cr, uid, partner_id, context=context)\n return res\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000124", "length_bytes": 4053, "license_type": "no_license", "methods": [{"docstring": "creates customer location for partner in arguments", "name": "_set_partner_customer_location", "signature": "def _set_partner_customer_location(self, cr, uid, partner_id, context=None)"}, {"docstring": "Check to create customer location", "name": "create", "signature": "def create(self, cr, uid, vals, context=None)"}, {"docstring": "Check to create customer location", "name": "write", "signature": "def write(self, cr, uid, ids, vals, context=None)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005697", "prompt": "Implement the Python class `res_partner` described below.\n\nClass description:\ninherit res.partner for adds the functionally that customer has a location\n\nMethod signatures and docstrings:\n- def _set_partner_customer_location(self, cr, uid, partner_id, context=None): creates customer location for partner in arguments\n- def create(self, cr, uid, vals, context=None): Check to create customer location\n- def write(self, cr, uid, ids, vals, context=None): Check to create customer location", "prompted_full_text": "Implement the Python class `res_partner` described below.\n\nClass description:\ninherit res.partner for adds the functionally that customer has a location\n\nMethod signatures and docstrings:\n- def _set_partner_customer_location(self, cr, uid, partner_id, context=None): creates customer location for partner in arguments\n- def create(self, cr, uid, vals, context=None): Check to create customer location\n- def write(self, cr, uid, ids, vals, context=None): Check to create customer location\n\n<|skeleton|>\nclass res_partner:\n \"\"\"inherit res.partner for adds the functionally that customer has a location\"\"\"\n\n def _set_partner_customer_location(self, cr, uid, partner_id, 
context=None):\n \"\"\"creates customer location for partner in arguments\"\"\"\n <|body_0|>\n\n def create(self, cr, uid, vals, context=None):\n \"\"\"Check to create customer location\"\"\"\n <|body_1|>\n\n def write(self, cr, uid, ids, vals, context=None):\n \"\"\"Check to create customer location\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if context is None:\n context = {}\n partner_id_obj = self.browse(cr, uid, partner_id)\n locations = self.pool.get('stock.location').search(cr, uid, [('partner_id', '=', partner_id)])\n if not locations:\n partner_location_id = self.pool.get('stock.location').create(cr, uid, vals={'location_id': partner_id_obj.property_stock_customer and partner_id_obj.property_stock_customer.id or False, 'name': partner_id_obj.name, 'usage': 'customer', 'partner_id': partner_id, 'company_id': self.pool.get('res.users').browse(cr, uid, uid).company_id.id}, context=context)\n self.write(cr, uid, partner_id, vals={'property_stock_customer': partner_location_id}, context=context)\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if context is None:\n context = {}\n partner_id = super(res_partner, self).create(cr, uid, vals, context=context)\n if vals.get('create_customer_location', False):\n self._set_partner_customer_location(cr, uid, partner_id, context=context)\n return partner_id\n<|end_body_1|>\n\n<|body_start_2|>\n if context is None:\n context = {}\n res = super(res_partner, self).write(cr, uid, ids, vals, context=context)\n if vals.get('create_customer_location', False):\n if isinstance(ids, (int, long)):\n ids = [ids]\n for partner_id in ids:\n self._set_partner_customer_location(cr, uid, partner_id, context=context)\n return res\n<|end_body_2|>\n", "revision_id": "01c8294e969cce818a33fd06682560e0344c217c", "skeleton": "<|skeleton|>\nclass res_partner:\n \"\"\"inherit res.partner for adds the functionally that customer has a location\"\"\"\n\n def _set_partner_customer_location(self, cr, uid, partner_id, context=None):\n \"\"\"creates customer location for partner in arguments\"\"\"\n <|body_0|>\n\n def create(self, cr, uid, vals, context=None):\n \"\"\"Check to create customer location\"\"\"\n <|body_1|>\n\n def write(self, cr, uid, ids, vals, context=None):\n \"\"\"Check to create customer location\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class res_partner:\n \"\"\"inherit res.partner for adds the functionally that customer has a location\"\"\"\n\n def _set_partner_customer_location(self, cr, uid, partner_id, context=None):\n \"\"\"creates customer location for partner in arguments\"\"\"\n if context is None:\n context = {}\n partner_id_obj = self.browse(cr, uid, partner_id)\n locations = self.pool.get('stock.location').search(cr, uid, [('partner_id', '=', partner_id)])\n if not locations:\n partner_location_id = self.pool.get('stock.location').create(cr, uid, vals={'location_id': partner_id_obj.property_stock_customer and partner_id_obj.property_stock_customer.id or False, 'name': partner_id_obj.name, 'usage': 'customer', 'partner_id': partner_id, 'company_id': self.pool.get('res.users').browse(cr, uid, uid).company_id.id}, context=context)\n self.write(cr, uid, partner_id, vals={'property_stock_customer': partner_location_id}, context=context)\n return True\n\n def create(self, cr, uid, vals, context=None):\n \"\"\"Check to create customer location\"\"\"\n if context is None:\n context = {}\n partner_id = 
super(res_partner, self).create(cr, uid, vals, context=context)\n if vals.get('create_customer_location', False):\n self._set_partner_customer_location(cr, uid, partner_id, context=context)\n return partner_id\n\n def write(self, cr, uid, ids, vals, context=None):\n \"\"\"Check to create customer location\"\"\"\n if context is None:\n context = {}\n res = super(res_partner, self).write(cr, uid, ids, vals, context=context)\n if vals.get('create_customer_location', False):\n if isinstance(ids, (int, long)):\n ids = [ids]\n for partner_id in ids:\n self._set_partner_customer_location(cr, uid, partner_id, context=context)\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "Varios/alimentacion/__unported__/sale_follow_up/partner.py", "source_repo": "ELNOGAL/GALIPAT_LUGO", "split": "val", "star_events_count": 0} {"blob_id": "49e2858afaf4c77f2c5c2c655e6a5efa9cf9eca0", "bodies": ["self.name = name\nself.type = type\nself.required = required\nself.default = default\nself.ignore = ignore\nself.choices = choices\nself.nullable = nullable\nself.location = location\nself.discard = discard\nself.help = help", "if self.location and hasattr(request, self.location):\n req_data = getattr(request, self.location)\nelif request.method == 'GET':\n req_data = request.args\nelse:\n req_data = request.json\nvalue = self.default\nif req_data and self.name in req_data:\n value = req_data[self.name]\nelif self.required:\n raise ApiError(StatusDef.PARAM_ERROR, '`%s` is required! %s' % (self.name, self.help))\nelif self.discard:\n return (None, False)\nif value is None:\n if not self.nullable:\n raise ApiError(StatusDef.PARAM_ERROR, '`%s` is not nullable! %s' % (self.name, self.help))\nelif not self.ignore and (not isinstance(value, self.type)):\n raise ApiError(StatusDef.PARAM_ERROR, 'TypeError: `%s` is %s! %s' % (self.name, self.type, self.help))\nif isinstance(self.choices, abc.Iterable) and value not in self.choices:\n raise ApiError(StatusDef.PARAM_ERROR, 'ValueError: `%s` must in %s!%s' % (self.name, self.choices, self.help))\nreturn (value, True)"], "bodies_text": "<|body_start_0|>\n self.name = name\n self.type = type\n self.required = required\n self.default = default\n self.ignore = ignore\n self.choices = choices\n self.nullable = nullable\n self.location = location\n self.discard = discard\n self.help = help\n<|end_body_0|>\n\n<|body_start_1|>\n if self.location and hasattr(request, self.location):\n req_data = getattr(request, self.location)\n elif request.method == 'GET':\n req_data = request.args\n else:\n req_data = request.json\n value = self.default\n if req_data and self.name in req_data:\n value = req_data[self.name]\n elif self.required:\n raise ApiError(StatusDef.PARAM_ERROR, '`%s` is required! %s' % (self.name, self.help))\n elif self.discard:\n return (None, False)\n if value is None:\n if not self.nullable:\n raise ApiError(StatusDef.PARAM_ERROR, '`%s` is not nullable! %s' % (self.name, self.help))\n elif not self.ignore and (not isinstance(value, self.type)):\n raise ApiError(StatusDef.PARAM_ERROR, 'TypeError: `%s` is %s! 
%s' % (self.name, self.type, self.help))\n if isinstance(self.choices, abc.Iterable) and value not in self.choices:\n raise ApiError(StatusDef.PARAM_ERROR, 'ValueError: `%s` must in %s!%s' % (self.name, self.choices, self.help))\n return (value, True)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Param", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Param:\n\n def __init__(self, name, type=str, required=False, default=None, nullable=True, ignore=False, choices=None, location=None, discard=False, help=''):\n \"\"\"请求参数对象 Args: name (str): 字段名 type (class): 类型 required (bool): 是否必填 default (any): 默认值 (必填时无效) nullable (bool): 是否非空 ignore (bool): 是否忽略类型 choices (tuple): 可选值 location (str): 访问数据源 args, json, form 默认 GET: args, POST: json discard (bool): 不存在 key, 是否不解析参数 help (str): 参数不匹配时, 返回提示\"\"\"\n <|body_0|>\n\n def load(self):\n \"\"\"解析并检查参数取值 Returns: value(值), valid(是否有效)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = name\n self.type = type\n self.required = required\n self.default = default\n self.ignore = ignore\n self.choices = choices\n self.nullable = nullable\n self.location = location\n self.discard = discard\n self.help = help\n<|end_body_0|>\n\n<|body_start_1|>\n if self.location and hasattr(request, self.location):\n req_data = getattr(request, self.location)\n elif request.method == 'GET':\n req_data = request.args\n else:\n req_data = request.json\n value = self.default\n if req_data and self.name in req_data:\n value = req_data[self.name]\n elif self.required:\n raise ApiError(StatusDef.PARAM_ERROR, '`%s` is required! %s' % (self.name, self.help))\n elif self.discard:\n return (None, False)\n if value is None:\n if not self.nullable:\n raise ApiError(StatusDef.PARAM_ERROR, '`%s` is not nullable! %s' % (self.name, self.help))\n elif not self.ignore and (not isinstance(value, self.type)):\n raise ApiError(StatusDef.PARAM_ERROR, 'TypeError: `%s` is %s! 
%s' % (self.name, self.type, self.help))\n if isinstance(self.choices, abc.Iterable) and value not in self.choices:\n raise ApiError(StatusDef.PARAM_ERROR, 'ValueError: `%s` must in %s!%s' % (self.name, self.choices, self.help))\n return (value, True)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000125", "length_bytes": 3546, "license_type": "no_license", "methods": [{"docstring": "请求参数对象 Args: name (str): 字段名 type (class): 类型 required (bool): 是否必填 default (any): 默认值 (必填时无效) nullable (bool): 是否非空 ignore (bool): 是否忽略类型 choices (tuple): 可选值 location (str): 访问数据源 args, json, form 默认 GET: args, POST: json discard (bool): 不存在 key, 是否不解析参数 help (str): 参数不匹配时, 返回提示", "name": "__init__", "signature": "def __init__(self, name, type=str, required=False, default=None, nullable=True, ignore=False, choices=None, location=None, discard=False, help='')"}, {"docstring": "解析并检查参数取值 Returns: value(值), valid(是否有效)", "name": "load", "signature": "def load(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002178", "prompt": "Implement the Python class `Param` described below.\n\nClass description:\nImplement the Param class.\n\nMethod signatures and docstrings:\n- def __init__(self, name, type=str, required=False, default=None, nullable=True, ignore=False, choices=None, location=None, discard=False, help=''): 请求参数对象 Args: name (str): 字段名 type (class): 类型 required (bool): 是否必填 default (any): 默认值 (必填时无效) nullable (bool): 是否非空 ignore (bool): 是否忽略类型 choices (tuple): 可选值 location (str): 访问数据源 args, json, form 默认 GET: args, POST: json discard (bool): 不存在 key, 是否不解析参数 help (str): 参数不匹配时, 返回提示\n- def load(self): 解析并检查参数取值 Returns: value(值), valid(是否有效)", "prompted_full_text": "Implement the Python class `Param` described below.\n\nClass description:\nImplement the Param class.\n\nMethod signatures and docstrings:\n- def __init__(self, name, type=str, required=False, default=None, nullable=True, ignore=False, choices=None, location=None, discard=False, help=''): 请求参数对象 Args: name (str): 字段名 type (class): 类型 required (bool): 是否必填 default (any): 默认值 (必填时无效) nullable (bool): 是否非空 ignore (bool): 是否忽略类型 choices (tuple): 可选值 location (str): 访问数据源 args, json, form 默认 GET: args, POST: json discard (bool): 不存在 key, 是否不解析参数 help (str): 参数不匹配时, 返回提示\n- def load(self): 解析并检查参数取值 Returns: value(值), valid(是否有效)\n\n<|skeleton|>\nclass Param:\n\n def __init__(self, name, type=str, required=False, default=None, nullable=True, ignore=False, choices=None, location=None, discard=False, help=''):\n \"\"\"请求参数对象 Args: name (str): 字段名 type (class): 类型 required (bool): 是否必填 default (any): 默认值 (必填时无效) nullable (bool): 是否非空 ignore (bool): 是否忽略类型 choices (tuple): 可选值 location (str): 访问数据源 args, json, form 默认 GET: args, POST: json discard (bool): 不存在 key, 是否不解析参数 help (str): 参数不匹配时, 返回提示\"\"\"\n <|body_0|>\n\n def load(self):\n \"\"\"解析并检查参数取值 Returns: value(值), valid(是否有效)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = name\n self.type = type\n self.required = required\n self.default = default\n self.ignore = ignore\n self.choices = choices\n self.nullable = nullable\n self.location = location\n self.discard = discard\n self.help = help\n<|end_body_0|>\n\n<|body_start_1|>\n if self.location and hasattr(request, self.location):\n req_data = getattr(request, self.location)\n elif request.method == 'GET':\n req_data = request.args\n else:\n req_data = request.json\n value = self.default\n if req_data and self.name in req_data:\n value = req_data[self.name]\n elif self.required:\n 
raise ApiError(StatusDef.PARAM_ERROR, '`%s` is required! %s' % (self.name, self.help))\n elif self.discard:\n return (None, False)\n if value is None:\n if not self.nullable:\n raise ApiError(StatusDef.PARAM_ERROR, '`%s` is not nullable! %s' % (self.name, self.help))\n elif not self.ignore and (not isinstance(value, self.type)):\n raise ApiError(StatusDef.PARAM_ERROR, 'TypeError: `%s` is %s! %s' % (self.name, self.type, self.help))\n if isinstance(self.choices, abc.Iterable) and value not in self.choices:\n raise ApiError(StatusDef.PARAM_ERROR, 'ValueError: `%s` must in %s!%s' % (self.name, self.choices, self.help))\n return (value, True)\n<|end_body_1|>\n", "revision_id": "7877724c7875fad0297f7801910f162d80c5d695", "skeleton": "<|skeleton|>\nclass Param:\n\n def __init__(self, name, type=str, required=False, default=None, nullable=True, ignore=False, choices=None, location=None, discard=False, help=''):\n \"\"\"请求参数对象 Args: name (str): 字段名 type (class): 类型 required (bool): 是否必填 default (any): 默认值 (必填时无效) nullable (bool): 是否非空 ignore (bool): 是否忽略类型 choices (tuple): 可选值 location (str): 访问数据源 args, json, form 默认 GET: args, POST: json discard (bool): 不存在 key, 是否不解析参数 help (str): 参数不匹配时, 返回提示\"\"\"\n <|body_0|>\n\n def load(self):\n \"\"\"解析并检查参数取值 Returns: value(值), valid(是否有效)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Param:\n def __init__(self, name, type=str, required=False, default=None, nullable=True, ignore=False, choices=None, location=None, discard=False, help=''):\n \"\"\"请求参数对象 Args: name (str): 字段名 type (class): 类型 required (bool): 是否必填 default (any): 默认值 (必填时无效) nullable (bool): 是否非空 ignore (bool): 是否忽略类型 choices (tuple): 可选值 location (str): 访问数据源 args, json, form 默认 GET: args, POST: json discard (bool): 不存在 key, 是否不解析参数 help (str): 参数不匹配时, 返回提示\"\"\"\n self.name = name\n self.type = type\n self.required = required\n self.default = default\n self.ignore = ignore\n self.choices = choices\n self.nullable = nullable\n self.location = location\n self.discard = discard\n self.help = help\n\n def load(self):\n \"\"\"解析并检查参数取值 Returns: value(值), valid(是否有效)\"\"\"\n if self.location and hasattr(request, self.location):\n req_data = getattr(request, self.location)\n elif request.method == 'GET':\n req_data = request.args\n else:\n req_data = request.json\n value = self.default\n if req_data and self.name in req_data:\n value = req_data[self.name]\n elif self.required:\n raise ApiError(StatusDef.PARAM_ERROR, '`%s` is required! %s' % (self.name, self.help))\n elif self.discard:\n return (None, False)\n if value is None:\n if not self.nullable:\n raise ApiError(StatusDef.PARAM_ERROR, '`%s` is not nullable! %s' % (self.name, self.help))\n elif not self.ignore and (not isinstance(value, self.type)):\n raise ApiError(StatusDef.PARAM_ERROR, 'TypeError: `%s` is %s! 
%s' % (self.name, self.type, self.help))\n if isinstance(self.choices, abc.Iterable) and value not in self.choices:\n raise ApiError(StatusDef.PARAM_ERROR, 'ValueError: `%s` must in %s!%s' % (self.name, self.choices, self.help))\n return (value, True)\n", "source": "the_stack_v2_python_sparse", "source_path": "base/params.py", "source_repo": "HeyManLean/RedisApp", "split": "val", "star_events_count": 0} {"blob_id": "fec16c9a90fdd5bdfa66b122f2004dbbaeeccb07", "bodies": ["while True:\n r = rand7()\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 40:\n break\nreturn 1 + (n - 1) % 10", "while True:\n r = rand7()\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 40:\n return 1 + (n - 1) % 10\n r = n - 40\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 60:\n return 1 + (n - 1) % 10\n r = n - 60\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 20:\n return 1 + (n - 1) % 10"], "bodies_text": "<|body_start_0|>\n while True:\n r = rand7()\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 40:\n break\n return 1 + (n - 1) % 10\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n r = rand7()\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 40:\n return 1 + (n - 1) % 10\n r = n - 40\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 60:\n return 1 + (n - 1) % 10\n r = n - 60\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 20:\n return 1 + (n - 1) % 10\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def rand10(self):\n \"\"\":rtype: int\"\"\"\n <|body_0|>\n\n def rand10_1(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n while True:\n r = rand7()\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 40:\n break\n return 1 + (n - 1) % 10\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n r = rand7()\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 40:\n return 1 + (n - 1) % 10\n r = n - 40\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 60:\n return 1 + (n - 1) % 10\n r = n - 60\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 20:\n return 1 + (n - 1) % 10\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000126", "length_bytes": 996, "license_type": "no_license", "methods": [{"docstring": ":rtype: int", "name": "rand10", "signature": "def rand10(self)"}, {"docstring": ":rtype: int", "name": "rand10_1", "signature": "def rand10_1(self)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def rand10(self): :rtype: int\n- def rand10_1(self): :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def rand10(self): :rtype: int\n- def rand10_1(self): :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def rand10(self):\n \"\"\":rtype: int\"\"\"\n <|body_0|>\n\n def rand10_1(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n while True:\n r = rand7()\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 40:\n break\n return 1 + (n - 1) % 10\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n r = rand7()\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 40:\n return 1 + (n - 1) % 10\n r = n - 40\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 60:\n return 1 + (n - 1) % 10\n r = n - 60\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 20:\n return 1 + (n - 1) % 10\n<|end_body_1|>\n", 
"revision_id": "394e89fd1881f4aa32e2fd81abc72dbc9eeec7bf", "skeleton": "<|skeleton|>\nclass Solution:\n\n def rand10(self):\n \"\"\":rtype: int\"\"\"\n <|body_0|>\n\n def rand10_1(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def rand10(self):\n \"\"\":rtype: int\"\"\"\n while True:\n r = rand7()\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 40:\n break\n return 1 + (n - 1) % 10\n\n def rand10_1(self):\n \"\"\":rtype: int\"\"\"\n while True:\n r = rand7()\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 40:\n return 1 + (n - 1) % 10\n r = n - 40\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 60:\n return 1 + (n - 1) % 10\n r = n - 60\n c = rand7()\n n = c + (r - 1) * 7\n if n <= 20:\n return 1 + (n - 1) % 10\n", "source": "the_stack_v2_python_sparse", "source_path": "Random/rand10.py", "source_repo": "Miracle-cl/Algorithms", "split": "val", "star_events_count": 1} {"blob_id": "c1dd343ce4c81426613f70d8ef7c0458d47866d1", "bodies": ["self._ir_api = irish_rail\nself.station = station\nself.direction = direction\nself.destination = destination\nself.stops_at = stops_at\nself.info = self._empty_train_data()", "trains = self._ir_api.get_station_by_name(self.station, direction=self.direction, destination=self.destination, stops_at=self.stops_at)\nstops_at = self.stops_at if self.stops_at else ''\nself.info = []\nfor train in trains:\n train_data = {ATTR_STATION: self.station, ATTR_ORIGIN: train.get('origin'), ATTR_DESTINATION: train.get('destination'), ATTR_DUE_IN: train.get('due_in_mins'), ATTR_DUE_AT: train.get('scheduled_arrival_time'), ATTR_EXPECT_AT: train.get('expected_departure_time'), ATTR_DIRECTION: train.get('direction'), ATTR_STOPS_AT: stops_at, ATTR_TRAIN_TYPE: train.get('type')}\n self.info.append(train_data)\nif not self.info:\n self.info = self._empty_train_data()", "dest = self.destination if self.destination else ''\ndirection = self.direction if self.direction else ''\nstops_at = self.stops_at if self.stops_at else ''\nreturn [{ATTR_STATION: self.station, ATTR_ORIGIN: '', ATTR_DESTINATION: dest, ATTR_DUE_IN: 'n/a', ATTR_DUE_AT: 'n/a', ATTR_EXPECT_AT: 'n/a', ATTR_DIRECTION: direction, ATTR_STOPS_AT: stops_at, ATTR_TRAIN_TYPE: ''}]"], "bodies_text": "<|body_start_0|>\n self._ir_api = irish_rail\n self.station = station\n self.direction = direction\n self.destination = destination\n self.stops_at = stops_at\n self.info = self._empty_train_data()\n<|end_body_0|>\n\n<|body_start_1|>\n trains = self._ir_api.get_station_by_name(self.station, direction=self.direction, destination=self.destination, stops_at=self.stops_at)\n stops_at = self.stops_at if self.stops_at else ''\n self.info = []\n for train in trains:\n train_data = {ATTR_STATION: self.station, ATTR_ORIGIN: train.get('origin'), ATTR_DESTINATION: train.get('destination'), ATTR_DUE_IN: train.get('due_in_mins'), ATTR_DUE_AT: train.get('scheduled_arrival_time'), ATTR_EXPECT_AT: train.get('expected_departure_time'), ATTR_DIRECTION: train.get('direction'), ATTR_STOPS_AT: stops_at, ATTR_TRAIN_TYPE: train.get('type')}\n self.info.append(train_data)\n if not self.info:\n self.info = self._empty_train_data()\n<|end_body_1|>\n\n<|body_start_2|>\n dest = self.destination if self.destination else ''\n direction = self.direction if self.direction else ''\n stops_at = self.stops_at if self.stops_at else ''\n return [{ATTR_STATION: self.station, ATTR_ORIGIN: '', ATTR_DESTINATION: dest, 
ATTR_DUE_IN: 'n/a', ATTR_DUE_AT: 'n/a', ATTR_EXPECT_AT: 'n/a', ATTR_DIRECTION: direction, ATTR_STOPS_AT: stops_at, ATTR_TRAIN_TYPE: ''}]\n<|end_body_2|>\n", "class_docstring": "The Class for handling the data retrieval.", "class_name": "IrishRailTransportData", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IrishRailTransportData:\n \"\"\"The Class for handling the data retrieval.\"\"\"\n\n def __init__(self, irish_rail, station, direction, destination, stops_at):\n \"\"\"Initialize the data object.\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Get the latest data from irishrail.\"\"\"\n <|body_1|>\n\n def _empty_train_data(self):\n \"\"\"Generate info for an empty train.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._ir_api = irish_rail\n self.station = station\n self.direction = direction\n self.destination = destination\n self.stops_at = stops_at\n self.info = self._empty_train_data()\n<|end_body_0|>\n\n<|body_start_1|>\n trains = self._ir_api.get_station_by_name(self.station, direction=self.direction, destination=self.destination, stops_at=self.stops_at)\n stops_at = self.stops_at if self.stops_at else ''\n self.info = []\n for train in trains:\n train_data = {ATTR_STATION: self.station, ATTR_ORIGIN: train.get('origin'), ATTR_DESTINATION: train.get('destination'), ATTR_DUE_IN: train.get('due_in_mins'), ATTR_DUE_AT: train.get('scheduled_arrival_time'), ATTR_EXPECT_AT: train.get('expected_departure_time'), ATTR_DIRECTION: train.get('direction'), ATTR_STOPS_AT: stops_at, ATTR_TRAIN_TYPE: train.get('type')}\n self.info.append(train_data)\n if not self.info:\n self.info = self._empty_train_data()\n<|end_body_1|>\n\n<|body_start_2|>\n dest = self.destination if self.destination else ''\n direction = self.direction if self.direction else ''\n stops_at = self.stops_at if self.stops_at else ''\n return [{ATTR_STATION: self.station, ATTR_ORIGIN: '', ATTR_DESTINATION: dest, ATTR_DUE_IN: 'n/a', ATTR_DUE_AT: 'n/a', ATTR_EXPECT_AT: 'n/a', ATTR_DIRECTION: direction, ATTR_STOPS_AT: stops_at, ATTR_TRAIN_TYPE: ''}]\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000127", "length_bytes": 6444, "license_type": "permissive", "methods": [{"docstring": "Initialize the data object.", "name": "__init__", "signature": "def __init__(self, irish_rail, station, direction, destination, stops_at)"}, {"docstring": "Get the latest data from irishrail.", "name": "update", "signature": "def update(self)"}, {"docstring": "Generate info for an empty train.", "name": "_empty_train_data", "signature": "def _empty_train_data(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001314", "prompt": "Implement the Python class `IrishRailTransportData` described below.\n\nClass description:\nThe Class for handling the data retrieval.\n\nMethod signatures and docstrings:\n- def __init__(self, irish_rail, station, direction, destination, stops_at): Initialize the data object.\n- def update(self): Get the latest data from irishrail.\n- def _empty_train_data(self): Generate info for an empty train.", "prompted_full_text": "Implement the Python class `IrishRailTransportData` described below.\n\nClass description:\nThe Class for handling the data retrieval.\n\nMethod signatures and docstrings:\n- def __init__(self, irish_rail, station, direction, destination, stops_at): Initialize the data object.\n- def update(self): Get the latest data from irishrail.\n- def _empty_train_data(self): Generate info for 
an empty train.\n\n<|skeleton|>\nclass IrishRailTransportData:\n \"\"\"The Class for handling the data retrieval.\"\"\"\n\n def __init__(self, irish_rail, station, direction, destination, stops_at):\n \"\"\"Initialize the data object.\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Get the latest data from irishrail.\"\"\"\n <|body_1|>\n\n def _empty_train_data(self):\n \"\"\"Generate info for an empty train.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._ir_api = irish_rail\n self.station = station\n self.direction = direction\n self.destination = destination\n self.stops_at = stops_at\n self.info = self._empty_train_data()\n<|end_body_0|>\n\n<|body_start_1|>\n trains = self._ir_api.get_station_by_name(self.station, direction=self.direction, destination=self.destination, stops_at=self.stops_at)\n stops_at = self.stops_at if self.stops_at else ''\n self.info = []\n for train in trains:\n train_data = {ATTR_STATION: self.station, ATTR_ORIGIN: train.get('origin'), ATTR_DESTINATION: train.get('destination'), ATTR_DUE_IN: train.get('due_in_mins'), ATTR_DUE_AT: train.get('scheduled_arrival_time'), ATTR_EXPECT_AT: train.get('expected_departure_time'), ATTR_DIRECTION: train.get('direction'), ATTR_STOPS_AT: stops_at, ATTR_TRAIN_TYPE: train.get('type')}\n self.info.append(train_data)\n if not self.info:\n self.info = self._empty_train_data()\n<|end_body_1|>\n\n<|body_start_2|>\n dest = self.destination if self.destination else ''\n direction = self.direction if self.direction else ''\n stops_at = self.stops_at if self.stops_at else ''\n return [{ATTR_STATION: self.station, ATTR_ORIGIN: '', ATTR_DESTINATION: dest, ATTR_DUE_IN: 'n/a', ATTR_DUE_AT: 'n/a', ATTR_EXPECT_AT: 'n/a', ATTR_DIRECTION: direction, ATTR_STOPS_AT: stops_at, ATTR_TRAIN_TYPE: ''}]\n<|end_body_2|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass IrishRailTransportData:\n \"\"\"The Class for handling the data retrieval.\"\"\"\n\n def __init__(self, irish_rail, station, direction, destination, stops_at):\n \"\"\"Initialize the data object.\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Get the latest data from irishrail.\"\"\"\n <|body_1|>\n\n def _empty_train_data(self):\n \"\"\"Generate info for an empty train.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class IrishRailTransportData:\n \"\"\"The Class for handling the data retrieval.\"\"\"\n\n def __init__(self, irish_rail, station, direction, destination, stops_at):\n \"\"\"Initialize the data object.\"\"\"\n self._ir_api = irish_rail\n self.station = station\n self.direction = direction\n self.destination = destination\n self.stops_at = stops_at\n self.info = self._empty_train_data()\n\n def update(self):\n \"\"\"Get the latest data from irishrail.\"\"\"\n trains = self._ir_api.get_station_by_name(self.station, direction=self.direction, destination=self.destination, stops_at=self.stops_at)\n stops_at = self.stops_at if self.stops_at else ''\n self.info = []\n for train in trains:\n train_data = {ATTR_STATION: self.station, ATTR_ORIGIN: train.get('origin'), ATTR_DESTINATION: train.get('destination'), ATTR_DUE_IN: train.get('due_in_mins'), ATTR_DUE_AT: train.get('scheduled_arrival_time'), ATTR_EXPECT_AT: train.get('expected_departure_time'), ATTR_DIRECTION: train.get('direction'), ATTR_STOPS_AT: stops_at, ATTR_TRAIN_TYPE: train.get('type')}\n self.info.append(train_data)\n if not self.info:\n 
self.info = self._empty_train_data()\n\n def _empty_train_data(self):\n \"\"\"Generate info for an empty train.\"\"\"\n dest = self.destination if self.destination else ''\n direction = self.direction if self.direction else ''\n stops_at = self.stops_at if self.stops_at else ''\n return [{ATTR_STATION: self.station, ATTR_ORIGIN: '', ATTR_DESTINATION: dest, ATTR_DUE_IN: 'n/a', ATTR_DUE_AT: 'n/a', ATTR_EXPECT_AT: 'n/a', ATTR_DIRECTION: direction, ATTR_STOPS_AT: stops_at, ATTR_TRAIN_TYPE: ''}]\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/irish_rail_transport/sensor.py", "source_repo": "home-assistant/core", "split": "val", "star_events_count": 35501} {"blob_id": "005e82747d80348c953b99e0be6367fbbde8e836", "bodies": ["self.host = host\nself.port = port\nself.connection = None\nself.database = None\nself.product = None\nself.customer = None\nself.rental = None", "if self.connection is None:\n try:\n self.connection = MongoClient(self.host, self.port)\n LOGGER.info('connected to mongo')\n LOGGER.info('entering host %s via port %s ', self.host, self.port)\n self.database = self.connection.FlorentinDB\n self.product = self.database['product']\n self.customer = self.database['customer']\n self.rental = self.database['rental']\n LOGGER.info('database %s established on mongo', self.database)\n LOGGER.info('collections %s, %s, %s available in %s', self.product, self.customer, self.rental, self.database)\n except mer.ConnectionFailure as err:\n LOGGER.info('error connecting to mongo\\n %s', err)\nreturn self", "if exc_type:\n pp.pprint(f'exc_type: {exc_type}')\n pp.pprint(f'exc_val: {exc_val}')\n pp.pprint(f'exc_tb: {exc_tb}')\nself.connection.close()\nLOGGER.info('disconected from mongo')"], "bodies_text": "<|body_start_0|>\n self.host = host\n self.port = port\n self.connection = None\n self.database = None\n self.product = None\n self.customer = None\n self.rental = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.connection is None:\n try:\n self.connection = MongoClient(self.host, self.port)\n LOGGER.info('connected to mongo')\n LOGGER.info('entering host %s via port %s ', self.host, self.port)\n self.database = self.connection.FlorentinDB\n self.product = self.database['product']\n self.customer = self.database['customer']\n self.rental = self.database['rental']\n LOGGER.info('database %s established on mongo', self.database)\n LOGGER.info('collections %s, %s, %s available in %s', self.product, self.customer, self.rental, self.database)\n except mer.ConnectionFailure as err:\n LOGGER.info('error connecting to mongo\\n %s', err)\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n if exc_type:\n pp.pprint(f'exc_type: {exc_type}')\n pp.pprint(f'exc_val: {exc_val}')\n pp.pprint(f'exc_tb: {exc_tb}')\n self.connection.close()\n LOGGER.info('disconected from mongo')\n<|end_body_2|>\n", "class_docstring": "establish MongoDB connection (per assignment's example)", "class_name": "MongoDBConnection", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MongoDBConnection:\n \"\"\"establish MongoDB connection (per assignment's example)\"\"\"\n\n def __init__(self, host='127.0.0.1', port=27017):\n \"\"\"use public ip-address and port\"\"\"\n <|body_0|>\n\n def __enter__(self):\n \"\"\"enter\"\"\"\n <|body_1|>\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n \"\"\"exit\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.host = host\n self.port = port\n self.connection = None\n self.database = 
None\n self.product = None\n self.customer = None\n self.rental = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.connection is None:\n try:\n self.connection = MongoClient(self.host, self.port)\n LOGGER.info('connected to mongo')\n LOGGER.info('entering host %s via port %s ', self.host, self.port)\n self.database = self.connection.FlorentinDB\n self.product = self.database['product']\n self.customer = self.database['customer']\n self.rental = self.database['rental']\n LOGGER.info('database %s established on mongo', self.database)\n LOGGER.info('collections %s, %s, %s available in %s', self.product, self.customer, self.rental, self.database)\n except mer.ConnectionFailure as err:\n LOGGER.info('error connecting to mongo\\n %s', err)\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n if exc_type:\n pp.pprint(f'exc_type: {exc_type}')\n pp.pprint(f'exc_val: {exc_val}')\n pp.pprint(f'exc_tb: {exc_tb}')\n self.connection.close()\n LOGGER.info('disconected from mongo')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000128", "length_bytes": 7711, "license_type": "no_license", "methods": [{"docstring": "use public ip-address and port", "name": "__init__", "signature": "def __init__(self, host='127.0.0.1', port=27017)"}, {"docstring": "enter", "name": "__enter__", "signature": "def __enter__(self)"}, {"docstring": "exit", "name": "__exit__", "signature": "def __exit__(self, exc_type, exc_val, exc_tb)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006154", "prompt": "Implement the Python class `MongoDBConnection` described below.\n\nClass description:\nestablish MongoDB connection (per assignment's example)\n\nMethod signatures and docstrings:\n- def __init__(self, host='127.0.0.1', port=27017): use public ip-address and port\n- def __enter__(self): enter\n- def __exit__(self, exc_type, exc_val, exc_tb): exit", "prompted_full_text": "Implement the Python class `MongoDBConnection` described below.\n\nClass description:\nestablish MongoDB connection (per assignment's example)\n\nMethod signatures and docstrings:\n- def __init__(self, host='127.0.0.1', port=27017): use public ip-address and port\n- def __enter__(self): enter\n- def __exit__(self, exc_type, exc_val, exc_tb): exit\n\n<|skeleton|>\nclass MongoDBConnection:\n \"\"\"establish MongoDB connection (per assignment's example)\"\"\"\n\n def __init__(self, host='127.0.0.1', port=27017):\n \"\"\"use public ip-address and port\"\"\"\n <|body_0|>\n\n def __enter__(self):\n \"\"\"enter\"\"\"\n <|body_1|>\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n \"\"\"exit\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.host = host\n self.port = port\n self.connection = None\n self.database = None\n self.product = None\n self.customer = None\n self.rental = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.connection is None:\n try:\n self.connection = MongoClient(self.host, self.port)\n LOGGER.info('connected to mongo')\n LOGGER.info('entering host %s via port %s ', self.host, self.port)\n self.database = self.connection.FlorentinDB\n self.product = self.database['product']\n self.customer = self.database['customer']\n self.rental = self.database['rental']\n LOGGER.info('database %s established on mongo', self.database)\n LOGGER.info('collections %s, %s, %s available in %s', self.product, self.customer, self.rental, self.database)\n except mer.ConnectionFailure as err:\n LOGGER.info('error connecting to mongo\\n %s', err)\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n if exc_type:\n 
pp.pprint(f'exc_type: {exc_type}')\n pp.pprint(f'exc_val: {exc_val}')\n pp.pprint(f'exc_tb: {exc_tb}')\n self.connection.close()\n LOGGER.info('disconected from mongo')\n<|end_body_2|>\n", "revision_id": "5dac60f39e3909ff05b26721d602ed20f14d6be3", "skeleton": "<|skeleton|>\nclass MongoDBConnection:\n \"\"\"establish MongoDB connection (per assignment's example)\"\"\"\n\n def __init__(self, host='127.0.0.1', port=27017):\n \"\"\"use public ip-address and port\"\"\"\n <|body_0|>\n\n def __enter__(self):\n \"\"\"enter\"\"\"\n <|body_1|>\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n \"\"\"exit\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MongoDBConnection:\n \"\"\"establish MongoDB connection (per assignment's example)\"\"\"\n\n def __init__(self, host='127.0.0.1', port=27017):\n \"\"\"use public ip-address and port\"\"\"\n self.host = host\n self.port = port\n self.connection = None\n self.database = None\n self.product = None\n self.customer = None\n self.rental = None\n\n def __enter__(self):\n \"\"\"enter\"\"\"\n if self.connection is None:\n try:\n self.connection = MongoClient(self.host, self.port)\n LOGGER.info('connected to mongo')\n LOGGER.info('entering host %s via port %s ', self.host, self.port)\n self.database = self.connection.FlorentinDB\n self.product = self.database['product']\n self.customer = self.database['customer']\n self.rental = self.database['rental']\n LOGGER.info('database %s established on mongo', self.database)\n LOGGER.info('collections %s, %s, %s available in %s', self.product, self.customer, self.rental, self.database)\n except mer.ConnectionFailure as err:\n LOGGER.info('error connecting to mongo\\n %s', err)\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n \"\"\"exit\"\"\"\n if exc_type:\n pp.pprint(f'exc_type: {exc_type}')\n pp.pprint(f'exc_val: {exc_val}')\n pp.pprint(f'exc_tb: {exc_tb}')\n self.connection.close()\n LOGGER.info('disconected from mongo')\n", "source": "the_stack_v2_python_sparse", "source_path": "students/florentin_popescu/Lesson_09/database.py", "source_repo": "JavaRod/SP_Python220B_2019", "split": "val", "star_events_count": 1} {"blob_id": "31c7d54cb2ebf974366c31050cedde8cf2113d27", "bodies": ["super(Swish, self).__init__()\nself.swish = swish\nself.eswish = eswish\nself.flatten = flatten\nself.beta = None\nself.param = None\nif eswish is not False:\n self.beta = beta\nif swish is not False:\n self.param = nn.Parameter(torch.randn(1))\n self.param.requires_grad = True\nif flatten is not False:\n if pfts is not False:\n self.const = nn.Parameter(torch.tensor(-0.2))\n self.const.requires_grad = True\n else:\n self.const = -0.2\nif eswish is not False and swish is not False and (flatten is not False):\n raise RuntimeError('Advisable to run either Swish or E-Swish or Flatten T-Swish')", "if self.swish is not False:\n return swish_function(input, self.swish, self.eswish, self.beta, self.param)\nif self.eswish is not False:\n return swish_function(input, self.swish, self.eswish, self.beta, self.param)\nif self.flatten is not False:\n return (input >= 0).float() * (input * swish_function(input, self.swish, self.eswish, self.beta, self.param) + self.const) + (input < 0).float() * self.const"], "bodies_text": "<|body_start_0|>\n super(Swish, self).__init__()\n self.swish = swish\n self.eswish = eswish\n self.flatten = flatten\n self.beta = None\n self.param = None\n if eswish is not False:\n self.beta = 
beta\n if swish is not False:\n self.param = nn.Parameter(torch.randn(1))\n self.param.requires_grad = True\n if flatten is not False:\n if pfts is not False:\n self.const = nn.Parameter(torch.tensor(-0.2))\n self.const.requires_grad = True\n else:\n self.const = -0.2\n if eswish is not False and swish is not False and (flatten is not False):\n raise RuntimeError('Advisable to run either Swish or E-Swish or Flatten T-Swish')\n<|end_body_0|>\n\n<|body_start_1|>\n if self.swish is not False:\n return swish_function(input, self.swish, self.eswish, self.beta, self.param)\n if self.eswish is not False:\n return swish_function(input, self.swish, self.eswish, self.beta, self.param)\n if self.flatten is not False:\n return (input >= 0).float() * (input * swish_function(input, self.swish, self.eswish, self.beta, self.param) + self.const) + (input < 0).float() * self.const\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Swish", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Swish:\n\n def __init__(self, eswish=False, swish=False, beta=1.735, flatten=False, pfts=False):\n \"\"\"Init method.\"\"\"\n <|body_0|>\n\n def forward(self, input):\n \"\"\"Forward pass of the function.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Swish, self).__init__()\n self.swish = swish\n self.eswish = eswish\n self.flatten = flatten\n self.beta = None\n self.param = None\n if eswish is not False:\n self.beta = beta\n if swish is not False:\n self.param = nn.Parameter(torch.randn(1))\n self.param.requires_grad = True\n if flatten is not False:\n if pfts is not False:\n self.const = nn.Parameter(torch.tensor(-0.2))\n self.const.requires_grad = True\n else:\n self.const = -0.2\n if eswish is not False and swish is not False and (flatten is not False):\n raise RuntimeError('Advisable to run either Swish or E-Swish or Flatten T-Swish')\n<|end_body_0|>\n\n<|body_start_1|>\n if self.swish is not False:\n return swish_function(input, self.swish, self.eswish, self.beta, self.param)\n if self.eswish is not False:\n return swish_function(input, self.swish, self.eswish, self.beta, self.param)\n if self.flatten is not False:\n return (input >= 0).float() * (input * swish_function(input, self.swish, self.eswish, self.beta, self.param) + self.const) + (input < 0).float() * self.const\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000129", "length_bytes": 32265, "license_type": "no_license", "methods": [{"docstring": "Init method.", "name": "__init__", "signature": "def __init__(self, eswish=False, swish=False, beta=1.735, flatten=False, pfts=False)"}, {"docstring": "Forward pass of the function.", "name": "forward", "signature": "def forward(self, input)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006184", "prompt": "Implement the Python class `Swish` described below.\n\nClass description:\nImplement the Swish class.\n\nMethod signatures and docstrings:\n- def __init__(self, eswish=False, swish=False, beta=1.735, flatten=False, pfts=False): Init method.\n- def forward(self, input): Forward pass of the function.", "prompted_full_text": "Implement the Python class `Swish` described below.\n\nClass description:\nImplement the Swish class.\n\nMethod signatures and docstrings:\n- def __init__(self, eswish=False, swish=False, beta=1.735, flatten=False, pfts=False): Init method.\n- def forward(self, input): Forward pass of the function.\n\n<|skeleton|>\nclass Swish:\n\n def __init__(self, eswish=False, 
swish=False, beta=1.735, flatten=False, pfts=False):\n \"\"\"Init method.\"\"\"\n <|body_0|>\n\n def forward(self, input):\n \"\"\"Forward pass of the function.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Swish, self).__init__()\n self.swish = swish\n self.eswish = eswish\n self.flatten = flatten\n self.beta = None\n self.param = None\n if eswish is not False:\n self.beta = beta\n if swish is not False:\n self.param = nn.Parameter(torch.randn(1))\n self.param.requires_grad = True\n if flatten is not False:\n if pfts is not False:\n self.const = nn.Parameter(torch.tensor(-0.2))\n self.const.requires_grad = True\n else:\n self.const = -0.2\n if eswish is not False and swish is not False and (flatten is not False):\n raise RuntimeError('Advisable to run either Swish or E-Swish or Flatten T-Swish')\n<|end_body_0|>\n\n<|body_start_1|>\n if self.swish is not False:\n return swish_function(input, self.swish, self.eswish, self.beta, self.param)\n if self.eswish is not False:\n return swish_function(input, self.swish, self.eswish, self.beta, self.param)\n if self.flatten is not False:\n return (input >= 0).float() * (input * swish_function(input, self.swish, self.eswish, self.beta, self.param) + self.const) + (input < 0).float() * self.const\n<|end_body_1|>\n", "revision_id": "7e55a422588c1d1e00f35a3d3a3ff896cce59e18", "skeleton": "<|skeleton|>\nclass Swish:\n\n def __init__(self, eswish=False, swish=False, beta=1.735, flatten=False, pfts=False):\n \"\"\"Init method.\"\"\"\n <|body_0|>\n\n def forward(self, input):\n \"\"\"Forward pass of the function.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Swish:\n def __init__(self, eswish=False, swish=False, beta=1.735, flatten=False, pfts=False):\n \"\"\"Init method.\"\"\"\n super(Swish, self).__init__()\n self.swish = swish\n self.eswish = eswish\n self.flatten = flatten\n self.beta = None\n self.param = None\n if eswish is not False:\n self.beta = beta\n if swish is not False:\n self.param = nn.Parameter(torch.randn(1))\n self.param.requires_grad = True\n if flatten is not False:\n if pfts is not False:\n self.const = nn.Parameter(torch.tensor(-0.2))\n self.const.requires_grad = True\n else:\n self.const = -0.2\n if eswish is not False and swish is not False and (flatten is not False):\n raise RuntimeError('Advisable to run either Swish or E-Swish or Flatten T-Swish')\n\n def forward(self, input):\n \"\"\"Forward pass of the function.\"\"\"\n if self.swish is not False:\n return swish_function(input, self.swish, self.eswish, self.beta, self.param)\n if self.eswish is not False:\n return swish_function(input, self.swish, self.eswish, self.beta, self.param)\n if self.flatten is not False:\n return (input >= 0).float() * (input * swish_function(input, self.swish, self.eswish, self.beta, self.param) + self.const) + (input < 0).float() * self.const\n", "source": "the_stack_v2_python_sparse", "source_path": "generated/test_digantamisra98_Echo.py", "source_repo": "jansel/pytorch-jit-paritybench", "split": "val", "star_events_count": 35} {"blob_id": "c42e3c3ba0683603dd404141af21a351782a3690", "bodies": ["ordered_uuids = [(k, v) for k, v in value.items()]\nordered_uuids.sort(key=lambda x: x[1]['order'])\nreturn '\\r\\n'.join([i[0] for i in ordered_uuids])", "if not len(value) or not isinstance(value, dict):\n return self.field.missing_value\nreturn value"], "bodies_text": "<|body_start_0|>\n ordered_uuids = [(k, v) for 
k, v in value.items()]\n ordered_uuids.sort(key=lambda x: x[1]['order'])\n return '\\r\\n'.join([i[0] for i in ordered_uuids])\n<|end_body_0|>\n\n<|body_start_1|>\n if not len(value) or not isinstance(value, dict):\n return self.field.missing_value\n return value\n<|end_body_1|>\n", "class_docstring": "A data converter using the field's ``fromUnicode()`` method.", "class_name": "UUIDSFieldDataConverter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UUIDSFieldDataConverter:\n \"\"\"A data converter using the field's ``fromUnicode()`` method.\"\"\"\n\n def toWidgetValue(self, value):\n \"\"\"Converts the internal stored value into something that a z3c.form widget understands :param value: [required] The internally stored value :type value: Dict :returns: A string with UUIDs separated by\"\"\"\n <|body_0|>\n\n def toFieldValue(self, value):\n \"\"\"Passes the value extracted from the widget to the internal structure. In this case, the value expected is already formatted :param value: [required] The data extracted from the widget :type value: Dict :returns: The value to be stored in the tile\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ordered_uuids = [(k, v) for k, v in value.items()]\n ordered_uuids.sort(key=lambda x: x[1]['order'])\n return '\\r\\n'.join([i[0] for i in ordered_uuids])\n<|end_body_0|>\n\n<|body_start_1|>\n if not len(value) or not isinstance(value, dict):\n return self.field.missing_value\n return value\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000130", "length_bytes": 4915, "license_type": "no_license", "methods": [{"docstring": "Converts the internal stored value into something that a z3c.form widget understands :param value: [required] The internally stored value :type value: Dict :returns: A string with UUIDs separated by", "name": "toWidgetValue", "signature": "def toWidgetValue(self, value)"}, {"docstring": "Passes the value extracted from the widget to the internal structure. In this case, the value expected is already formatted :param value: [required] The data extracted from the widget :type value: Dict :returns: The value to be stored in the tile", "name": "toFieldValue", "signature": "def toFieldValue(self, value)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007348", "prompt": "Implement the Python class `UUIDSFieldDataConverter` described below.\n\nClass description:\nA data converter using the field's ``fromUnicode()`` method.\n\nMethod signatures and docstrings:\n- def toWidgetValue(self, value): Converts the internal stored value into something that a z3c.form widget understands :param value: [required] The internally stored value :type value: Dict :returns: A string with UUIDs separated by\n- def toFieldValue(self, value): Passes the value extracted from the widget to the internal structure. 
In this case, the value expected is already formatted :param value: [required] The data extracted from the widget :type value: Dict :returns: The value to be stored in the tile", "prompted_full_text": "Implement the Python class `UUIDSFieldDataConverter` described below.\n\nClass description:\nA data converter using the field's ``fromUnicode()`` method.\n\nMethod signatures and docstrings:\n- def toWidgetValue(self, value): Converts the internal stored value into something that a z3c.form widget understands :param value: [required] The internally stored value :type value: Dict :returns: A string with UUIDs separated by\n- def toFieldValue(self, value): Passes the value extracted from the widget to the internal structure. In this case, the value expected is already formatted :param value: [required] The data extracted from the widget :type value: Dict :returns: The value to be stored in the tile\n\n<|skeleton|>\nclass UUIDSFieldDataConverter:\n \"\"\"A data converter using the field's ``fromUnicode()`` method.\"\"\"\n\n def toWidgetValue(self, value):\n \"\"\"Converts the internal stored value into something that a z3c.form widget understands :param value: [required] The internally stored value :type value: Dict :returns: A string with UUIDs separated by\"\"\"\n <|body_0|>\n\n def toFieldValue(self, value):\n \"\"\"Passes the value extracted from the widget to the internal structure. In this case, the value expected is already formatted :param value: [required] The data extracted from the widget :type value: Dict :returns: The value to be stored in the tile\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ordered_uuids = [(k, v) for k, v in value.items()]\n ordered_uuids.sort(key=lambda x: x[1]['order'])\n return '\\r\\n'.join([i[0] for i in ordered_uuids])\n<|end_body_0|>\n\n<|body_start_1|>\n if not len(value) or not isinstance(value, dict):\n return self.field.missing_value\n return value\n<|end_body_1|>\n", "revision_id": "55e273528cd5db4bbd1929a23ef74c3d873ec690", "skeleton": "<|skeleton|>\nclass UUIDSFieldDataConverter:\n \"\"\"A data converter using the field's ``fromUnicode()`` method.\"\"\"\n\n def toWidgetValue(self, value):\n \"\"\"Converts the internal stored value into something that a z3c.form widget understands :param value: [required] The internally stored value :type value: Dict :returns: A string with UUIDs separated by\"\"\"\n <|body_0|>\n\n def toFieldValue(self, value):\n \"\"\"Passes the value extracted from the widget to the internal structure. In this case, the value expected is already formatted :param value: [required] The data extracted from the widget :type value: Dict :returns: The value to be stored in the tile\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UUIDSFieldDataConverter:\n \"\"\"A data converter using the field's ``fromUnicode()`` method.\"\"\"\n\n def toWidgetValue(self, value):\n \"\"\"Converts the internal stored value into something that a z3c.form widget understands :param value: [required] The internally stored value :type value: Dict :returns: A string with UUIDs separated by\"\"\"\n ordered_uuids = [(k, v) for k, v in value.items()]\n ordered_uuids.sort(key=lambda x: x[1]['order'])\n return '\\r\\n'.join([i[0] for i in ordered_uuids])\n\n def toFieldValue(self, value):\n \"\"\"Passes the value extracted from the widget to the internal structure. 
In this case, the value expected is already formatted :param value: [required] The data extracted from the widget :type value: Dict :returns: The value to be stored in the tile\"\"\"\n if not len(value) or not isinstance(value, dict):\n return self.field.missing_value\n return value\n", "source": "the_stack_v2_python_sparse", "source_path": "buildout-cache/eggs/collective.cover-1.0a10-py2.7.egg/collective/cover/tiles/carousel.py", "source_repo": "Vinsurya/Plone", "split": "val", "star_events_count": 0} {"blob_id": "4b730d3e38b819b3c47d559efddf8d6c464e81a6", "bodies": ["it = iter(test_inputs.split('\\n')) if test_inputs else None\n\ndef uinput():\n return next(it) if it else sys.stdin.readline().rstrip()\n[self.n] = map(int, uinput().split())\nself.numa = [s == '>' for s in uinput()]\nself.numb = list(map(int, uinput().split()))", "result = 'FINITE'\npos = 0\nvis = set([])\nwhile 0 <= pos < self.n:\n vis.add(pos)\n if self.numa[pos]:\n pos += self.numb[pos]\n else:\n pos -= self.numb[pos]\n if pos in vis:\n result = 'IN' + result\n break\nreturn str(result)"], "bodies_text": "<|body_start_0|>\n it = iter(test_inputs.split('\\n')) if test_inputs else None\n\n def uinput():\n return next(it) if it else sys.stdin.readline().rstrip()\n [self.n] = map(int, uinput().split())\n self.numa = [s == '>' for s in uinput()]\n self.numb = list(map(int, uinput().split()))\n<|end_body_0|>\n\n<|body_start_1|>\n result = 'FINITE'\n pos = 0\n vis = set([])\n while 0 <= pos < self.n:\n vis.add(pos)\n if self.numa[pos]:\n pos += self.numb[pos]\n else:\n pos -= self.numb[pos]\n if pos in vis:\n result = 'IN' + result\n break\n return str(result)\n<|end_body_1|>\n", "class_docstring": "Gh representation", "class_name": "Gh", "detected_licenses": ["Unlicense", "LicenseRef-scancode-public-domain"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Gh:\n \"\"\"Gh representation\"\"\"\n\n def __init__(self, test_inputs=None):\n \"\"\"Default constructor\"\"\"\n <|body_0|>\n\n def calculate(self):\n \"\"\"Main calcualtion function of the class\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n it = iter(test_inputs.split('\\n')) if test_inputs else None\n\n def uinput():\n return next(it) if it else sys.stdin.readline().rstrip()\n [self.n] = map(int, uinput().split())\n self.numa = [s == '>' for s in uinput()]\n self.numb = list(map(int, uinput().split()))\n<|end_body_0|>\n\n<|body_start_1|>\n result = 'FINITE'\n pos = 0\n vis = set([])\n while 0 <= pos < self.n:\n vis.add(pos)\n if self.numa[pos]:\n pos += self.numb[pos]\n else:\n pos -= self.numb[pos]\n if pos in vis:\n result = 'IN' + result\n break\n return str(result)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000131", "length_bytes": 3180, "license_type": "permissive", "methods": [{"docstring": "Default constructor", "name": "__init__", "signature": "def __init__(self, test_inputs=None)"}, {"docstring": "Main calcualtion function of the class", "name": "calculate", "signature": "def calculate(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006266", "prompt": "Implement the Python class `Gh` described below.\n\nClass description:\nGh representation\n\nMethod signatures and docstrings:\n- def __init__(self, test_inputs=None): Default constructor\n- def calculate(self): Main calcualtion function of the class", "prompted_full_text": "Implement the Python class `Gh` described below.\n\nClass description:\nGh representation\n\nMethod signatures and docstrings:\n- def 
__init__(self, test_inputs=None): Default constructor\n- def calculate(self): Main calcualtion function of the class\n\n<|skeleton|>\nclass Gh:\n \"\"\"Gh representation\"\"\"\n\n def __init__(self, test_inputs=None):\n \"\"\"Default constructor\"\"\"\n <|body_0|>\n\n def calculate(self):\n \"\"\"Main calcualtion function of the class\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n it = iter(test_inputs.split('\\n')) if test_inputs else None\n\n def uinput():\n return next(it) if it else sys.stdin.readline().rstrip()\n [self.n] = map(int, uinput().split())\n self.numa = [s == '>' for s in uinput()]\n self.numb = list(map(int, uinput().split()))\n<|end_body_0|>\n\n<|body_start_1|>\n result = 'FINITE'\n pos = 0\n vis = set([])\n while 0 <= pos < self.n:\n vis.add(pos)\n if self.numa[pos]:\n pos += self.numb[pos]\n else:\n pos -= self.numb[pos]\n if pos in vis:\n result = 'IN' + result\n break\n return str(result)\n<|end_body_1|>\n", "revision_id": "ae02ea872ca91ef98630cc172a844b82cc56f621", "skeleton": "<|skeleton|>\nclass Gh:\n \"\"\"Gh representation\"\"\"\n\n def __init__(self, test_inputs=None):\n \"\"\"Default constructor\"\"\"\n <|body_0|>\n\n def calculate(self):\n \"\"\"Main calcualtion function of the class\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Gh:\n \"\"\"Gh representation\"\"\"\n\n def __init__(self, test_inputs=None):\n \"\"\"Default constructor\"\"\"\n it = iter(test_inputs.split('\\n')) if test_inputs else None\n\n def uinput():\n return next(it) if it else sys.stdin.readline().rstrip()\n [self.n] = map(int, uinput().split())\n self.numa = [s == '>' for s in uinput()]\n self.numb = list(map(int, uinput().split()))\n\n def calculate(self):\n \"\"\"Main calcualtion function of the class\"\"\"\n result = 'FINITE'\n pos = 0\n vis = set([])\n while 0 <= pos < self.n:\n vis.add(pos)\n if self.numa[pos]:\n pos += self.numb[pos]\n else:\n pos -= self.numb[pos]\n if pos in vis:\n result = 'IN' + result\n break\n return str(result)\n", "source": "the_stack_v2_python_sparse", "source_path": "codeforces/669B_gh.py", "source_repo": "snsokolov/contests", "split": "val", "star_events_count": 1} {"blob_id": "d3dcd82fe22869609945cb73474af1c159388268", "bodies": ["if role_str == 'edit':\n return Role.edit\nif role_str == 'install':\n return Role.install\nif role_str == 'view':\n return Role.view\nreturn Role.denied", "if role == Role.edit:\n return 'edit'\nif role == Role.install:\n return 'install'\nif role == Role.view:\n return 'view'\nreturn 'denied'"], "bodies_text": "<|body_start_0|>\n if role_str == 'edit':\n return Role.edit\n if role_str == 'install':\n return Role.install\n if role_str == 'view':\n return Role.view\n return Role.denied\n<|end_body_0|>\n\n<|body_start_1|>\n if role == Role.edit:\n return 'edit'\n if role == Role.install:\n return 'install'\n if role == Role.view:\n return 'view'\n return 'denied'\n<|end_body_1|>\n", "class_docstring": "Defines types of action a user can perform on a team and or project", "class_name": "Role", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Role:\n \"\"\"Defines types of action a user can perform on a team and or project\"\"\"\n\n def from_str(role_str):\n \"\"\"Convert a string into a Role object Args: role_str: (str) String representation of a Role Returns: (Role)\"\"\"\n <|body_0|>\n\n def to_str(role):\n \"\"\"Convert a Role 
object into a string. Args: role: (Role) Returns: (str)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if role_str == 'edit':\n return Role.edit\n if role_str == 'install':\n return Role.install\n if role_str == 'view':\n return Role.view\n return Role.denied\n<|end_body_0|>\n\n<|body_start_1|>\n if role == Role.edit:\n return 'edit'\n if role == Role.install:\n return 'install'\n if role == Role.view:\n return 'view'\n return 'denied'\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000132", "length_bytes": 3240, "license_type": "no_license", "methods": [{"docstring": "Convert a string into a Role object Args: role_str: (str) String representation of a Role Returns: (Role)", "name": "from_str", "signature": "def from_str(role_str)"}, {"docstring": "Convert a Role object into a string. Args: role: (Role) Returns: (str)", "name": "to_str", "signature": "def to_str(role)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006135", "prompt": "Implement the Python class `Role` described below.\n\nClass description:\nDefines types of action a user can perform on a team and or project\n\nMethod signatures and docstrings:\n- def from_str(role_str): Convert a string into a Role object Args: role_str: (str) String representation of a Role Returns: (Role)\n- def to_str(role): Convert a Role object into a string. Args: role: (Role) Returns: (str)", "prompted_full_text": "Implement the Python class `Role` described below.\n\nClass description:\nDefines types of action a user can perform on a team and or project\n\nMethod signatures and docstrings:\n- def from_str(role_str): Convert a string into a Role object Args: role_str: (str) String representation of a Role Returns: (Role)\n- def to_str(role): Convert a Role object into a string. Args: role: (Role) Returns: (str)\n\n<|skeleton|>\nclass Role:\n \"\"\"Defines types of action a user can perform on a team and or project\"\"\"\n\n def from_str(role_str):\n \"\"\"Convert a string into a Role object Args: role_str: (str) String representation of a Role Returns: (Role)\"\"\"\n <|body_0|>\n\n def to_str(role):\n \"\"\"Convert a Role object into a string. Args: role: (Role) Returns: (str)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if role_str == 'edit':\n return Role.edit\n if role_str == 'install':\n return Role.install\n if role_str == 'view':\n return Role.view\n return Role.denied\n<|end_body_0|>\n\n<|body_start_1|>\n if role == Role.edit:\n return 'edit'\n if role == Role.install:\n return 'install'\n if role == Role.view:\n return 'view'\n return 'denied'\n<|end_body_1|>\n", "revision_id": "ff1feea27efa6544c0e443b953951bb50cbdd9bb", "skeleton": "<|skeleton|>\nclass Role:\n \"\"\"Defines types of action a user can perform on a team and or project\"\"\"\n\n def from_str(role_str):\n \"\"\"Convert a string into a Role object Args: role_str: (str) String representation of a Role Returns: (Role)\"\"\"\n <|body_0|>\n\n def to_str(role):\n \"\"\"Convert a Role object into a string. 
Args: role: (Role) Returns: (str)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Role:\n \"\"\"Defines types of action a user can perform on a team and or project\"\"\"\n\n def from_str(role_str):\n \"\"\"Convert a string into a Role object Args: role_str: (str) String representation of a Role Returns: (Role)\"\"\"\n if role_str == 'edit':\n return Role.edit\n if role_str == 'install':\n return Role.install\n if role_str == 'view':\n return Role.view\n return Role.denied\n\n def to_str(role):\n \"\"\"Convert a Role object into a string. Args: role: (Role) Returns: (str)\"\"\"\n if role == Role.edit:\n return 'edit'\n if role == Role.install:\n return 'install'\n if role == Role.view:\n return 'view'\n return 'denied'\n", "source": "the_stack_v2_python_sparse", "source_path": "seeweb/models/auth.py", "source_repo": "pradal/seeweb", "split": "val", "star_events_count": 0} {"blob_id": "b2e24849ae73ecebdeaa13c2c03efc5339dd2a9a", "bodies": ["self.h5_filename_queue = h5_filename_queue\ntables.open_file(table_path, 'w').close()\nsuper(WriteHDF5Thread, self).__init__()", "while True:\n if not self.h5_filename_queue.empty():\n temp_path = self.h5_filename_queue.get()\n log.debug('Writing %s to %s' % (temp_path, table_path))\n try:\n with tables.open_file(temp_path, 'a') as hdf5temp:\n with tables.open_file(table_path, 'a') as hdf5:\n temp_table = hdf5temp.root.values\n temp_status_table = hdf5temp.root.status\n if not hasattr(hdf5.root, 'values'):\n temp_table.copy(hdf5.root, 'values')\n temp_status_table.copy(hdf5.root, 'status')\n else:\n table = hdf5.root.values\n status_table = hdf5.root.status\n table.append(temp_table[:])\n status_table.append(temp_status_table[:])\n table.flush()\n status_table.flush()\n except StandardError as e:\n log.exception('Could not read hdf5 file')\n finally:\n log.debug('Clean up: removing %s' % temp_path)\n if os.path.exists(temp_path):\n os.remove(temp_path)\n if stop_write_thread is True:\n log.debug('Ending HDF5 write thread')\n break"], "bodies_text": "<|body_start_0|>\n self.h5_filename_queue = h5_filename_queue\n tables.open_file(table_path, 'w').close()\n super(WriteHDF5Thread, self).__init__()\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n if not self.h5_filename_queue.empty():\n temp_path = self.h5_filename_queue.get()\n log.debug('Writing %s to %s' % (temp_path, table_path))\n try:\n with tables.open_file(temp_path, 'a') as hdf5temp:\n with tables.open_file(table_path, 'a') as hdf5:\n temp_table = hdf5temp.root.values\n temp_status_table = hdf5temp.root.status\n if not hasattr(hdf5.root, 'values'):\n temp_table.copy(hdf5.root, 'values')\n temp_status_table.copy(hdf5.root, 'status')\n else:\n table = hdf5.root.values\n status_table = hdf5.root.status\n table.append(temp_table[:])\n status_table.append(temp_status_table[:])\n table.flush()\n status_table.flush()\n except StandardError as e:\n log.exception('Could not read hdf5 file')\n finally:\n log.debug('Clean up: removing %s' % temp_path)\n if os.path.exists(temp_path):\n os.remove(temp_path)\n if stop_write_thread is True:\n log.debug('Ending HDF5 write thread')\n break\n<|end_body_1|>\n", "class_docstring": "Copies small hdf5 feature tables into one large hdf5 feature table", "class_name": "WriteHDF5Thread", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WriteHDF5Thread:\n \"\"\"Copies small hdf5 
feature tables into one large hdf5 feature table\"\"\"\n\n def __init__(self, h5_filename_queue):\n \"\"\"param h5_filename_queue: a queue of temporary hdf5 files\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"While queue is not empty and stop_write_thread has not been set to true, the thread will open temporary hdf5 tables and copy them into the main hdf5 table and then delete the temporary file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.h5_filename_queue = h5_filename_queue\n tables.open_file(table_path, 'w').close()\n super(WriteHDF5Thread, self).__init__()\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n if not self.h5_filename_queue.empty():\n temp_path = self.h5_filename_queue.get()\n log.debug('Writing %s to %s' % (temp_path, table_path))\n try:\n with tables.open_file(temp_path, 'a') as hdf5temp:\n with tables.open_file(table_path, 'a') as hdf5:\n temp_table = hdf5temp.root.values\n temp_status_table = hdf5temp.root.status\n if not hasattr(hdf5.root, 'values'):\n temp_table.copy(hdf5.root, 'values')\n temp_status_table.copy(hdf5.root, 'status')\n else:\n table = hdf5.root.values\n status_table = hdf5.root.status\n table.append(temp_table[:])\n status_table.append(temp_status_table[:])\n table.flush()\n status_table.flush()\n except StandardError as e:\n log.exception('Could not read hdf5 file')\n finally:\n log.debug('Clean up: removing %s' % temp_path)\n if os.path.exists(temp_path):\n os.remove(temp_path)\n if stop_write_thread is True:\n log.debug('Ending HDF5 write thread')\n break\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000133", "length_bytes": 16115, "license_type": "permissive", "methods": [{"docstring": "param h5_filename_queue: a queue of temporary hdf5 files", "name": "__init__", "signature": "def __init__(self, h5_filename_queue)"}, {"docstring": "While queue is not empty and stop_write_thread has not been set to true, the thread will open temporary hdf5 tables and copy them into the main hdf5 table and then delete the temporary file.", "name": "run", "signature": "def run(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006887", "prompt": "Implement the Python class `WriteHDF5Thread` described below.\n\nClass description:\nCopies small hdf5 feature tables into one large hdf5 feature table\n\nMethod signatures and docstrings:\n- def __init__(self, h5_filename_queue): param h5_filename_queue: a queue of temporary hdf5 files\n- def run(self): While queue is not empty and stop_write_thread has not been set to true, the thread will open temporary hdf5 tables and copy them into the main hdf5 table and then delete the temporary file.", "prompted_full_text": "Implement the Python class `WriteHDF5Thread` described below.\n\nClass description:\nCopies small hdf5 feature tables into one large hdf5 feature table\n\nMethod signatures and docstrings:\n- def __init__(self, h5_filename_queue): param h5_filename_queue: a queue of temporary hdf5 files\n- def run(self): While queue is not empty and stop_write_thread has not been set to true, the thread will open temporary hdf5 tables and copy them into the main hdf5 table and then delete the temporary file.\n\n<|skeleton|>\nclass WriteHDF5Thread:\n \"\"\"Copies small hdf5 feature tables into one large hdf5 feature table\"\"\"\n\n def __init__(self, h5_filename_queue):\n \"\"\"param h5_filename_queue: a queue of temporary hdf5 files\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"While queue is not empty and stop_write_thread has not been set to true, the thread will 
open temporary hdf5 tables and copy them into the main hdf5 table and then delete the temporary file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.h5_filename_queue = h5_filename_queue\n tables.open_file(table_path, 'w').close()\n super(WriteHDF5Thread, self).__init__()\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n if not self.h5_filename_queue.empty():\n temp_path = self.h5_filename_queue.get()\n log.debug('Writing %s to %s' % (temp_path, table_path))\n try:\n with tables.open_file(temp_path, 'a') as hdf5temp:\n with tables.open_file(table_path, 'a') as hdf5:\n temp_table = hdf5temp.root.values\n temp_status_table = hdf5temp.root.status\n if not hasattr(hdf5.root, 'values'):\n temp_table.copy(hdf5.root, 'values')\n temp_status_table.copy(hdf5.root, 'status')\n else:\n table = hdf5.root.values\n status_table = hdf5.root.status\n table.append(temp_table[:])\n status_table.append(temp_status_table[:])\n table.flush()\n status_table.flush()\n except StandardError as e:\n log.exception('Could not read hdf5 file')\n finally:\n log.debug('Clean up: removing %s' % temp_path)\n if os.path.exists(temp_path):\n os.remove(temp_path)\n if stop_write_thread is True:\n log.debug('Ending HDF5 write thread')\n break\n<|end_body_1|>\n", "revision_id": "bf9a5470908ea0823c8398565086b1e6b960c73b", "skeleton": "<|skeleton|>\nclass WriteHDF5Thread:\n \"\"\"Copies small hdf5 feature tables into one large hdf5 feature table\"\"\"\n\n def __init__(self, h5_filename_queue):\n \"\"\"param h5_filename_queue: a queue of temporary hdf5 files\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"While queue is not empty and stop_write_thread has not been set to true, the thread will open temporary hdf5 tables and copy them into the main hdf5 table and then delete the temporary file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class WriteHDF5Thread:\n \"\"\"Copies small hdf5 feature tables into one large hdf5 feature table\"\"\"\n\n def __init__(self, h5_filename_queue):\n \"\"\"param h5_filename_queue: a queue of temporary hdf5 files\"\"\"\n self.h5_filename_queue = h5_filename_queue\n tables.open_file(table_path, 'w').close()\n super(WriteHDF5Thread, self).__init__()\n\n def run(self):\n \"\"\"While queue is not empty and stop_write_thread has not been set to true, the thread will open temporary hdf5 tables and copy them into the main hdf5 table and then delete the temporary file.\"\"\"\n while True:\n if not self.h5_filename_queue.empty():\n temp_path = self.h5_filename_queue.get()\n log.debug('Writing %s to %s' % (temp_path, table_path))\n try:\n with tables.open_file(temp_path, 'a') as hdf5temp:\n with tables.open_file(table_path, 'a') as hdf5:\n temp_table = hdf5temp.root.values\n temp_status_table = hdf5temp.root.status\n if not hasattr(hdf5.root, 'values'):\n temp_table.copy(hdf5.root, 'values')\n temp_status_table.copy(hdf5.root, 'status')\n else:\n table = hdf5.root.values\n status_table = hdf5.root.status\n table.append(temp_table[:])\n status_table.append(temp_status_table[:])\n table.flush()\n status_table.flush()\n except StandardError as e:\n log.exception('Could not read hdf5 file')\n finally:\n log.debug('Clean up: removing %s' % temp_path)\n if os.path.exists(temp_path):\n os.remove(temp_path)\n if stop_write_thread is True:\n log.debug('Ending HDF5 write thread')\n break\n", "source": "the_stack_v2_python_sparse", "source_path": 
"webserver/python2.7/site-packages/bqapi/bqfeature.py", "source_repo": "maxr1876/Radix", "split": "val", "star_events_count": 0} {"blob_id": "3ce597e3152c1182e3141e2370e04f1e61f2f2ad", "bodies": ["super(MessagePassing, self).__init__(options, is_training)\nif not isinstance(options, graph_network_pb2.MessagePassing):\n raise ValueError('Options has to be an MessagePassing proto.')\nself.use_reverse_edges = options.use_reverse_edges\nself.add_bi_directional_edges = options.add_bi_directional_edges\nself.add_self_loop_edges = True", "original_nodes = graphs_tuple.nodes\noriginal_edges = graphs_tuple.edges\nnetwork = GNetMPNN()\nfor _ in range(self.options.n_layer):\n graphs_tuple = network(graphs_tuple, hidden_size=self.options.hidden_size, regularizer=regularizer, attn_scale=self.options.attn_scale, attn_dropout_keep_prob=self.options.attn_dropout_keep_prob, is_training=self.is_training)\nif self.options.n_layer:\n graphs_tuple = graphs_tuple.replace(nodes=original_nodes + graphs_tuple.nodes, edges=original_edges + graphs_tuple.edges)\nreturn graphs_tuple"], "bodies_text": "<|body_start_0|>\n super(MessagePassing, self).__init__(options, is_training)\n if not isinstance(options, graph_network_pb2.MessagePassing):\n raise ValueError('Options has to be an MessagePassing proto.')\n self.use_reverse_edges = options.use_reverse_edges\n self.add_bi_directional_edges = options.add_bi_directional_edges\n self.add_self_loop_edges = True\n<|end_body_0|>\n\n<|body_start_1|>\n original_nodes = graphs_tuple.nodes\n original_edges = graphs_tuple.edges\n network = GNetMPNN()\n for _ in range(self.options.n_layer):\n graphs_tuple = network(graphs_tuple, hidden_size=self.options.hidden_size, regularizer=regularizer, attn_scale=self.options.attn_scale, attn_dropout_keep_prob=self.options.attn_dropout_keep_prob, is_training=self.is_training)\n if self.options.n_layer:\n graphs_tuple = graphs_tuple.replace(nodes=original_nodes + graphs_tuple.nodes, edges=original_edges + graphs_tuple.edges)\n return graphs_tuple\n<|end_body_1|>\n", "class_docstring": "Self attention model using a RNN cell.", "class_name": "MessagePassing", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MessagePassing:\n \"\"\"Self attention model using a RNN cell.\"\"\"\n\n def __init__(self, options, is_training=False):\n \"\"\"Initializes the graph network. Args: options: proto to store the configs. is_training: if True, build the training graph.\"\"\"\n <|body_0|>\n\n def _build_graph(self, graphs_tuple, regularizer):\n \"\"\"Builds graph network. Args: graphs_tuple: A GraphTuple instance. regularizer: Regularizer to be used in linear layers. 
Returns: output_graphs_tuple: A updated GraphTuple instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MessagePassing, self).__init__(options, is_training)\n if not isinstance(options, graph_network_pb2.MessagePassing):\n raise ValueError('Options has to be an MessagePassing proto.')\n self.use_reverse_edges = options.use_reverse_edges\n self.add_bi_directional_edges = options.add_bi_directional_edges\n self.add_self_loop_edges = True\n<|end_body_0|>\n\n<|body_start_1|>\n original_nodes = graphs_tuple.nodes\n original_edges = graphs_tuple.edges\n network = GNetMPNN()\n for _ in range(self.options.n_layer):\n graphs_tuple = network(graphs_tuple, hidden_size=self.options.hidden_size, regularizer=regularizer, attn_scale=self.options.attn_scale, attn_dropout_keep_prob=self.options.attn_dropout_keep_prob, is_training=self.is_training)\n if self.options.n_layer:\n graphs_tuple = graphs_tuple.replace(nodes=original_nodes + graphs_tuple.nodes, edges=original_edges + graphs_tuple.edges)\n return graphs_tuple\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000134", "length_bytes": 18418, "license_type": "permissive", "methods": [{"docstring": "Initializes the graph network. Args: options: proto to store the configs. is_training: if True, build the training graph.", "name": "__init__", "signature": "def __init__(self, options, is_training=False)"}, {"docstring": "Builds graph network. Args: graphs_tuple: A GraphTuple instance. regularizer: Regularizer to be used in linear layers. Returns: output_graphs_tuple: A updated GraphTuple instance.", "name": "_build_graph", "signature": "def _build_graph(self, graphs_tuple, regularizer)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002218", "prompt": "Implement the Python class `MessagePassing` described below.\n\nClass description:\nSelf attention model using a RNN cell.\n\nMethod signatures and docstrings:\n- def __init__(self, options, is_training=False): Initializes the graph network. Args: options: proto to store the configs. is_training: if True, build the training graph.\n- def _build_graph(self, graphs_tuple, regularizer): Builds graph network. Args: graphs_tuple: A GraphTuple instance. regularizer: Regularizer to be used in linear layers. Returns: output_graphs_tuple: A updated GraphTuple instance.", "prompted_full_text": "Implement the Python class `MessagePassing` described below.\n\nClass description:\nSelf attention model using a RNN cell.\n\nMethod signatures and docstrings:\n- def __init__(self, options, is_training=False): Initializes the graph network. Args: options: proto to store the configs. is_training: if True, build the training graph.\n- def _build_graph(self, graphs_tuple, regularizer): Builds graph network. Args: graphs_tuple: A GraphTuple instance. regularizer: Regularizer to be used in linear layers. Returns: output_graphs_tuple: A updated GraphTuple instance.\n\n<|skeleton|>\nclass MessagePassing:\n \"\"\"Self attention model using a RNN cell.\"\"\"\n\n def __init__(self, options, is_training=False):\n \"\"\"Initializes the graph network. Args: options: proto to store the configs. is_training: if True, build the training graph.\"\"\"\n <|body_0|>\n\n def _build_graph(self, graphs_tuple, regularizer):\n \"\"\"Builds graph network. Args: graphs_tuple: A GraphTuple instance. regularizer: Regularizer to be used in linear layers. 
Returns: output_graphs_tuple: A updated GraphTuple instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MessagePassing, self).__init__(options, is_training)\n if not isinstance(options, graph_network_pb2.MessagePassing):\n raise ValueError('Options has to be an MessagePassing proto.')\n self.use_reverse_edges = options.use_reverse_edges\n self.add_bi_directional_edges = options.add_bi_directional_edges\n self.add_self_loop_edges = True\n<|end_body_0|>\n\n<|body_start_1|>\n original_nodes = graphs_tuple.nodes\n original_edges = graphs_tuple.edges\n network = GNetMPNN()\n for _ in range(self.options.n_layer):\n graphs_tuple = network(graphs_tuple, hidden_size=self.options.hidden_size, regularizer=regularizer, attn_scale=self.options.attn_scale, attn_dropout_keep_prob=self.options.attn_dropout_keep_prob, is_training=self.is_training)\n if self.options.n_layer:\n graphs_tuple = graphs_tuple.replace(nodes=original_nodes + graphs_tuple.nodes, edges=original_edges + graphs_tuple.edges)\n return graphs_tuple\n<|end_body_1|>\n", "revision_id": "4d20dadffe7584ac2c7f26419960512380b8d06e", "skeleton": "<|skeleton|>\nclass MessagePassing:\n \"\"\"Self attention model using a RNN cell.\"\"\"\n\n def __init__(self, options, is_training=False):\n \"\"\"Initializes the graph network. Args: options: proto to store the configs. is_training: if True, build the training graph.\"\"\"\n <|body_0|>\n\n def _build_graph(self, graphs_tuple, regularizer):\n \"\"\"Builds graph network. Args: graphs_tuple: A GraphTuple instance. regularizer: Regularizer to be used in linear layers. Returns: output_graphs_tuple: A updated GraphTuple instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MessagePassing:\n \"\"\"Self attention model using a RNN cell.\"\"\"\n\n def __init__(self, options, is_training=False):\n \"\"\"Initializes the graph network. Args: options: proto to store the configs. is_training: if True, build the training graph.\"\"\"\n super(MessagePassing, self).__init__(options, is_training)\n if not isinstance(options, graph_network_pb2.MessagePassing):\n raise ValueError('Options has to be an MessagePassing proto.')\n self.use_reverse_edges = options.use_reverse_edges\n self.add_bi_directional_edges = options.add_bi_directional_edges\n self.add_self_loop_edges = True\n\n def _build_graph(self, graphs_tuple, regularizer):\n \"\"\"Builds graph network. Args: graphs_tuple: A GraphTuple instance. regularizer: Regularizer to be used in linear layers. 
Returns: output_graphs_tuple: A updated GraphTuple instance.\"\"\"\n original_nodes = graphs_tuple.nodes\n original_edges = graphs_tuple.edges\n network = GNetMPNN()\n for _ in range(self.options.n_layer):\n graphs_tuple = network(graphs_tuple, hidden_size=self.options.hidden_size, regularizer=regularizer, attn_scale=self.options.attn_scale, attn_dropout_keep_prob=self.options.attn_dropout_keep_prob, is_training=self.is_training)\n if self.options.n_layer:\n graphs_tuple = graphs_tuple.replace(nodes=original_nodes + graphs_tuple.nodes, edges=original_edges + graphs_tuple.edges)\n return graphs_tuple\n", "source": "the_stack_v2_python_sparse", "source_path": "modeling/modules/graph_networks.py", "source_repo": "yekeren/WSSGG", "split": "val", "star_events_count": 40} {"blob_id": "69b393cd5759dd2a7e1fb48e5e28999d4c630796", "bodies": ["blog_ids = Blog.objects.filter(user=request.my_user).values_list('id', flat=True)\nblog_list = update.get_blog_info(list(blog_ids))\nreturn success(blog_list)", "blog_id = json.loads(request.body).get('blog_id')\nif update.update_status(blog_id, '3'):\n return success()\nreturn error(404, '删除失败')"], "bodies_text": "<|body_start_0|>\n blog_ids = Blog.objects.filter(user=request.my_user).values_list('id', flat=True)\n blog_list = update.get_blog_info(list(blog_ids))\n return success(blog_list)\n<|end_body_0|>\n\n<|body_start_1|>\n blog_id = json.loads(request.body).get('blog_id')\n if update.update_status(blog_id, '3'):\n return success()\n return error(404, '删除失败')\n<|end_body_1|>\n", "class_docstring": "", "class_name": "BlogInfo", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BlogInfo:\n\n def get(self, request, user_id):\n \"\"\"获取博客\"\"\"\n <|body_0|>\n\n def patch(self, request, user_id):\n \"\"\"删除博客\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n blog_ids = Blog.objects.filter(user=request.my_user).values_list('id', flat=True)\n blog_list = update.get_blog_info(list(blog_ids))\n return success(blog_list)\n<|end_body_0|>\n\n<|body_start_1|>\n blog_id = json.loads(request.body).get('blog_id')\n if update.update_status(blog_id, '3'):\n return success()\n return error(404, '删除失败')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000135", "length_bytes": 4853, "license_type": "no_license", "methods": [{"docstring": "获取博客", "name": "get", "signature": "def get(self, request, user_id)"}, {"docstring": "删除博客", "name": "patch", "signature": "def patch(self, request, user_id)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002948", "prompt": "Implement the Python class `BlogInfo` described below.\n\nClass description:\nImplement the BlogInfo class.\n\nMethod signatures and docstrings:\n- def get(self, request, user_id): 获取博客\n- def patch(self, request, user_id): 删除博客", "prompted_full_text": "Implement the Python class `BlogInfo` described below.\n\nClass description:\nImplement the BlogInfo class.\n\nMethod signatures and docstrings:\n- def get(self, request, user_id): 获取博客\n- def patch(self, request, user_id): 删除博客\n\n<|skeleton|>\nclass BlogInfo:\n\n def get(self, request, user_id):\n \"\"\"获取博客\"\"\"\n <|body_0|>\n\n def patch(self, request, user_id):\n \"\"\"删除博客\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n blog_ids = Blog.objects.filter(user=request.my_user).values_list('id', flat=True)\n blog_list = update.get_blog_info(list(blog_ids))\n return success(blog_list)\n<|end_body_0|>\n\n<|body_start_1|>\n blog_id = 
json.loads(request.body).get('blog_id')\n if update.update_status(blog_id, '3'):\n return success()\n return error(404, '删除失败')\n<|end_body_1|>\n", "revision_id": "9870f9402c9416594409be974c640f890b46a41b", "skeleton": "<|skeleton|>\nclass BlogInfo:\n\n def get(self, request, user_id):\n \"\"\"获取博客\"\"\"\n <|body_0|>\n\n def patch(self, request, user_id):\n \"\"\"删除博客\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BlogInfo:\n def get(self, request, user_id):\n \"\"\"获取博客\"\"\"\n blog_ids = Blog.objects.filter(user=request.my_user).values_list('id', flat=True)\n blog_list = update.get_blog_info(list(blog_ids))\n return success(blog_list)\n\n def patch(self, request, user_id):\n \"\"\"删除博客\"\"\"\n blog_id = json.loads(request.body).get('blog_id')\n if update.update_status(blog_id, '3'):\n return success()\n return error(404, '删除失败')\n", "source": "the_stack_v2_python_sparse", "source_path": "PYMARA 2/user/views.py", "source_repo": "yuanzuliang/PyMara", "split": "val", "star_events_count": 1} {"blob_id": "aa0e47b2db689ce6f2281c885025a77e7a99fa39", "bodies": ["sec = False\nkey = '%s_%s_%s' % (SERVER_NAME, LOCK_WAIT, pm)\nif value is None:\n value = time.time() * 1000\nvalue = str(value)\nwhile True:\n keys = redis_client.keys('%s_trade_%s*' % (SERVER_NAME, pm))\n if trade == False:\n if len(keys) > 0 and wait == False:\n break\n elif len(keys) > 0:\n time.sleep(0.1)\n continue\n if redis_client.setnx(key, value):\n if timeout is not None:\n redis_client.pexpire(key, int(timeout))\n sec = True\n break\n elif wait == False:\n break\n time.sleep(0.1)\nif sec:\n logging.debug('加锁成功,key:%s value:%s' % (key, value))\nelse:\n logging.debug('加锁失败,key:%s value:%s' % (key, value))\nreturn (sec, value)", "key = '%s_%s_%s' % (SERVER_NAME, LOCK_WAIT, pm)\nvalue = str(value)\ntmp_value = redis_client.get(key)\nif tmp_value == value:\n redis_client.delete(key)"], "bodies_text": "<|body_start_0|>\n sec = False\n key = '%s_%s_%s' % (SERVER_NAME, LOCK_WAIT, pm)\n if value is None:\n value = time.time() * 1000\n value = str(value)\n while True:\n keys = redis_client.keys('%s_trade_%s*' % (SERVER_NAME, pm))\n if trade == False:\n if len(keys) > 0 and wait == False:\n break\n elif len(keys) > 0:\n time.sleep(0.1)\n continue\n if redis_client.setnx(key, value):\n if timeout is not None:\n redis_client.pexpire(key, int(timeout))\n sec = True\n break\n elif wait == False:\n break\n time.sleep(0.1)\n if sec:\n logging.debug('加锁成功,key:%s value:%s' % (key, value))\n else:\n logging.debug('加锁失败,key:%s value:%s' % (key, value))\n return (sec, value)\n<|end_body_0|>\n\n<|body_start_1|>\n key = '%s_%s_%s' % (SERVER_NAME, LOCK_WAIT, pm)\n value = str(value)\n tmp_value = redis_client.get(key)\n if tmp_value == value:\n redis_client.delete(key)\n<|end_body_1|>\n", "class_docstring": "平台锁", "class_name": "LockWait", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LockWait:\n \"\"\"平台锁\"\"\"\n\n def add_lock(self, pm, value=None, timeout=None, wait=True, trade=False):\n \"\"\"lock.wait改为redis锁 params timeout 超时时间,毫秒 return sec, value\"\"\"\n <|body_0|>\n\n def remove_lock(self, pm, value):\n \"\"\"删除lock.wait\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sec = False\n key = '%s_%s_%s' % (SERVER_NAME, LOCK_WAIT, pm)\n if value is None:\n value = time.time() * 1000\n value = str(value)\n while True:\n keys = 
redis_client.keys('%s_trade_%s*' % (SERVER_NAME, pm))\n if trade == False:\n if len(keys) > 0 and wait == False:\n break\n elif len(keys) > 0:\n time.sleep(0.1)\n continue\n if redis_client.setnx(key, value):\n if timeout is not None:\n redis_client.pexpire(key, int(timeout))\n sec = True\n break\n elif wait == False:\n break\n time.sleep(0.1)\n if sec:\n logging.debug('加锁成功,key:%s value:%s' % (key, value))\n else:\n logging.debug('加锁失败,key:%s value:%s' % (key, value))\n return (sec, value)\n<|end_body_0|>\n\n<|body_start_1|>\n key = '%s_%s_%s' % (SERVER_NAME, LOCK_WAIT, pm)\n value = str(value)\n tmp_value = redis_client.get(key)\n if tmp_value == value:\n redis_client.delete(key)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000136", "length_bytes": 3929, "license_type": "no_license", "methods": [{"docstring": "lock.wait改为redis锁 params timeout 超时时间,毫秒 return sec, value", "name": "add_lock", "signature": "def add_lock(self, pm, value=None, timeout=None, wait=True, trade=False)"}, {"docstring": "删除lock.wait", "name": "remove_lock", "signature": "def remove_lock(self, pm, value)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006698", "prompt": "Implement the Python class `LockWait` described below.\n\nClass description:\n平台锁\n\nMethod signatures and docstrings:\n- def add_lock(self, pm, value=None, timeout=None, wait=True, trade=False): lock.wait改为redis锁 params timeout 超时时间,毫秒 return sec, value\n- def remove_lock(self, pm, value): 删除lock.wait", "prompted_full_text": "Implement the Python class `LockWait` described below.\n\nClass description:\n平台锁\n\nMethod signatures and docstrings:\n- def add_lock(self, pm, value=None, timeout=None, wait=True, trade=False): lock.wait改为redis锁 params timeout 超时时间,毫秒 return sec, value\n- def remove_lock(self, pm, value): 删除lock.wait\n\n<|skeleton|>\nclass LockWait:\n \"\"\"平台锁\"\"\"\n\n def add_lock(self, pm, value=None, timeout=None, wait=True, trade=False):\n \"\"\"lock.wait改为redis锁 params timeout 超时时间,毫秒 return sec, value\"\"\"\n <|body_0|>\n\n def remove_lock(self, pm, value):\n \"\"\"删除lock.wait\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sec = False\n key = '%s_%s_%s' % (SERVER_NAME, LOCK_WAIT, pm)\n if value is None:\n value = time.time() * 1000\n value = str(value)\n while True:\n keys = redis_client.keys('%s_trade_%s*' % (SERVER_NAME, pm))\n if trade == False:\n if len(keys) > 0 and wait == False:\n break\n elif len(keys) > 0:\n time.sleep(0.1)\n continue\n if redis_client.setnx(key, value):\n if timeout is not None:\n redis_client.pexpire(key, int(timeout))\n sec = True\n break\n elif wait == False:\n break\n time.sleep(0.1)\n if sec:\n logging.debug('加锁成功,key:%s value:%s' % (key, value))\n else:\n logging.debug('加锁失败,key:%s value:%s' % (key, value))\n return (sec, value)\n<|end_body_0|>\n\n<|body_start_1|>\n key = '%s_%s_%s' % (SERVER_NAME, LOCK_WAIT, pm)\n value = str(value)\n tmp_value = redis_client.get(key)\n if tmp_value == value:\n redis_client.delete(key)\n<|end_body_1|>\n", "revision_id": "933327852b63b10e7d4254f5708739673bdf750d", "skeleton": "<|skeleton|>\nclass LockWait:\n \"\"\"平台锁\"\"\"\n\n def add_lock(self, pm, value=None, timeout=None, wait=True, trade=False):\n \"\"\"lock.wait改为redis锁 params timeout 超时时间,毫秒 return sec, value\"\"\"\n <|body_0|>\n\n def remove_lock(self, pm, value):\n \"\"\"删除lock.wait\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LockWait:\n 
\"\"\"平台锁\"\"\"\n\n def add_lock(self, pm, value=None, timeout=None, wait=True, trade=False):\n \"\"\"lock.wait改为redis锁 params timeout 超时时间,毫秒 return sec, value\"\"\"\n sec = False\n key = '%s_%s_%s' % (SERVER_NAME, LOCK_WAIT, pm)\n if value is None:\n value = time.time() * 1000\n value = str(value)\n while True:\n keys = redis_client.keys('%s_trade_%s*' % (SERVER_NAME, pm))\n if trade == False:\n if len(keys) > 0 and wait == False:\n break\n elif len(keys) > 0:\n time.sleep(0.1)\n continue\n if redis_client.setnx(key, value):\n if timeout is not None:\n redis_client.pexpire(key, int(timeout))\n sec = True\n break\n elif wait == False:\n break\n time.sleep(0.1)\n if sec:\n logging.debug('加锁成功,key:%s value:%s' % (key, value))\n else:\n logging.debug('加锁失败,key:%s value:%s' % (key, value))\n return (sec, value)\n\n def remove_lock(self, pm, value):\n \"\"\"删除lock.wait\"\"\"\n key = '%s_%s_%s' % (SERVER_NAME, LOCK_WAIT, pm)\n value = str(value)\n tmp_value = redis_client.get(key)\n if tmp_value == value:\n redis_client.delete(key)\n", "source": "the_stack_v2_python_sparse", "source_path": "publib/locker.py", "source_repo": "sunlewei-nuanguang/offlinedata_process", "split": "val", "star_events_count": 0} {"blob_id": "cfde457675af576c03951a39725249b17164802d", "bodies": ["vel_x = set_up_xy_velocity_cube('advection_velocity_x')\nvel_y = vel_x.copy(data=2.0 * np.ones(shape=(4, 3)))\nself.dummy_plugin = AdvectField(vel_x, vel_y)\nself.data = np.array([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0], [0.0, 1.0, 2.0], [0.0, 0.0, 1.0]])\nself.xgrid, self.ygrid = np.meshgrid(np.arange(3), np.arange(4))", "xsrc = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])\nysrc = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])\ncond = np.array([[False, True, True], [False, True, True], [False, True, True], [False, False, False]])\nxfrac = np.full((4, 3), 0.5)\nyfrac = np.full((4, 3), 0.75)\noutdata = np.zeros(shape=(4, 3))\nexpected_output = np.array([[0.0, 0.375, 0.75], [0.0, 0.0, 0.375], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\nself.dummy_plugin._increment_output_array(self.data, outdata, cond, self.xgrid, self.ygrid, xsrc, ysrc, xfrac, yfrac)\nself.assertIsInstance(outdata, np.ndarray)\nself.assertArrayAlmostEqual(outdata, expected_output)"], "bodies_text": "<|body_start_0|>\n vel_x = set_up_xy_velocity_cube('advection_velocity_x')\n vel_y = vel_x.copy(data=2.0 * np.ones(shape=(4, 3)))\n self.dummy_plugin = AdvectField(vel_x, vel_y)\n self.data = np.array([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0], [0.0, 1.0, 2.0], [0.0, 0.0, 1.0]])\n self.xgrid, self.ygrid = np.meshgrid(np.arange(3), np.arange(4))\n<|end_body_0|>\n\n<|body_start_1|>\n xsrc = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])\n ysrc = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])\n cond = np.array([[False, True, True], [False, True, True], [False, True, True], [False, False, False]])\n xfrac = np.full((4, 3), 0.5)\n yfrac = np.full((4, 3), 0.75)\n outdata = np.zeros(shape=(4, 3))\n expected_output = np.array([[0.0, 0.375, 0.75], [0.0, 0.0, 0.375], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n self.dummy_plugin._increment_output_array(self.data, outdata, cond, self.xgrid, self.ygrid, xsrc, ysrc, xfrac, yfrac)\n self.assertIsInstance(outdata, np.ndarray)\n self.assertArrayAlmostEqual(outdata, expected_output)\n<|end_body_1|>\n", "class_docstring": "Tests for the _increment_output_array method", "class_name": "Test__increment_output_array", "detected_licenses": ["BSD-3-Clause", "LicenseRef-scancode-proprietary-license"], "format_version": 
"stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Test__increment_output_array:\n \"\"\"Tests for the _increment_output_array method\"\"\"\n\n def setUp(self):\n \"\"\"Create input arrays\"\"\"\n <|body_0|>\n\n def test_basic(self):\n \"\"\"Test one increment from the points negative x-wards and positive y-wards on the source grid, with different directional weightings\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n vel_x = set_up_xy_velocity_cube('advection_velocity_x')\n vel_y = vel_x.copy(data=2.0 * np.ones(shape=(4, 3)))\n self.dummy_plugin = AdvectField(vel_x, vel_y)\n self.data = np.array([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0], [0.0, 1.0, 2.0], [0.0, 0.0, 1.0]])\n self.xgrid, self.ygrid = np.meshgrid(np.arange(3), np.arange(4))\n<|end_body_0|>\n\n<|body_start_1|>\n xsrc = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])\n ysrc = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])\n cond = np.array([[False, True, True], [False, True, True], [False, True, True], [False, False, False]])\n xfrac = np.full((4, 3), 0.5)\n yfrac = np.full((4, 3), 0.75)\n outdata = np.zeros(shape=(4, 3))\n expected_output = np.array([[0.0, 0.375, 0.75], [0.0, 0.0, 0.375], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n self.dummy_plugin._increment_output_array(self.data, outdata, cond, self.xgrid, self.ygrid, xsrc, ysrc, xfrac, yfrac)\n self.assertIsInstance(outdata, np.ndarray)\n self.assertArrayAlmostEqual(outdata, expected_output)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000137", "length_bytes": 22262, "license_type": "permissive", "methods": [{"docstring": "Create input arrays", "name": "setUp", "signature": "def setUp(self)"}, {"docstring": "Test one increment from the points negative x-wards and positive y-wards on the source grid, with different directional weightings", "name": "test_basic", "signature": "def test_basic(self)"}], "n_methods": 2, "prompt": "Implement the Python class `Test__increment_output_array` described below.\n\nClass description:\nTests for the _increment_output_array method\n\nMethod signatures and docstrings:\n- def setUp(self): Create input arrays\n- def test_basic(self): Test one increment from the points negative x-wards and positive y-wards on the source grid, with different directional weightings", "prompted_full_text": "Implement the Python class `Test__increment_output_array` described below.\n\nClass description:\nTests for the _increment_output_array method\n\nMethod signatures and docstrings:\n- def setUp(self): Create input arrays\n- def test_basic(self): Test one increment from the points negative x-wards and positive y-wards on the source grid, with different directional weightings\n\n<|skeleton|>\nclass Test__increment_output_array:\n \"\"\"Tests for the _increment_output_array method\"\"\"\n\n def setUp(self):\n \"\"\"Create input arrays\"\"\"\n <|body_0|>\n\n def test_basic(self):\n \"\"\"Test one increment from the points negative x-wards and positive y-wards on the source grid, with different directional weightings\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n vel_x = set_up_xy_velocity_cube('advection_velocity_x')\n vel_y = vel_x.copy(data=2.0 * np.ones(shape=(4, 3)))\n self.dummy_plugin = AdvectField(vel_x, vel_y)\n self.data = np.array([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0], [0.0, 1.0, 2.0], [0.0, 0.0, 1.0]])\n self.xgrid, self.ygrid = np.meshgrid(np.arange(3), np.arange(4))\n<|end_body_0|>\n\n<|body_start_1|>\n xsrc = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])\n ysrc = np.array([[1, 
1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])\n cond = np.array([[False, True, True], [False, True, True], [False, True, True], [False, False, False]])\n xfrac = np.full((4, 3), 0.5)\n yfrac = np.full((4, 3), 0.75)\n outdata = np.zeros(shape=(4, 3))\n expected_output = np.array([[0.0, 0.375, 0.75], [0.0, 0.0, 0.375], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n self.dummy_plugin._increment_output_array(self.data, outdata, cond, self.xgrid, self.ygrid, xsrc, ysrc, xfrac, yfrac)\n self.assertIsInstance(outdata, np.ndarray)\n self.assertArrayAlmostEqual(outdata, expected_output)\n<|end_body_1|>\n", "revision_id": "cd2c9019944345df1e703bf8f625db537ad9f559", "skeleton": "<|skeleton|>\nclass Test__increment_output_array:\n \"\"\"Tests for the _increment_output_array method\"\"\"\n\n def setUp(self):\n \"\"\"Create input arrays\"\"\"\n <|body_0|>\n\n def test_basic(self):\n \"\"\"Test one increment from the points negative x-wards and positive y-wards on the source grid, with different directional weightings\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Test__increment_output_array:\n \"\"\"Tests for the _increment_output_array method\"\"\"\n\n def setUp(self):\n \"\"\"Create input arrays\"\"\"\n vel_x = set_up_xy_velocity_cube('advection_velocity_x')\n vel_y = vel_x.copy(data=2.0 * np.ones(shape=(4, 3)))\n self.dummy_plugin = AdvectField(vel_x, vel_y)\n self.data = np.array([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0], [0.0, 1.0, 2.0], [0.0, 0.0, 1.0]])\n self.xgrid, self.ygrid = np.meshgrid(np.arange(3), np.arange(4))\n\n def test_basic(self):\n \"\"\"Test one increment from the points negative x-wards and positive y-wards on the source grid, with different directional weightings\"\"\"\n xsrc = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])\n ysrc = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])\n cond = np.array([[False, True, True], [False, True, True], [False, True, True], [False, False, False]])\n xfrac = np.full((4, 3), 0.5)\n yfrac = np.full((4, 3), 0.75)\n outdata = np.zeros(shape=(4, 3))\n expected_output = np.array([[0.0, 0.375, 0.75], [0.0, 0.0, 0.375], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n self.dummy_plugin._increment_output_array(self.data, outdata, cond, self.xgrid, self.ygrid, xsrc, ysrc, xfrac, yfrac)\n self.assertIsInstance(outdata, np.ndarray)\n self.assertArrayAlmostEqual(outdata, expected_output)\n", "source": "the_stack_v2_python_sparse", "source_path": "improver_tests/nowcasting/forecasting/test_AdvectField.py", "source_repo": "metoppv/improver", "split": "val", "star_events_count": 101} {"blob_id": "99c3c1b966c4f3037e7b35909ea5c5a885bf9c03", "bodies": ["session = DBSession()\nsession.merge(trans_inst)\nsession.commit()\nsession.close()", "session = DBSession()\nif 'user_id' in kwargs:\n _user_id = kwargs['user_id']\nselect = session.query(Trans_inst).filter(Trans_inst.user_id == _user_id).first()\nprint(select)\nsession.close()\nreturn select"], "bodies_text": "<|body_start_0|>\n session = DBSession()\n session.merge(trans_inst)\n session.commit()\n session.close()\n<|end_body_0|>\n\n<|body_start_1|>\n session = DBSession()\n if 'user_id' in kwargs:\n _user_id = kwargs['user_id']\n select = session.query(Trans_inst).filter(Trans_inst.user_id == _user_id).first()\n print(select)\n session.close()\n return select\n<|end_body_1|>\n", "class_docstring": "策略实例model类", "class_name": "Trans_inst", "detected_licenses": [], "format_version": 
"stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Trans_inst:\n \"\"\"策略实例model类\"\"\"\n\n def save(trans_inst):\n \"\"\"新加/修改策略实例表 :param trans: :return:\"\"\"\n <|body_0|>\n\n def select(self, **kwargs):\n \"\"\"新加/修改交易表 :param trans: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n session = DBSession()\n session.merge(trans_inst)\n session.commit()\n session.close()\n<|end_body_0|>\n\n<|body_start_1|>\n session = DBSession()\n if 'user_id' in kwargs:\n _user_id = kwargs['user_id']\n select = session.query(Trans_inst).filter(Trans_inst.user_id == _user_id).first()\n print(select)\n session.close()\n return select\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000138", "length_bytes": 8115, "license_type": "no_license", "methods": [{"docstring": "新加/修改策略实例表 :param trans: :return:", "name": "save", "signature": "def save(trans_inst)"}, {"docstring": "新加/修改交易表 :param trans: :return:", "name": "select", "signature": "def select(self, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000519", "prompt": "Implement the Python class `Trans_inst` described below.\n\nClass description:\n策略实例model类\n\nMethod signatures and docstrings:\n- def save(trans_inst): 新加/修改策略实例表 :param trans: :return:\n- def select(self, **kwargs): 新加/修改交易表 :param trans: :return:", "prompted_full_text": "Implement the Python class `Trans_inst` described below.\n\nClass description:\n策略实例model类\n\nMethod signatures and docstrings:\n- def save(trans_inst): 新加/修改策略实例表 :param trans: :return:\n- def select(self, **kwargs): 新加/修改交易表 :param trans: :return:\n\n<|skeleton|>\nclass Trans_inst:\n \"\"\"策略实例model类\"\"\"\n\n def save(trans_inst):\n \"\"\"新加/修改策略实例表 :param trans: :return:\"\"\"\n <|body_0|>\n\n def select(self, **kwargs):\n \"\"\"新加/修改交易表 :param trans: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n session = DBSession()\n session.merge(trans_inst)\n session.commit()\n session.close()\n<|end_body_0|>\n\n<|body_start_1|>\n session = DBSession()\n if 'user_id' in kwargs:\n _user_id = kwargs['user_id']\n select = session.query(Trans_inst).filter(Trans_inst.user_id == _user_id).first()\n print(select)\n session.close()\n return select\n<|end_body_1|>\n", "revision_id": "1bc744a6d331b4b733f6b6658b8310eb0c30524e", "skeleton": "<|skeleton|>\nclass Trans_inst:\n \"\"\"策略实例model类\"\"\"\n\n def save(trans_inst):\n \"\"\"新加/修改策略实例表 :param trans: :return:\"\"\"\n <|body_0|>\n\n def select(self, **kwargs):\n \"\"\"新加/修改交易表 :param trans: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Trans_inst:\n \"\"\"策略实例model类\"\"\"\n\n def save(trans_inst):\n \"\"\"新加/修改策略实例表 :param trans: :return:\"\"\"\n session = DBSession()\n session.merge(trans_inst)\n session.commit()\n session.close()\n\n def select(self, **kwargs):\n \"\"\"新加/修改交易表 :param trans: :return:\"\"\"\n session = DBSession()\n if 'user_id' in kwargs:\n _user_id = kwargs['user_id']\n select = session.query(Trans_inst).filter(Trans_inst.user_id == _user_id).first()\n print(select)\n session.close()\n return select\n", "source": "the_stack_v2_python_sparse", "source_path": "investment/transaction/models.py", "source_repo": "cliicy/vtrade", "split": "val", "star_events_count": 0} {"blob_id": "b4c85ea22f86b770e02b54f593325a061217c67b", "bodies": ["self.model = model\nself.handle = []\nself.relu_outputs = []", "def _record_gradients(module, grad_in, 
grad_out):\n self.gradients = grad_in[0]\nfor _, module in self.model.named_modules():\n if isinstance(module, nn.modules.conv.Conv2d) and module.in_channels == 3:\n backward_handle = module.register_backward_hook(_record_gradients)\n self.handle.append(backward_handle)", "def _record_output(module, input_, output):\n self.relu_outputs.append(output)\n\ndef _clip_gradients(module, grad_in, grad_out):\n relu_output = self.relu_outputs.pop()\n relu_output[relu_output > 0] = 1\n positive_grad_out = torch.clamp(grad_out[0], min=0.0)\n modified_grad_out = positive_grad_out * relu_output\n return (modified_grad_out,)\nfor _, module in self.model.named_modules():\n if isinstance(module, nn.ReLU):\n forward_handle = module.register_forward_hook(_record_output)\n backward_handle = module.register_backward_hook(_clip_gradients)\n self.handle.append(forward_handle)\n self.handle.append(backward_handle)"], "bodies_text": "<|body_start_0|>\n self.model = model\n self.handle = []\n self.relu_outputs = []\n<|end_body_0|>\n\n<|body_start_1|>\n def _record_gradients(module, grad_in, grad_out):\n self.gradients = grad_in[0]\n for _, module in self.model.named_modules():\n if isinstance(module, nn.modules.conv.Conv2d) and module.in_channels == 3:\n backward_handle = module.register_backward_hook(_record_gradients)\n self.handle.append(backward_handle)\n<|end_body_1|>\n\n<|body_start_2|>\n def _record_output(module, input_, output):\n self.relu_outputs.append(output)\n\n def _clip_gradients(module, grad_in, grad_out):\n relu_output = self.relu_outputs.pop()\n relu_output[relu_output > 0] = 1\n positive_grad_out = torch.clamp(grad_out[0], min=0.0)\n modified_grad_out = positive_grad_out * relu_output\n return (modified_grad_out,)\n for _, module in self.model.named_modules():\n if isinstance(module, nn.ReLU):\n forward_handle = module.register_forward_hook(_record_output)\n backward_handle = module.register_backward_hook(_clip_gradients)\n self.handle.append(forward_handle)\n self.handle.append(backward_handle)\n<|end_body_2|>\n", "class_docstring": "Base class for backpropagation.", "class_name": "BaseProp", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BaseProp:\n \"\"\"Base class for backpropagation.\"\"\"\n\n def __init__(self, model):\n \"\"\"Init # Arguments: model: torchvision.models. A pretrained model. handle: list. Handle list that register a hook function. relu_outputs: list. 
Forward output after relu.\"\"\"\n <|body_0|>\n\n def _register_conv_hook(self):\n \"\"\"Register hook function to save gradient w.r.t input image.\"\"\"\n <|body_1|>\n\n def _register_relu_hooks(self):\n \"\"\"Register hook function to save forward and backward relu result.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.model = model\n self.handle = []\n self.relu_outputs = []\n<|end_body_0|>\n\n<|body_start_1|>\n def _record_gradients(module, grad_in, grad_out):\n self.gradients = grad_in[0]\n for _, module in self.model.named_modules():\n if isinstance(module, nn.modules.conv.Conv2d) and module.in_channels == 3:\n backward_handle = module.register_backward_hook(_record_gradients)\n self.handle.append(backward_handle)\n<|end_body_1|>\n\n<|body_start_2|>\n def _record_output(module, input_, output):\n self.relu_outputs.append(output)\n\n def _clip_gradients(module, grad_in, grad_out):\n relu_output = self.relu_outputs.pop()\n relu_output[relu_output > 0] = 1\n positive_grad_out = torch.clamp(grad_out[0], min=0.0)\n modified_grad_out = positive_grad_out * relu_output\n return (modified_grad_out,)\n for _, module in self.model.named_modules():\n if isinstance(module, nn.ReLU):\n forward_handle = module.register_forward_hook(_record_output)\n backward_handle = module.register_backward_hook(_clip_gradients)\n self.handle.append(forward_handle)\n self.handle.append(backward_handle)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000139", "length_bytes": 2125, "license_type": "permissive", "methods": [{"docstring": "Init # Arguments: model: torchvision.models. A pretrained model. handle: list. Handle list that register a hook function. relu_outputs: list. Forward output after relu.", "name": "__init__", "signature": "def __init__(self, model)"}, {"docstring": "Register hook function to save gradient w.r.t input image.", "name": "_register_conv_hook", "signature": "def _register_conv_hook(self)"}, {"docstring": "Register hook function to save forward and backward relu result.", "name": "_register_relu_hooks", "signature": "def _register_relu_hooks(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_val_000360", "prompt": "Implement the Python class `BaseProp` described below.\n\nClass description:\nBase class for backpropagation.\n\nMethod signatures and docstrings:\n- def __init__(self, model): Init # Arguments: model: torchvision.models. A pretrained model. handle: list. Handle list that register a hook function. relu_outputs: list. Forward output after relu.\n- def _register_conv_hook(self): Register hook function to save gradient w.r.t input image.\n- def _register_relu_hooks(self): Register hook function to save forward and backward relu result.", "prompted_full_text": "Implement the Python class `BaseProp` described below.\n\nClass description:\nBase class for backpropagation.\n\nMethod signatures and docstrings:\n- def __init__(self, model): Init # Arguments: model: torchvision.models. A pretrained model. handle: list. Handle list that register a hook function. relu_outputs: list. Forward output after relu.\n- def _register_conv_hook(self): Register hook function to save gradient w.r.t input image.\n- def _register_relu_hooks(self): Register hook function to save forward and backward relu result.\n\n<|skeleton|>\nclass BaseProp:\n \"\"\"Base class for backpropagation.\"\"\"\n\n def __init__(self, model):\n \"\"\"Init # Arguments: model: torchvision.models. A pretrained model. handle: list. Handle list that register a hook function. 
relu_outputs: list. Forward output after relu.\"\"\"\n <|body_0|>\n\n def _register_conv_hook(self):\n \"\"\"Register hook function to save gradient w.r.t input image.\"\"\"\n <|body_1|>\n\n def _register_relu_hooks(self):\n \"\"\"Register hook function to save forward and backward relu result.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.model = model\n self.handle = []\n self.relu_outputs = []\n<|end_body_0|>\n\n<|body_start_1|>\n def _record_gradients(module, grad_in, grad_out):\n self.gradients = grad_in[0]\n for _, module in self.model.named_modules():\n if isinstance(module, nn.modules.conv.Conv2d) and module.in_channels == 3:\n backward_handle = module.register_backward_hook(_record_gradients)\n self.handle.append(backward_handle)\n<|end_body_1|>\n\n<|body_start_2|>\n def _record_output(module, input_, output):\n self.relu_outputs.append(output)\n\n def _clip_gradients(module, grad_in, grad_out):\n relu_output = self.relu_outputs.pop()\n relu_output[relu_output > 0] = 1\n positive_grad_out = torch.clamp(grad_out[0], min=0.0)\n modified_grad_out = positive_grad_out * relu_output\n return (modified_grad_out,)\n for _, module in self.model.named_modules():\n if isinstance(module, nn.ReLU):\n forward_handle = module.register_forward_hook(_record_output)\n backward_handle = module.register_backward_hook(_clip_gradients)\n self.handle.append(forward_handle)\n self.handle.append(backward_handle)\n<|end_body_2|>\n", "revision_id": "c2f0323b0ec55d684ee24dbe35a6046fe0074663", "skeleton": "<|skeleton|>\nclass BaseProp:\n \"\"\"Base class for backpropagation.\"\"\"\n\n def __init__(self, model):\n \"\"\"Init # Arguments: model: torchvision.models. A pretrained model. handle: list. Handle list that register a hook function. relu_outputs: list. Forward output after relu.\"\"\"\n <|body_0|>\n\n def _register_conv_hook(self):\n \"\"\"Register hook function to save gradient w.r.t input image.\"\"\"\n <|body_1|>\n\n def _register_relu_hooks(self):\n \"\"\"Register hook function to save forward and backward relu result.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BaseProp:\n \"\"\"Base class for backpropagation.\"\"\"\n\n def __init__(self, model):\n \"\"\"Init # Arguments: model: torchvision.models. A pretrained model. handle: list. Handle list that register a hook function. relu_outputs: list. 
Forward output after relu.\"\"\"\n self.model = model\n self.handle = []\n self.relu_outputs = []\n\n def _register_conv_hook(self):\n \"\"\"Register hook function to save gradient w.r.t input image.\"\"\"\n def _record_gradients(module, grad_in, grad_out):\n self.gradients = grad_in[0]\n for _, module in self.model.named_modules():\n if isinstance(module, nn.modules.conv.Conv2d) and module.in_channels == 3:\n backward_handle = module.register_backward_hook(_record_gradients)\n self.handle.append(backward_handle)\n\n def _register_relu_hooks(self):\n \"\"\"Register hook function to save forward and backward relu result.\"\"\"\n def _record_output(module, input_, output):\n self.relu_outputs.append(output)\n\n def _clip_gradients(module, grad_in, grad_out):\n relu_output = self.relu_outputs.pop()\n relu_output[relu_output > 0] = 1\n positive_grad_out = torch.clamp(grad_out[0], min=0.0)\n modified_grad_out = positive_grad_out * relu_output\n return (modified_grad_out,)\n for _, module in self.model.named_modules():\n if isinstance(module, nn.ReLU):\n forward_handle = module.register_forward_hook(_record_output)\n backward_handle = module.register_backward_hook(_clip_gradients)\n self.handle.append(forward_handle)\n self.handle.append(backward_handle)\n", "source": "the_stack_v2_python_sparse", "source_path": "xdeep/xlocal/gradient/backprop/base.py", "source_repo": "datamllab/xdeep", "split": "val", "star_events_count": 40} {"blob_id": "2d1b333d2e1403d1582431f1a6dc5348d084f571", "bodies": ["m = abs(n)\nans = 1\nwhile m > 0:\n if m & 1 == 1:\n ans *= x\n x *= x\n m >>= 1\nreturn ans if n >= 0 else 1 / ans", "if n == 0:\n return 1\nelif n == 1:\n return x\nelif n < 0:\n return self.myPow(1 / x, -n)\nelif n % 2 == 0:\n tmp = self.myPow(x, n // 2)\n return tmp * tmp\nelse:\n return x * self.myPow(x, n - 1)", "def dp(n):\n if n in record:\n return record[n]\n else:\n if n == 1:\n return x\n if n == 2:\n record[n] = x * x\n elif n % 2 == 0:\n record[n] = dp(n // 2) * dp(n // 2)\n else:\n record[n] = x * dp(n // 2) * dp(n // 2)\n return record[n]\nrecord = {}\nif n > 0:\n return dp(n)\nelif n < 0:\n return 1 / dp(-n)\nelse:\n return 1"], "bodies_text": "<|body_start_0|>\n m = abs(n)\n ans = 1\n while m > 0:\n if m & 1 == 1:\n ans *= x\n x *= x\n m >>= 1\n return ans if n >= 0 else 1 / ans\n<|end_body_0|>\n\n<|body_start_1|>\n if n == 0:\n return 1\n elif n == 1:\n return x\n elif n < 0:\n return self.myPow(1 / x, -n)\n elif n % 2 == 0:\n tmp = self.myPow(x, n // 2)\n return tmp * tmp\n else:\n return x * self.myPow(x, n - 1)\n<|end_body_1|>\n\n<|body_start_2|>\n def dp(n):\n if n in record:\n return record[n]\n else:\n if n == 1:\n return x\n if n == 2:\n record[n] = x * x\n elif n % 2 == 0:\n record[n] = dp(n // 2) * dp(n // 2)\n else:\n record[n] = x * dp(n // 2) * dp(n // 2)\n return record[n]\n record = {}\n if n > 0:\n return dp(n)\n elif n < 0:\n return 1 / dp(-n)\n else:\n return 1\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def myPow(self, x, n):\n \"\"\":type x: float :type n: int :rtype: float\"\"\"\n <|body_0|>\n\n def myPow_recursive(self, x, n):\n \"\"\":type x: float :type n: int :rtype: float\"\"\"\n <|body_1|>\n\n def myPow_dp(self, x, n):\n \"\"\":type x: float :type n: int :rtype: float\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m = abs(n)\n ans = 1\n while m > 0:\n if m & 1 == 1:\n ans *= x\n x *= x\n m >>= 1\n 
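A side note on the `BaseProp` record above: it wires guided backpropagation through `Module.register_backward_hook`, which PyTorch has since deprecated in favor of `register_full_backward_hook`, and its `_clip_gradients` mutates the saved forward activation in place (`relu_output[relu_output > 0] = 1`), which corrupts that tensor for any other consumer. A minimal sketch of the same hook wiring under the newer API, deriving a mask instead of mutating (names here are illustrative, and it assumes `ReLU(inplace=False)` modules):

import torch
import torch.nn as nn

class GuidedBackprop:
    """Minimal guided-backprop hook wiring for a torchvision-style model."""

    def __init__(self, model):
        self.model = model
        self.handles = []
        self.relu_outputs = []

    def register(self):
        def record_output(module, inputs, output):
            # Save the forward ReLU activation for use in the backward pass.
            self.relu_outputs.append(output)

        def clip_gradients(module, grad_input, grad_output):
            # Guided backprop: pass only positive gradients through
            # positively activated units; build a mask, don't mutate.
            relu_output = self.relu_outputs.pop()
            mask = (relu_output > 0).float()
            positive_grad = torch.clamp(grad_output[0], min=0.0)
            return (positive_grad * mask,)

        for module in self.model.modules():
            if isinstance(module, nn.ReLU):
                # register_full_backward_hook replaces the deprecated
                # register_backward_hook used in the record above.
                self.handles.append(module.register_forward_hook(record_output))
                self.handles.append(module.register_full_backward_hook(clip_gradients))

    def remove(self):
        for h in self.handles:
            h.remove()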
return ans if n >= 0 else 1 / ans\n<|end_body_0|>\n\n<|body_start_1|>\n if n == 0:\n return 1\n elif n == 1:\n return x\n elif n < 0:\n return self.myPow(1 / x, -n)\n elif n % 2 == 0:\n tmp = self.myPow(x, n // 2)\n return tmp * tmp\n else:\n return x * self.myPow(x, n - 1)\n<|end_body_1|>\n\n<|body_start_2|>\n def dp(n):\n if n in record:\n return record[n]\n else:\n if n == 1:\n return x\n if n == 2:\n record[n] = x * x\n elif n % 2 == 0:\n record[n] = dp(n // 2) * dp(n // 2)\n else:\n record[n] = x * dp(n // 2) * dp(n // 2)\n return record[n]\n record = {}\n if n > 0:\n return dp(n)\n elif n < 0:\n return 1 / dp(-n)\n else:\n return 1\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000140", "length_bytes": 2143, "license_type": "no_license", "methods": [{"docstring": ":type x: float :type n: int :rtype: float", "name": "myPow", "signature": "def myPow(self, x, n)"}, {"docstring": ":type x: float :type n: int :rtype: float", "name": "myPow_recursive", "signature": "def myPow_recursive(self, x, n)"}, {"docstring": ":type x: float :type n: int :rtype: float", "name": "myPow_dp", "signature": "def myPow_dp(self, x, n)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001130", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def myPow(self, x, n): :type x: float :type n: int :rtype: float\n- def myPow_recursive(self, x, n): :type x: float :type n: int :rtype: float\n- def myPow_dp(self, x, n): :type x: float :type n: int :rtype: float", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def myPow(self, x, n): :type x: float :type n: int :rtype: float\n- def myPow_recursive(self, x, n): :type x: float :type n: int :rtype: float\n- def myPow_dp(self, x, n): :type x: float :type n: int :rtype: float\n\n<|skeleton|>\nclass Solution:\n\n def myPow(self, x, n):\n \"\"\":type x: float :type n: int :rtype: float\"\"\"\n <|body_0|>\n\n def myPow_recursive(self, x, n):\n \"\"\":type x: float :type n: int :rtype: float\"\"\"\n <|body_1|>\n\n def myPow_dp(self, x, n):\n \"\"\":type x: float :type n: int :rtype: float\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m = abs(n)\n ans = 1\n while m > 0:\n if m & 1 == 1:\n ans *= x\n x *= x\n m >>= 1\n return ans if n >= 0 else 1 / ans\n<|end_body_0|>\n\n<|body_start_1|>\n if n == 0:\n return 1\n elif n == 1:\n return x\n elif n < 0:\n return self.myPow(1 / x, -n)\n elif n % 2 == 0:\n tmp = self.myPow(x, n // 2)\n return tmp * tmp\n else:\n return x * self.myPow(x, n - 1)\n<|end_body_1|>\n\n<|body_start_2|>\n def dp(n):\n if n in record:\n return record[n]\n else:\n if n == 1:\n return x\n if n == 2:\n record[n] = x * x\n elif n % 2 == 0:\n record[n] = dp(n // 2) * dp(n // 2)\n else:\n record[n] = x * dp(n // 2) * dp(n // 2)\n return record[n]\n record = {}\n if n > 0:\n return dp(n)\n elif n < 0:\n return 1 / dp(-n)\n else:\n return 1\n<|end_body_2|>\n", "revision_id": "e60ba45fe2f2e5e3b3abfecec3db76f5ce1fde59", "skeleton": "<|skeleton|>\nclass Solution:\n\n def myPow(self, x, n):\n \"\"\":type x: float :type n: int :rtype: float\"\"\"\n <|body_0|>\n\n def myPow_recursive(self, x, n):\n \"\"\":type x: float :type n: int :rtype: float\"\"\"\n <|body_1|>\n\n def myPow_dp(self, x, n):\n \"\"\":type x: float :type n: int :rtype: float\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": 
"stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def myPow(self, x, n):\n \"\"\":type x: float :type n: int :rtype: float\"\"\"\n m = abs(n)\n ans = 1\n while m > 0:\n if m & 1 == 1:\n ans *= x\n x *= x\n m >>= 1\n return ans if n >= 0 else 1 / ans\n\n def myPow_recursive(self, x, n):\n \"\"\":type x: float :type n: int :rtype: float\"\"\"\n if n == 0:\n return 1\n elif n == 1:\n return x\n elif n < 0:\n return self.myPow(1 / x, -n)\n elif n % 2 == 0:\n tmp = self.myPow(x, n // 2)\n return tmp * tmp\n else:\n return x * self.myPow(x, n - 1)\n\n def myPow_dp(self, x, n):\n \"\"\":type x: float :type n: int :rtype: float\"\"\"\n def dp(n):\n if n in record:\n return record[n]\n else:\n if n == 1:\n return x\n if n == 2:\n record[n] = x * x\n elif n % 2 == 0:\n record[n] = dp(n // 2) * dp(n // 2)\n else:\n record[n] = x * dp(n // 2) * dp(n // 2)\n return record[n]\n record = {}\n if n > 0:\n return dp(n)\n elif n < 0:\n return 1 / dp(-n)\n else:\n return 1\n", "source": "the_stack_v2_python_sparse", "source_path": "src/lt_50.py", "source_repo": "oxhead/CodingYourWay", "split": "val", "star_events_count": 0} {"blob_id": "0fc1cbabb7e623919f8961f9dd343c2cb84f3d1f", "bodies": ["super(TemplateCreate, self).AssertBasePermission(mr)\nif not self.CheckPerm(mr, permissions.EDIT_PROJECT):\n raise permissions.PermissionException('User is not allowed to administer this project')", "config = self.services.config.GetProjectConfig(mr.cnxn, mr.project_id)\nfield_views = tracker_views.MakeAllFieldValueViews(config, [], [], [], {})\napproval_subfields_present = any((fv.field_def.is_approval_subfield for fv in field_views))\ninitial_phases = [tracker_pb2.Phase()] * template_helpers.MAX_NUM_PHASES\nreturn {'admin_tab_mode': self._PROCESS_SUBTAB, 'allow_edit': ezt.boolean(True), 'new_template_form': ezt.boolean(True), 'initial_members_only': ezt.boolean(False), 'template_name': '', 'initial_content': '', 'initial_must_edit_summary': ezt.boolean(False), 'initial_summary': '', 'initial_status': '', 'initial_owner': '', 'initial_owner_defaults_to_member': ezt.boolean(False), 'initial_components': '', 'initial_component_required': ezt.boolean(False), 'initial_admins': '', 'fields': [view for view in field_views if view.field_def.type_name is not 'APPROVAL_TYPE'], 'initial_add_approvals': ezt.boolean(False), 'initial_phases': initial_phases, 'approvals': [view for view in field_views if view.field_def.type_name is 'APPROVAL_TYPE'], 'prechecked_approvals': [], 'required_approval_ids': [], 'approval_subfields_present': ezt.boolean(approval_subfields_present), 'phase_fields_present': ezt.boolean(False)}", "config = self.services.config.GetProjectConfig(mr.cnxn, mr.project_id)\nparsed = template_helpers.ParseTemplateRequest(post_data, config)\nfield_helpers.ShiftEnumFieldsIntoLabels(parsed.labels, [], parsed.field_val_strs, [], config)\nif not parsed.name:\n mr.errors.name = 'Please provide a template name'\nif self.services.template.GetTemplateByName(mr.cnxn, parsed.name, mr.project_id):\n mr.errors.name = 'Template with name %s already exists' % parsed.name\nadmin_ids, owner_id, component_ids, field_values, phases, approvals = template_helpers.GetTemplateInfoFromParsed(mr, self.services, parsed, config)\nif mr.errors.AnyErrors():\n field_views = tracker_views.MakeAllFieldValueViews(config, [], [], field_values, {})\n prechecked_approvals = template_helpers.GetCheckedApprovalsFromParsed(parsed.approvals_to_phase_idx)\n self.PleaseCorrect(mr, 
initial_members_only=ezt.boolean(parsed.members_only), template_name=parsed.name, initial_content=parsed.summary, initial_must_edit_summary=ezt.boolean(parsed.summary_must_be_edited), initial_description=parsed.content, initial_status=parsed.status, initial_owner=parsed.owner_str, initial_owner_defaults_to_member=ezt.boolean(parsed.owner_defaults_to_member), initial_components=', '.join(parsed.component_paths), initial_component_required=ezt.boolean(parsed.component_required), initial_admins=parsed.admin_str, labels=parsed.labels, fields=[view for view in field_views if view.field_def.type_name is not 'APPROVAL_TYPE'], initial_add_approvals=ezt.boolean(parsed.add_approvals), initial_phases=[tracker_pb2.Phase(name=name) for name in parsed.phase_names], approvals=[view for view in field_views if view.field_def.type_name is 'APPROVAL_TYPE'], prechecked_approvals=prechecked_approvals, required_approval_ids=parsed.required_approval_ids)\n return\nlabels = [label for label in parsed.labels if label]\nself.services.template.CreateIssueTemplateDef(mr.cnxn, mr.project_id, parsed.name, parsed.content, parsed.summary, parsed.summary_must_be_edited, parsed.status, parsed.members_only, parsed.owner_defaults_to_member, parsed.component_required, owner_id, labels, component_ids, admin_ids, field_values, phases=phases, approval_values=approvals)\nreturn framework_helpers.FormatAbsoluteURL(mr, urls.ADMIN_TEMPLATES, saved=1, ts=int(time.time()))"], "bodies_text": "<|body_start_0|>\n super(TemplateCreate, self).AssertBasePermission(mr)\n if not self.CheckPerm(mr, permissions.EDIT_PROJECT):\n raise permissions.PermissionException('User is not allowed to administer this project')\n<|end_body_0|>\n\n<|body_start_1|>\n config = self.services.config.GetProjectConfig(mr.cnxn, mr.project_id)\n field_views = tracker_views.MakeAllFieldValueViews(config, [], [], [], {})\n approval_subfields_present = any((fv.field_def.is_approval_subfield for fv in field_views))\n initial_phases = [tracker_pb2.Phase()] * template_helpers.MAX_NUM_PHASES\n return {'admin_tab_mode': self._PROCESS_SUBTAB, 'allow_edit': ezt.boolean(True), 'new_template_form': ezt.boolean(True), 'initial_members_only': ezt.boolean(False), 'template_name': '', 'initial_content': '', 'initial_must_edit_summary': ezt.boolean(False), 'initial_summary': '', 'initial_status': '', 'initial_owner': '', 'initial_owner_defaults_to_member': ezt.boolean(False), 'initial_components': '', 'initial_component_required': ezt.boolean(False), 'initial_admins': '', 'fields': [view for view in field_views if view.field_def.type_name is not 'APPROVAL_TYPE'], 'initial_add_approvals': ezt.boolean(False), 'initial_phases': initial_phases, 'approvals': [view for view in field_views if view.field_def.type_name is 'APPROVAL_TYPE'], 'prechecked_approvals': [], 'required_approval_ids': [], 'approval_subfields_present': ezt.boolean(approval_subfields_present), 'phase_fields_present': ezt.boolean(False)}\n<|end_body_1|>\n\n<|body_start_2|>\n config = self.services.config.GetProjectConfig(mr.cnxn, mr.project_id)\n parsed = template_helpers.ParseTemplateRequest(post_data, config)\n field_helpers.ShiftEnumFieldsIntoLabels(parsed.labels, [], parsed.field_val_strs, [], config)\n if not parsed.name:\n mr.errors.name = 'Please provide a template name'\n if self.services.template.GetTemplateByName(mr.cnxn, parsed.name, mr.project_id):\n mr.errors.name = 'Template with name %s already exists' % parsed.name\n admin_ids, owner_id, component_ids, field_values, phases, approvals = 
template_helpers.GetTemplateInfoFromParsed(mr, self.services, parsed, config)\n if mr.errors.AnyErrors():\n field_views = tracker_views.MakeAllFieldValueViews(config, [], [], field_values, {})\n prechecked_approvals = template_helpers.GetCheckedApprovalsFromParsed(parsed.approvals_to_phase_idx)\n self.PleaseCorrect(mr, initial_members_only=ezt.boolean(parsed.members_only), template_name=parsed.name, initial_content=parsed.summary, initial_must_edit_summary=ezt.boolean(parsed.summary_must_be_edited), initial_description=parsed.content, initial_status=parsed.status, initial_owner=parsed.owner_str, initial_owner_defaults_to_member=ezt.boolean(parsed.owner_defaults_to_member), initial_components=', '.join(parsed.component_paths), initial_component_required=ezt.boolean(parsed.component_required), initial_admins=parsed.admin_str, labels=parsed.labels, fields=[view for view in field_views if view.field_def.type_name is not 'APPROVAL_TYPE'], initial_add_approvals=ezt.boolean(parsed.add_approvals), initial_phases=[tracker_pb2.Phase(name=name) for name in parsed.phase_names], approvals=[view for view in field_views if view.field_def.type_name is 'APPROVAL_TYPE'], prechecked_approvals=prechecked_approvals, required_approval_ids=parsed.required_approval_ids)\n return\n labels = [label for label in parsed.labels if label]\n self.services.template.CreateIssueTemplateDef(mr.cnxn, mr.project_id, parsed.name, parsed.content, parsed.summary, parsed.summary_must_be_edited, parsed.status, parsed.members_only, parsed.owner_defaults_to_member, parsed.component_required, owner_id, labels, component_ids, admin_ids, field_values, phases=phases, approval_values=approvals)\n return framework_helpers.FormatAbsoluteURL(mr, urls.ADMIN_TEMPLATES, saved=1, ts=int(time.time()))\n<|end_body_2|>\n", "class_docstring": "Servlet allowing project owners to create an issue template.", "class_name": "TemplateCreate", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TemplateCreate:\n \"\"\"Servlet allowing project owners to create an issue template.\"\"\"\n\n def AssertBasePermission(self, mr):\n \"\"\"Check whether the user has any permission to visit this page. Args: mr: commonly used info parsed from the request\"\"\"\n <|body_0|>\n\n def GatherPageData(self, mr):\n \"\"\"Build up a dictionary of data values to use when rendering the page. Args: mr: commonly used info parsed from the request. Returns: Dict of values used by EZT for rendering the page.\"\"\"\n <|body_1|>\n\n def ProcessFormData(self, mr, post_data):\n \"\"\"Validate and store the contents of the issues tracker admin page. Args: mr: commonly used info parsed from the request. post_data: HTML form data from the request. 
Returns: String URL to redirect the user to, or None if response was already sent.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(TemplateCreate, self).AssertBasePermission(mr)\n if not self.CheckPerm(mr, permissions.EDIT_PROJECT):\n raise permissions.PermissionException('User is not allowed to administer this project')\n<|end_body_0|>\n\n<|body_start_1|>\n config = self.services.config.GetProjectConfig(mr.cnxn, mr.project_id)\n field_views = tracker_views.MakeAllFieldValueViews(config, [], [], [], {})\n approval_subfields_present = any((fv.field_def.is_approval_subfield for fv in field_views))\n initial_phases = [tracker_pb2.Phase()] * template_helpers.MAX_NUM_PHASES\n return {'admin_tab_mode': self._PROCESS_SUBTAB, 'allow_edit': ezt.boolean(True), 'new_template_form': ezt.boolean(True), 'initial_members_only': ezt.boolean(False), 'template_name': '', 'initial_content': '', 'initial_must_edit_summary': ezt.boolean(False), 'initial_summary': '', 'initial_status': '', 'initial_owner': '', 'initial_owner_defaults_to_member': ezt.boolean(False), 'initial_components': '', 'initial_component_required': ezt.boolean(False), 'initial_admins': '', 'fields': [view for view in field_views if view.field_def.type_name is not 'APPROVAL_TYPE'], 'initial_add_approvals': ezt.boolean(False), 'initial_phases': initial_phases, 'approvals': [view for view in field_views if view.field_def.type_name is 'APPROVAL_TYPE'], 'prechecked_approvals': [], 'required_approval_ids': [], 'approval_subfields_present': ezt.boolean(approval_subfields_present), 'phase_fields_present': ezt.boolean(False)}\n<|end_body_1|>\n\n<|body_start_2|>\n config = self.services.config.GetProjectConfig(mr.cnxn, mr.project_id)\n parsed = template_helpers.ParseTemplateRequest(post_data, config)\n field_helpers.ShiftEnumFieldsIntoLabels(parsed.labels, [], parsed.field_val_strs, [], config)\n if not parsed.name:\n mr.errors.name = 'Please provide a template name'\n if self.services.template.GetTemplateByName(mr.cnxn, parsed.name, mr.project_id):\n mr.errors.name = 'Template with name %s already exists' % parsed.name\n admin_ids, owner_id, component_ids, field_values, phases, approvals = template_helpers.GetTemplateInfoFromParsed(mr, self.services, parsed, config)\n if mr.errors.AnyErrors():\n field_views = tracker_views.MakeAllFieldValueViews(config, [], [], field_values, {})\n prechecked_approvals = template_helpers.GetCheckedApprovalsFromParsed(parsed.approvals_to_phase_idx)\n self.PleaseCorrect(mr, initial_members_only=ezt.boolean(parsed.members_only), template_name=parsed.name, initial_content=parsed.summary, initial_must_edit_summary=ezt.boolean(parsed.summary_must_be_edited), initial_description=parsed.content, initial_status=parsed.status, initial_owner=parsed.owner_str, initial_owner_defaults_to_member=ezt.boolean(parsed.owner_defaults_to_member), initial_components=', '.join(parsed.component_paths), initial_component_required=ezt.boolean(parsed.component_required), initial_admins=parsed.admin_str, labels=parsed.labels, fields=[view for view in field_views if view.field_def.type_name is not 'APPROVAL_TYPE'], initial_add_approvals=ezt.boolean(parsed.add_approvals), initial_phases=[tracker_pb2.Phase(name=name) for name in parsed.phase_names], approvals=[view for view in field_views if view.field_def.type_name is 'APPROVAL_TYPE'], prechecked_approvals=prechecked_approvals, required_approval_ids=parsed.required_approval_ids)\n return\n labels = [label for label in parsed.labels if label]\n 
self.services.template.CreateIssueTemplateDef(mr.cnxn, mr.project_id, parsed.name, parsed.content, parsed.summary, parsed.summary_must_be_edited, parsed.status, parsed.members_only, parsed.owner_defaults_to_member, parsed.component_required, owner_id, labels, component_ids, admin_ids, field_values, phases=phases, approval_values=approvals)\n return framework_helpers.FormatAbsoluteURL(mr, urls.ADMIN_TEMPLATES, saved=1, ts=int(time.time()))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000141", "length_bytes": 6534, "license_type": "permissive", "methods": [{"docstring": "Check whether the user has any permission to visit this page. Args: mr: commonly used info parsed from the request", "name": "AssertBasePermission", "signature": "def AssertBasePermission(self, mr)"}, {"docstring": "Build up a dictionary of data values to use when rendering the page. Args: mr: commonly used info parsed from the request. Returns: Dict of values used by EZT for rendering the page.", "name": "GatherPageData", "signature": "def GatherPageData(self, mr)"}, {"docstring": "Validate and store the contents of the issues tracker admin page. Args: mr: commonly used info parsed from the request. post_data: HTML form data from the request. Returns: String URL to redirect the user to, or None if response was already sent.", "name": "ProcessFormData", "signature": "def ProcessFormData(self, mr, post_data)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000987", "prompt": "Implement the Python class `TemplateCreate` described below.\n\nClass description:\nServlet allowing project owners to create an issue template.\n\nMethod signatures and docstrings:\n- def AssertBasePermission(self, mr): Check whether the user has any permission to visit this page. Args: mr: commonly used info parsed from the request\n- def GatherPageData(self, mr): Build up a dictionary of data values to use when rendering the page. Args: mr: commonly used info parsed from the request. Returns: Dict of values used by EZT for rendering the page.\n- def ProcessFormData(self, mr, post_data): Validate and store the contents of the issues tracker admin page. Args: mr: commonly used info parsed from the request. post_data: HTML form data from the request. Returns: String URL to redirect the user to, or None if response was already sent.", "prompted_full_text": "Implement the Python class `TemplateCreate` described below.\n\nClass description:\nServlet allowing project owners to create an issue template.\n\nMethod signatures and docstrings:\n- def AssertBasePermission(self, mr): Check whether the user has any permission to visit this page. Args: mr: commonly used info parsed from the request\n- def GatherPageData(self, mr): Build up a dictionary of data values to use when rendering the page. Args: mr: commonly used info parsed from the request. Returns: Dict of values used by EZT for rendering the page.\n- def ProcessFormData(self, mr, post_data): Validate and store the contents of the issues tracker admin page. Args: mr: commonly used info parsed from the request. post_data: HTML form data from the request. Returns: String URL to redirect the user to, or None if response was already sent.\n\n<|skeleton|>\nclass TemplateCreate:\n \"\"\"Servlet allowing project owners to create an issue template.\"\"\"\n\n def AssertBasePermission(self, mr):\n \"\"\"Check whether the user has any permission to visit this page. 
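One wrinkle in the `TemplateCreate` record worth flagging: its field filters compare strings by identity (`view.field_def.type_name is not 'APPROVAL_TYPE'` and `is 'APPROVAL_TYPE'`). That only works by accident of CPython string interning and raises a SyntaxWarning on modern interpreters; equality is the safe spelling. A small, self-contained illustration (the stand-in `field_views` objects are hypothetical, only the `type_name` values come from the record):

from types import SimpleNamespace

# Identity (`is`) asks "same object?"; equality (`==`) asks "same value?".
a = 'APPROVAL_TYPE'
b = ''.join(['APPROVAL', '_TYPE'])  # equal value, usually a distinct object
assert a == b                       # equality is well-defined
print(a is b)                       # identity is an interning accident (typically False)

# The robust form of the filters used in GatherPageData/ProcessFormData:
field_views = [
    SimpleNamespace(field_def=SimpleNamespace(type_name='APPROVAL_TYPE')),
    SimpleNamespace(field_def=SimpleNamespace(type_name='ENUM_TYPE')),
]
approvals = [v for v in field_views if v.field_def.type_name == 'APPROVAL_TYPE']
non_approvals = [v for v in field_views if v.field_def.type_name != 'APPROVAL_TYPE']
assert len(approvals) == 1 and len(non_approvals) == 1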
Args: mr: commonly used info parsed from the request\"\"\"\n <|body_0|>\n\n def GatherPageData(self, mr):\n \"\"\"Build up a dictionary of data values to use when rendering the page. Args: mr: commonly used info parsed from the request. Returns: Dict of values used by EZT for rendering the page.\"\"\"\n <|body_1|>\n\n def ProcessFormData(self, mr, post_data):\n \"\"\"Validate and store the contents of the issues tracker admin page. Args: mr: commonly used info parsed from the request. post_data: HTML form data from the request. Returns: String URL to redirect the user to, or None if response was already sent.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(TemplateCreate, self).AssertBasePermission(mr)\n if not self.CheckPerm(mr, permissions.EDIT_PROJECT):\n raise permissions.PermissionException('User is not allowed to administer this project')\n<|end_body_0|>\n\n<|body_start_1|>\n config = self.services.config.GetProjectConfig(mr.cnxn, mr.project_id)\n field_views = tracker_views.MakeAllFieldValueViews(config, [], [], [], {})\n approval_subfields_present = any((fv.field_def.is_approval_subfield for fv in field_views))\n initial_phases = [tracker_pb2.Phase()] * template_helpers.MAX_NUM_PHASES\n return {'admin_tab_mode': self._PROCESS_SUBTAB, 'allow_edit': ezt.boolean(True), 'new_template_form': ezt.boolean(True), 'initial_members_only': ezt.boolean(False), 'template_name': '', 'initial_content': '', 'initial_must_edit_summary': ezt.boolean(False), 'initial_summary': '', 'initial_status': '', 'initial_owner': '', 'initial_owner_defaults_to_member': ezt.boolean(False), 'initial_components': '', 'initial_component_required': ezt.boolean(False), 'initial_admins': '', 'fields': [view for view in field_views if view.field_def.type_name is not 'APPROVAL_TYPE'], 'initial_add_approvals': ezt.boolean(False), 'initial_phases': initial_phases, 'approvals': [view for view in field_views if view.field_def.type_name is 'APPROVAL_TYPE'], 'prechecked_approvals': [], 'required_approval_ids': [], 'approval_subfields_present': ezt.boolean(approval_subfields_present), 'phase_fields_present': ezt.boolean(False)}\n<|end_body_1|>\n\n<|body_start_2|>\n config = self.services.config.GetProjectConfig(mr.cnxn, mr.project_id)\n parsed = template_helpers.ParseTemplateRequest(post_data, config)\n field_helpers.ShiftEnumFieldsIntoLabels(parsed.labels, [], parsed.field_val_strs, [], config)\n if not parsed.name:\n mr.errors.name = 'Please provide a template name'\n if self.services.template.GetTemplateByName(mr.cnxn, parsed.name, mr.project_id):\n mr.errors.name = 'Template with name %s already exists' % parsed.name\n admin_ids, owner_id, component_ids, field_values, phases, approvals = template_helpers.GetTemplateInfoFromParsed(mr, self.services, parsed, config)\n if mr.errors.AnyErrors():\n field_views = tracker_views.MakeAllFieldValueViews(config, [], [], field_values, {})\n prechecked_approvals = template_helpers.GetCheckedApprovalsFromParsed(parsed.approvals_to_phase_idx)\n self.PleaseCorrect(mr, initial_members_only=ezt.boolean(parsed.members_only), template_name=parsed.name, initial_content=parsed.summary, initial_must_edit_summary=ezt.boolean(parsed.summary_must_be_edited), initial_description=parsed.content, initial_status=parsed.status, initial_owner=parsed.owner_str, initial_owner_defaults_to_member=ezt.boolean(parsed.owner_defaults_to_member), initial_components=', '.join(parsed.component_paths), initial_component_required=ezt.boolean(parsed.component_required), 
initial_admins=parsed.admin_str, labels=parsed.labels, fields=[view for view in field_views if view.field_def.type_name is not 'APPROVAL_TYPE'], initial_add_approvals=ezt.boolean(parsed.add_approvals), initial_phases=[tracker_pb2.Phase(name=name) for name in parsed.phase_names], approvals=[view for view in field_views if view.field_def.type_name is 'APPROVAL_TYPE'], prechecked_approvals=prechecked_approvals, required_approval_ids=parsed.required_approval_ids)\n return\n labels = [label for label in parsed.labels if label]\n self.services.template.CreateIssueTemplateDef(mr.cnxn, mr.project_id, parsed.name, parsed.content, parsed.summary, parsed.summary_must_be_edited, parsed.status, parsed.members_only, parsed.owner_defaults_to_member, parsed.component_required, owner_id, labels, component_ids, admin_ids, field_values, phases=phases, approval_values=approvals)\n return framework_helpers.FormatAbsoluteURL(mr, urls.ADMIN_TEMPLATES, saved=1, ts=int(time.time()))\n<|end_body_2|>\n", "revision_id": "b5d4783f99461438ca9e6a477535617fadab6ba3", "skeleton": "<|skeleton|>\nclass TemplateCreate:\n \"\"\"Servlet allowing project owners to create an issue template.\"\"\"\n\n def AssertBasePermission(self, mr):\n \"\"\"Check whether the user has any permission to visit this page. Args: mr: commonly used info parsed from the request\"\"\"\n <|body_0|>\n\n def GatherPageData(self, mr):\n \"\"\"Build up a dictionary of data values to use when rendering the page. Args: mr: commonly used info parsed from the request. Returns: Dict of values used by EZT for rendering the page.\"\"\"\n <|body_1|>\n\n def ProcessFormData(self, mr, post_data):\n \"\"\"Validate and store the contents of the issues tracker admin page. Args: mr: commonly used info parsed from the request. post_data: HTML form data from the request. Returns: String URL to redirect the user to, or None if response was already sent.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TemplateCreate:\n \"\"\"Servlet allowing project owners to create an issue template.\"\"\"\n\n def AssertBasePermission(self, mr):\n \"\"\"Check whether the user has any permission to visit this page. Args: mr: commonly used info parsed from the request\"\"\"\n super(TemplateCreate, self).AssertBasePermission(mr)\n if not self.CheckPerm(mr, permissions.EDIT_PROJECT):\n raise permissions.PermissionException('User is not allowed to administer this project')\n\n def GatherPageData(self, mr):\n \"\"\"Build up a dictionary of data values to use when rendering the page. Args: mr: commonly used info parsed from the request. 
Returns: Dict of values used by EZT for rendering the page.\"\"\"\n config = self.services.config.GetProjectConfig(mr.cnxn, mr.project_id)\n field_views = tracker_views.MakeAllFieldValueViews(config, [], [], [], {})\n approval_subfields_present = any((fv.field_def.is_approval_subfield for fv in field_views))\n initial_phases = [tracker_pb2.Phase()] * template_helpers.MAX_NUM_PHASES\n return {'admin_tab_mode': self._PROCESS_SUBTAB, 'allow_edit': ezt.boolean(True), 'new_template_form': ezt.boolean(True), 'initial_members_only': ezt.boolean(False), 'template_name': '', 'initial_content': '', 'initial_must_edit_summary': ezt.boolean(False), 'initial_summary': '', 'initial_status': '', 'initial_owner': '', 'initial_owner_defaults_to_member': ezt.boolean(False), 'initial_components': '', 'initial_component_required': ezt.boolean(False), 'initial_admins': '', 'fields': [view for view in field_views if view.field_def.type_name is not 'APPROVAL_TYPE'], 'initial_add_approvals': ezt.boolean(False), 'initial_phases': initial_phases, 'approvals': [view for view in field_views if view.field_def.type_name is 'APPROVAL_TYPE'], 'prechecked_approvals': [], 'required_approval_ids': [], 'approval_subfields_present': ezt.boolean(approval_subfields_present), 'phase_fields_present': ezt.boolean(False)}\n\n def ProcessFormData(self, mr, post_data):\n \"\"\"Validate and store the contents of the issues tracker admin page. Args: mr: commonly used info parsed from the request. post_data: HTML form data from the request. Returns: String URL to redirect the user to, or None if response was already sent.\"\"\"\n config = self.services.config.GetProjectConfig(mr.cnxn, mr.project_id)\n parsed = template_helpers.ParseTemplateRequest(post_data, config)\n field_helpers.ShiftEnumFieldsIntoLabels(parsed.labels, [], parsed.field_val_strs, [], config)\n if not parsed.name:\n mr.errors.name = 'Please provide a template name'\n if self.services.template.GetTemplateByName(mr.cnxn, parsed.name, mr.project_id):\n mr.errors.name = 'Template with name %s already exists' % parsed.name\n admin_ids, owner_id, component_ids, field_values, phases, approvals = template_helpers.GetTemplateInfoFromParsed(mr, self.services, parsed, config)\n if mr.errors.AnyErrors():\n field_views = tracker_views.MakeAllFieldValueViews(config, [], [], field_values, {})\n prechecked_approvals = template_helpers.GetCheckedApprovalsFromParsed(parsed.approvals_to_phase_idx)\n self.PleaseCorrect(mr, initial_members_only=ezt.boolean(parsed.members_only), template_name=parsed.name, initial_content=parsed.summary, initial_must_edit_summary=ezt.boolean(parsed.summary_must_be_edited), initial_description=parsed.content, initial_status=parsed.status, initial_owner=parsed.owner_str, initial_owner_defaults_to_member=ezt.boolean(parsed.owner_defaults_to_member), initial_components=', '.join(parsed.component_paths), initial_component_required=ezt.boolean(parsed.component_required), initial_admins=parsed.admin_str, labels=parsed.labels, fields=[view for view in field_views if view.field_def.type_name is not 'APPROVAL_TYPE'], initial_add_approvals=ezt.boolean(parsed.add_approvals), initial_phases=[tracker_pb2.Phase(name=name) for name in parsed.phase_names], approvals=[view for view in field_views if view.field_def.type_name is 'APPROVAL_TYPE'], prechecked_approvals=prechecked_approvals, required_approval_ids=parsed.required_approval_ids)\n return\n labels = [label for label in parsed.labels if label]\n self.services.template.CreateIssueTemplateDef(mr.cnxn, mr.project_id, 
parsed.name, parsed.content, parsed.summary, parsed.summary_must_be_edited, parsed.status, parsed.members_only, parsed.owner_defaults_to_member, parsed.component_required, owner_id, labels, component_ids, admin_ids, field_values, phases=phases, approval_values=approvals)\n return framework_helpers.FormatAbsoluteURL(mr, urls.ADMIN_TEMPLATES, saved=1, ts=int(time.time()))\n", "source": "the_stack_v2_python_sparse", "source_path": "appengine/monorail/tracker/templatecreate.py", "source_repo": "xinghun61/infra", "split": "val", "star_events_count": 2} {"blob_id": "cd540ad9e95d9409ff4beb53c26767868f3927d4", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn InternalDomainFederation()", "from .federated_idp_mfa_behavior import FederatedIdpMfaBehavior\nfrom .prompt_login_behavior import PromptLoginBehavior\nfrom .saml_or_ws_fed_provider import SamlOrWsFedProvider\nfrom .signing_certificate_update_status import SigningCertificateUpdateStatus\nfrom .federated_idp_mfa_behavior import FederatedIdpMfaBehavior\nfrom .prompt_login_behavior import PromptLoginBehavior\nfrom .saml_or_ws_fed_provider import SamlOrWsFedProvider\nfrom .signing_certificate_update_status import SigningCertificateUpdateStatus\nfields: Dict[str, Callable[[Any], None]] = {'activeSignInUri': lambda n: setattr(self, 'active_sign_in_uri', n.get_str_value()), 'federatedIdpMfaBehavior': lambda n: setattr(self, 'federated_idp_mfa_behavior', n.get_enum_value(FederatedIdpMfaBehavior)), 'isSignedAuthenticationRequestRequired': lambda n: setattr(self, 'is_signed_authentication_request_required', n.get_bool_value()), 'nextSigningCertificate': lambda n: setattr(self, 'next_signing_certificate', n.get_str_value()), 'promptLoginBehavior': lambda n: setattr(self, 'prompt_login_behavior', n.get_enum_value(PromptLoginBehavior)), 'signOutUri': lambda n: setattr(self, 'sign_out_uri', n.get_str_value()), 'signingCertificateUpdateStatus': lambda n: setattr(self, 'signing_certificate_update_status', n.get_object_value(SigningCertificateUpdateStatus))}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_str_value('activeSignInUri', self.active_sign_in_uri)\nwriter.write_enum_value('federatedIdpMfaBehavior', self.federated_idp_mfa_behavior)\nwriter.write_bool_value('isSignedAuthenticationRequestRequired', self.is_signed_authentication_request_required)\nwriter.write_str_value('nextSigningCertificate', self.next_signing_certificate)\nwriter.write_enum_value('promptLoginBehavior', self.prompt_login_behavior)\nwriter.write_str_value('signOutUri', self.sign_out_uri)\nwriter.write_object_value('signingCertificateUpdateStatus', self.signing_certificate_update_status)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return InternalDomainFederation()\n<|end_body_0|>\n\n<|body_start_1|>\n from .federated_idp_mfa_behavior import FederatedIdpMfaBehavior\n from .prompt_login_behavior import PromptLoginBehavior\n from .saml_or_ws_fed_provider import SamlOrWsFedProvider\n from .signing_certificate_update_status import SigningCertificateUpdateStatus\n from .federated_idp_mfa_behavior import FederatedIdpMfaBehavior\n from .prompt_login_behavior import PromptLoginBehavior\n from .saml_or_ws_fed_provider import SamlOrWsFedProvider\n from .signing_certificate_update_status import SigningCertificateUpdateStatus\n fields: Dict[str, 
Callable[[Any], None]] = {'activeSignInUri': lambda n: setattr(self, 'active_sign_in_uri', n.get_str_value()), 'federatedIdpMfaBehavior': lambda n: setattr(self, 'federated_idp_mfa_behavior', n.get_enum_value(FederatedIdpMfaBehavior)), 'isSignedAuthenticationRequestRequired': lambda n: setattr(self, 'is_signed_authentication_request_required', n.get_bool_value()), 'nextSigningCertificate': lambda n: setattr(self, 'next_signing_certificate', n.get_str_value()), 'promptLoginBehavior': lambda n: setattr(self, 'prompt_login_behavior', n.get_enum_value(PromptLoginBehavior)), 'signOutUri': lambda n: setattr(self, 'sign_out_uri', n.get_str_value()), 'signingCertificateUpdateStatus': lambda n: setattr(self, 'signing_certificate_update_status', n.get_object_value(SigningCertificateUpdateStatus))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('activeSignInUri', self.active_sign_in_uri)\n writer.write_enum_value('federatedIdpMfaBehavior', self.federated_idp_mfa_behavior)\n writer.write_bool_value('isSignedAuthenticationRequestRequired', self.is_signed_authentication_request_required)\n writer.write_str_value('nextSigningCertificate', self.next_signing_certificate)\n writer.write_enum_value('promptLoginBehavior', self.prompt_login_behavior)\n writer.write_str_value('signOutUri', self.sign_out_uri)\n writer.write_object_value('signingCertificateUpdateStatus', self.signing_certificate_update_status)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "InternalDomainFederation", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InternalDomainFederation:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> InternalDomainFederation:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: InternalDomainFederation\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return InternalDomainFederation()\n<|end_body_0|>\n\n<|body_start_1|>\n from .federated_idp_mfa_behavior import FederatedIdpMfaBehavior\n from .prompt_login_behavior import PromptLoginBehavior\n from .saml_or_ws_fed_provider import SamlOrWsFedProvider\n from .signing_certificate_update_status import SigningCertificateUpdateStatus\n from .federated_idp_mfa_behavior import FederatedIdpMfaBehavior\n from .prompt_login_behavior import PromptLoginBehavior\n from .saml_or_ws_fed_provider import SamlOrWsFedProvider\n from .signing_certificate_update_status import SigningCertificateUpdateStatus\n fields: Dict[str, Callable[[Any], None]] = {'activeSignInUri': lambda n: setattr(self, 'active_sign_in_uri', n.get_str_value()), 'federatedIdpMfaBehavior': lambda n: setattr(self, 'federated_idp_mfa_behavior', n.get_enum_value(FederatedIdpMfaBehavior)), 
'isSignedAuthenticationRequestRequired': lambda n: setattr(self, 'is_signed_authentication_request_required', n.get_bool_value()), 'nextSigningCertificate': lambda n: setattr(self, 'next_signing_certificate', n.get_str_value()), 'promptLoginBehavior': lambda n: setattr(self, 'prompt_login_behavior', n.get_enum_value(PromptLoginBehavior)), 'signOutUri': lambda n: setattr(self, 'sign_out_uri', n.get_str_value()), 'signingCertificateUpdateStatus': lambda n: setattr(self, 'signing_certificate_update_status', n.get_object_value(SigningCertificateUpdateStatus))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('activeSignInUri', self.active_sign_in_uri)\n writer.write_enum_value('federatedIdpMfaBehavior', self.federated_idp_mfa_behavior)\n writer.write_bool_value('isSignedAuthenticationRequestRequired', self.is_signed_authentication_request_required)\n writer.write_str_value('nextSigningCertificate', self.next_signing_certificate)\n writer.write_enum_value('promptLoginBehavior', self.prompt_login_behavior)\n writer.write_str_value('signOutUri', self.sign_out_uri)\n writer.write_object_value('signingCertificateUpdateStatus', self.signing_certificate_update_status)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000142", "length_bytes": 6466, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: InternalDomainFederation", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> InternalDomainFederation"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "prompt": "Implement the Python class `InternalDomainFederation` described below.\n\nClass description:\nImplement the InternalDomainFederation class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> InternalDomainFederation: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: InternalDomainFederation\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `InternalDomainFederation` described below.\n\nClass description:\nImplement the InternalDomainFederation class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> InternalDomainFederation: Creates a new instance 
of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: InternalDomainFederation\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass InternalDomainFederation:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> InternalDomainFederation:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: InternalDomainFederation\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return InternalDomainFederation()\n<|end_body_0|>\n\n<|body_start_1|>\n from .federated_idp_mfa_behavior import FederatedIdpMfaBehavior\n from .prompt_login_behavior import PromptLoginBehavior\n from .saml_or_ws_fed_provider import SamlOrWsFedProvider\n from .signing_certificate_update_status import SigningCertificateUpdateStatus\n from .federated_idp_mfa_behavior import FederatedIdpMfaBehavior\n from .prompt_login_behavior import PromptLoginBehavior\n from .saml_or_ws_fed_provider import SamlOrWsFedProvider\n from .signing_certificate_update_status import SigningCertificateUpdateStatus\n fields: Dict[str, Callable[[Any], None]] = {'activeSignInUri': lambda n: setattr(self, 'active_sign_in_uri', n.get_str_value()), 'federatedIdpMfaBehavior': lambda n: setattr(self, 'federated_idp_mfa_behavior', n.get_enum_value(FederatedIdpMfaBehavior)), 'isSignedAuthenticationRequestRequired': lambda n: setattr(self, 'is_signed_authentication_request_required', n.get_bool_value()), 'nextSigningCertificate': lambda n: setattr(self, 'next_signing_certificate', n.get_str_value()), 'promptLoginBehavior': lambda n: setattr(self, 'prompt_login_behavior', n.get_enum_value(PromptLoginBehavior)), 'signOutUri': lambda n: setattr(self, 'sign_out_uri', n.get_str_value()), 'signingCertificateUpdateStatus': lambda n: setattr(self, 'signing_certificate_update_status', n.get_object_value(SigningCertificateUpdateStatus))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('activeSignInUri', self.active_sign_in_uri)\n writer.write_enum_value('federatedIdpMfaBehavior', self.federated_idp_mfa_behavior)\n writer.write_bool_value('isSignedAuthenticationRequestRequired', self.is_signed_authentication_request_required)\n writer.write_str_value('nextSigningCertificate', self.next_signing_certificate)\n writer.write_enum_value('promptLoginBehavior', self.prompt_login_behavior)\n writer.write_str_value('signOutUri', 
self.sign_out_uri)\n writer.write_object_value('signingCertificateUpdateStatus', self.signing_certificate_update_status)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass InternalDomainFederation:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> InternalDomainFederation:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: InternalDomainFederation\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class InternalDomainFederation:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> InternalDomainFederation:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: InternalDomainFederation\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return InternalDomainFederation()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .federated_idp_mfa_behavior import FederatedIdpMfaBehavior\n from .prompt_login_behavior import PromptLoginBehavior\n from .saml_or_ws_fed_provider import SamlOrWsFedProvider\n from .signing_certificate_update_status import SigningCertificateUpdateStatus\n from .federated_idp_mfa_behavior import FederatedIdpMfaBehavior\n from .prompt_login_behavior import PromptLoginBehavior\n from .saml_or_ws_fed_provider import SamlOrWsFedProvider\n from .signing_certificate_update_status import SigningCertificateUpdateStatus\n fields: Dict[str, Callable[[Any], None]] = {'activeSignInUri': lambda n: setattr(self, 'active_sign_in_uri', n.get_str_value()), 'federatedIdpMfaBehavior': lambda n: setattr(self, 'federated_idp_mfa_behavior', n.get_enum_value(FederatedIdpMfaBehavior)), 'isSignedAuthenticationRequestRequired': lambda n: setattr(self, 'is_signed_authentication_request_required', n.get_bool_value()), 'nextSigningCertificate': lambda n: setattr(self, 'next_signing_certificate', n.get_str_value()), 'promptLoginBehavior': lambda n: setattr(self, 'prompt_login_behavior', n.get_enum_value(PromptLoginBehavior)), 'signOutUri': lambda n: setattr(self, 'sign_out_uri', n.get_str_value()), 'signingCertificateUpdateStatus': lambda n: setattr(self, 'signing_certificate_update_status', n.get_object_value(SigningCertificateUpdateStatus))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n 
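The `InternalDomainFederation` record shows the Kiota-generated serialization pattern: `get_field_deserializers` builds a dict mapping wire names to setter lambdas and merges in the parent class's map via `super()` (the generated body also imports the same four modules twice, which is harmless but redundant). A stripped-down, dependency-free sketch of the same idea, with a hypothetical `Node` standing in for Kiota's ParseNode:

from typing import Any, Callable, Dict

class Node:
    """Hypothetical stand-in for a ParseNode: wraps one raw JSON value."""
    def __init__(self, value: Any):
        self.value = value
    def get_str_value(self) -> str:
        return str(self.value)
    def get_bool_value(self) -> bool:
        return bool(self.value)

class DomainFederation:
    def __init__(self):
        self.active_sign_in_uri = None
        self.sign_out_uri = None
        self.is_signed_authentication_request_required = None

    def get_field_deserializers(self) -> Dict[str, Callable[[Node], None]]:
        # Wire name -> setter; a subclass would update() this with super()'s map.
        return {
            'activeSignInUri': lambda n: setattr(self, 'active_sign_in_uri', n.get_str_value()),
            'signOutUri': lambda n: setattr(self, 'sign_out_uri', n.get_str_value()),
            'isSignedAuthenticationRequestRequired': lambda n: setattr(
                self, 'is_signed_authentication_request_required', n.get_bool_value()),
        }

obj = DomainFederation()
payload = {'activeSignInUri': 'https://sts.example/adfs/ls', 'signOutUri': 'https://sts.example/logout'}
deserializers = obj.get_field_deserializers()
for key, raw in payload.items():
    if key in deserializers:
        deserializers[key](Node(raw))
assert obj.active_sign_in_uri == 'https://sts.example/adfs/ls'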
writer.write_str_value('activeSignInUri', self.active_sign_in_uri)\n writer.write_enum_value('federatedIdpMfaBehavior', self.federated_idp_mfa_behavior)\n writer.write_bool_value('isSignedAuthenticationRequestRequired', self.is_signed_authentication_request_required)\n writer.write_str_value('nextSigningCertificate', self.next_signing_certificate)\n writer.write_enum_value('promptLoginBehavior', self.prompt_login_behavior)\n writer.write_str_value('signOutUri', self.sign_out_uri)\n writer.write_object_value('signingCertificateUpdateStatus', self.signing_certificate_update_status)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/internal_domain_federation.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "val", "star_events_count": 135} {"blob_id": "3571fe525cc229d60ac2beecc85b12280658eea6", "bodies": ["site = models.SiteSettings.objects.get()\ndata = {'form': forms.RegistrationLimitedForm(instance=site)}\nreturn TemplateResponse(request, 'settings/registration_limited.html', data)", "site = models.SiteSettings.objects.get()\nform = forms.RegistrationLimitedForm(request.POST, request.FILES, instance=site)\nif not form.is_valid():\n data = {'form': form}\n return TemplateResponse(request, 'settings/registration_limited.html', data)\nsite = form.save(request)\ndata = {'form': forms.RegistrationLimitedForm(instance=site), 'success': True}\nreturn TemplateResponse(request, 'settings/registration_limited.html', data)"], "bodies_text": "<|body_start_0|>\n site = models.SiteSettings.objects.get()\n data = {'form': forms.RegistrationLimitedForm(instance=site)}\n return TemplateResponse(request, 'settings/registration_limited.html', data)\n<|end_body_0|>\n\n<|body_start_1|>\n site = models.SiteSettings.objects.get()\n form = forms.RegistrationLimitedForm(request.POST, request.FILES, instance=site)\n if not form.is_valid():\n data = {'form': form}\n return TemplateResponse(request, 'settings/registration_limited.html', data)\n site = form.save(request)\n data = {'form': forms.RegistrationLimitedForm(instance=site), 'success': True}\n return TemplateResponse(request, 'settings/registration_limited.html', data)\n<|end_body_1|>\n", "class_docstring": "Things related to registering that non-admins owners can change", "class_name": "RegistrationLimited", "detected_licenses": ["LicenseRef-scancode-warranty-disclaimer"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RegistrationLimited:\n \"\"\"Things related to registering that non-admins owners can change\"\"\"\n\n def get(self, request):\n \"\"\"edit form\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"edit the site settings\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n site = models.SiteSettings.objects.get()\n data = {'form': forms.RegistrationLimitedForm(instance=site)}\n return TemplateResponse(request, 'settings/registration_limited.html', data)\n<|end_body_0|>\n\n<|body_start_1|>\n site = models.SiteSettings.objects.get()\n form = forms.RegistrationLimitedForm(request.POST, request.FILES, instance=site)\n if not form.is_valid():\n data = {'form': form}\n return TemplateResponse(request, 'settings/registration_limited.html', data)\n site = form.save(request)\n data = {'form': forms.RegistrationLimitedForm(instance=site), 'success': True}\n return TemplateResponse(request, 'settings/registration_limited.html', data)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000143", "length_bytes": 3435, "license_type": "no_license", 
"methods": [{"docstring": "edit form", "name": "get", "signature": "def get(self, request)"}, {"docstring": "edit the site settings", "name": "post", "signature": "def post(self, request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001044", "prompt": "Implement the Python class `RegistrationLimited` described below.\n\nClass description:\nThings related to registering that non-admins owners can change\n\nMethod signatures and docstrings:\n- def get(self, request): edit form\n- def post(self, request): edit the site settings", "prompted_full_text": "Implement the Python class `RegistrationLimited` described below.\n\nClass description:\nThings related to registering that non-admins owners can change\n\nMethod signatures and docstrings:\n- def get(self, request): edit form\n- def post(self, request): edit the site settings\n\n<|skeleton|>\nclass RegistrationLimited:\n \"\"\"Things related to registering that non-admins owners can change\"\"\"\n\n def get(self, request):\n \"\"\"edit form\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"edit the site settings\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n site = models.SiteSettings.objects.get()\n data = {'form': forms.RegistrationLimitedForm(instance=site)}\n return TemplateResponse(request, 'settings/registration_limited.html', data)\n<|end_body_0|>\n\n<|body_start_1|>\n site = models.SiteSettings.objects.get()\n form = forms.RegistrationLimitedForm(request.POST, request.FILES, instance=site)\n if not form.is_valid():\n data = {'form': form}\n return TemplateResponse(request, 'settings/registration_limited.html', data)\n site = form.save(request)\n data = {'form': forms.RegistrationLimitedForm(instance=site), 'success': True}\n return TemplateResponse(request, 'settings/registration_limited.html', data)\n<|end_body_1|>\n", "revision_id": "0f8da5b738047f3c34d60d93f59bdedd8f797224", "skeleton": "<|skeleton|>\nclass RegistrationLimited:\n \"\"\"Things related to registering that non-admins owners can change\"\"\"\n\n def get(self, request):\n \"\"\"edit form\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"edit the site settings\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RegistrationLimited:\n \"\"\"Things related to registering that non-admins owners can change\"\"\"\n\n def get(self, request):\n \"\"\"edit form\"\"\"\n site = models.SiteSettings.objects.get()\n data = {'form': forms.RegistrationLimitedForm(instance=site)}\n return TemplateResponse(request, 'settings/registration_limited.html', data)\n\n def post(self, request):\n \"\"\"edit the site settings\"\"\"\n site = models.SiteSettings.objects.get()\n form = forms.RegistrationLimitedForm(request.POST, request.FILES, instance=site)\n if not form.is_valid():\n data = {'form': form}\n return TemplateResponse(request, 'settings/registration_limited.html', data)\n site = form.save(request)\n data = {'form': forms.RegistrationLimitedForm(instance=site), 'success': True}\n return TemplateResponse(request, 'settings/registration_limited.html', data)\n", "source": "the_stack_v2_python_sparse", "source_path": "bookwyrm/views/admin/site.py", "source_repo": "bookwyrm-social/bookwyrm", "split": "val", "star_events_count": 1398} {"blob_id": "09ceeff88db61da4ecf6a84878bedc5302bdf39a", "bodies": ["if self.request.version == 'v6':\n return StrikeDetailsSerializerV6\nelif self.request.version == 'v7':\n return 
StrikeDetailsSerializerV6", "if request.version == 'v6':\n return self.get_impl(request, strike_id)\nelif request.version == 'v7':\n return self.get_impl(request, strike_id)\nraise Http404()", "try:\n is_staff = False\n if request.user:\n is_staff = request.user.is_staff\n strike = Strike.objects.get_details(strike_id, is_staff)\nexcept Strike.DoesNotExist:\n raise Http404\nserializer = self.get_serializer(strike)\nreturn Response(serializer.data)", "if request.version == 'v6':\n return self.patch_impl_v6(request, strike_id)\nelif request.version == 'v7':\n return self.patch_impl_v6(request, strike_id)\nraise Http404()", "title = rest_util.parse_string(request, 'title', required=False)\ndescription = rest_util.parse_string(request, 'description', required=False)\nconfiguration = rest_util.parse_dict(request, 'configuration', required=False)\nconfig = None\ntry:\n if configuration:\n config = StrikeConfigurationV6(configuration, do_validate=True).get_configuration()\nexcept InvalidStrikeConfiguration as ex:\n raise BadParameter('Strike configuration invalid: %s' % unicode(ex))\ntry:\n if config:\n new_config = config.get_dict()\n old_config = Strike.objects.get_details(strike_id)\n Strike.objects.edit_strike(strike_id, title, description, config)\n if config and old_config.configuration['workspace'] != new_config['workspace']:\n strike_job = old_config.job\n Job.objects.update_jobs_to_canceled([strike_job], timezone.now())\n requeue_jobs = []\n requeue_jobs.append(QueuedJob(strike_job.id, strike_job.num_exes))\n msg = create_requeue_jobs_messages(requeue_jobs)\n CommandMessageManager().send_messages(msg)\nexcept Strike.DoesNotExist:\n raise Http404\nexcept InvalidStrikeConfiguration as ex:\n logger.exception('Unable to edit Strike process: %s', strike_id)\n raise BadParameter(unicode(ex))\nreturn Response(status=status.HTTP_204_NO_CONTENT)"], "bodies_text": "<|body_start_0|>\n if self.request.version == 'v6':\n return StrikeDetailsSerializerV6\n elif self.request.version == 'v7':\n return StrikeDetailsSerializerV6\n<|end_body_0|>\n\n<|body_start_1|>\n if request.version == 'v6':\n return self.get_impl(request, strike_id)\n elif request.version == 'v7':\n return self.get_impl(request, strike_id)\n raise Http404()\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n is_staff = False\n if request.user:\n is_staff = request.user.is_staff\n strike = Strike.objects.get_details(strike_id, is_staff)\n except Strike.DoesNotExist:\n raise Http404\n serializer = self.get_serializer(strike)\n return Response(serializer.data)\n<|end_body_2|>\n\n<|body_start_3|>\n if request.version == 'v6':\n return self.patch_impl_v6(request, strike_id)\n elif request.version == 'v7':\n return self.patch_impl_v6(request, strike_id)\n raise Http404()\n<|end_body_3|>\n\n<|body_start_4|>\n title = rest_util.parse_string(request, 'title', required=False)\n description = rest_util.parse_string(request, 'description', required=False)\n configuration = rest_util.parse_dict(request, 'configuration', required=False)\n config = None\n try:\n if configuration:\n config = StrikeConfigurationV6(configuration, do_validate=True).get_configuration()\n except InvalidStrikeConfiguration as ex:\n raise BadParameter('Strike configuration invalid: %s' % unicode(ex))\n try:\n if config:\n new_config = config.get_dict()\n old_config = Strike.objects.get_details(strike_id)\n Strike.objects.edit_strike(strike_id, title, description, config)\n if config and old_config.configuration['workspace'] != new_config['workspace']:\n strike_job = 
old_config.job\n Job.objects.update_jobs_to_canceled([strike_job], timezone.now())\n requeue_jobs = []\n requeue_jobs.append(QueuedJob(strike_job.id, strike_job.num_exes))\n msg = create_requeue_jobs_messages(requeue_jobs)\n CommandMessageManager().send_messages(msg)\n except Strike.DoesNotExist:\n raise Http404\n except InvalidStrikeConfiguration as ex:\n logger.exception('Unable to edit Strike process: %s', strike_id)\n raise BadParameter(unicode(ex))\n return Response(status=status.HTTP_204_NO_CONTENT)\n<|end_body_4|>\n", "class_docstring": "This view is the endpoint for retrieving/updating details of a Strike process.", "class_name": "StrikeDetailsView", "detected_licenses": ["LicenseRef-scancode-free-unknown", "Apache-2.0", "LicenseRef-scancode-public-domain"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass StrikeDetailsView:\n \"\"\"This view is the endpoint for retrieving/updating details of a Strike process.\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API\"\"\"\n <|body_0|>\n\n def get(self, request, strike_id):\n \"\"\"Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_1|>\n\n def get_impl(self, request, strike_id):\n \"\"\"Retrieves the details for a Strike process and return them in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_2|>\n\n def patch(self, request, strike_id):\n \"\"\"Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_3|>\n\n def patch_impl_v6(self, request, strike_id):\n \"\"\"Edits an existing Strike process and returns the updated details :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.version == 'v6':\n return StrikeDetailsSerializerV6\n elif self.request.version == 'v7':\n return StrikeDetailsSerializerV6\n<|end_body_0|>\n\n<|body_start_1|>\n if request.version == 'v6':\n return self.get_impl(request, strike_id)\n elif request.version == 'v7':\n return self.get_impl(request, strike_id)\n raise Http404()\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n is_staff = False\n if request.user:\n is_staff = request.user.is_staff\n strike = Strike.objects.get_details(strike_id, is_staff)\n except Strike.DoesNotExist:\n raise Http404\n serializer = self.get_serializer(strike)\n return Response(serializer.data)\n<|end_body_2|>\n\n<|body_start_3|>\n if request.version == 'v6':\n return self.patch_impl_v6(request, 
strike_id)\n elif request.version == 'v7':\n return self.patch_impl_v6(request, strike_id)\n raise Http404()\n<|end_body_3|>\n\n<|body_start_4|>\n title = rest_util.parse_string(request, 'title', required=False)\n description = rest_util.parse_string(request, 'description', required=False)\n configuration = rest_util.parse_dict(request, 'configuration', required=False)\n config = None\n try:\n if configuration:\n config = StrikeConfigurationV6(configuration, do_validate=True).get_configuration()\n except InvalidStrikeConfiguration as ex:\n raise BadParameter('Strike configuration invalid: %s' % unicode(ex))\n try:\n if config:\n new_config = config.get_dict()\n old_config = Strike.objects.get_details(strike_id)\n Strike.objects.edit_strike(strike_id, title, description, config)\n if config and old_config.configuration['workspace'] != new_config['workspace']:\n strike_job = old_config.job\n Job.objects.update_jobs_to_canceled([strike_job], timezone.now())\n requeue_jobs = []\n requeue_jobs.append(QueuedJob(strike_job.id, strike_job.num_exes))\n msg = create_requeue_jobs_messages(requeue_jobs)\n CommandMessageManager().send_messages(msg)\n except Strike.DoesNotExist:\n raise Http404\n except InvalidStrikeConfiguration as ex:\n logger.exception('Unable to edit Strike process: %s', strike_id)\n raise BadParameter(unicode(ex))\n return Response(status=status.HTTP_204_NO_CONTENT)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000144", "length_bytes": 30689, "license_type": "permissive", "methods": [{"docstring": "Returns the appropriate serializer based off the requests version of the REST API", "name": "get_serializer_class", "signature": "def get_serializer_class(self)"}, {"docstring": "Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "name": "get", "signature": "def get(self, request, strike_id)"}, {"docstring": "Retrieves the details for a Strike process and return them in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "name": "get_impl", "signature": "def get_impl(self, request, strike_id)"}, {"docstring": "Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "name": "patch", "signature": "def patch(self, request, strike_id)"}, {"docstring": "Edits an existing Strike process and returns the updated details :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "name": "patch_impl_v6", "signature": "def patch_impl_v6(self, request, strike_id)"}], "n_methods": 5, "prompt": "Implement the Python class `StrikeDetailsView` described below.\n\nClass description:\nThis view 
is the endpoint for retrieving/updating details of a Strike process.\n\nMethod signatures and docstrings:\n- def get_serializer_class(self): Returns the appropriate serializer based off the requests version of the REST API\n- def get(self, request, strike_id): Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n- def get_impl(self, request, strike_id): Retrieves the details for a Strike process and return them in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n- def patch(self, request, strike_id): Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n- def patch_impl_v6(self, request, strike_id): Edits an existing Strike process and returns the updated details :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "prompted_full_text": "Implement the Python class `StrikeDetailsView` described below.\n\nClass description:\nThis view is the endpoint for retrieving/updating details of a Strike process.\n\nMethod signatures and docstrings:\n- def get_serializer_class(self): Returns the appropriate serializer based off the requests version of the REST API\n- def get(self, request, strike_id): Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n- def get_impl(self, request, strike_id): Retrieves the details for a Strike process and return them in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n- def patch(self, request, strike_id): Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n- def patch_impl_v6(self, request, strike_id): Edits an existing Strike process and returns the updated details :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: 
:class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n\n<|skeleton|>\nclass StrikeDetailsView:\n \"\"\"This view is the endpoint for retrieving/updating details of a Strike process.\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API\"\"\"\n <|body_0|>\n\n def get(self, request, strike_id):\n \"\"\"Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_1|>\n\n def get_impl(self, request, strike_id):\n \"\"\"Retrieves the details for a Strike process and return them in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_2|>\n\n def patch(self, request, strike_id):\n \"\"\"Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_3|>\n\n def patch_impl_v6(self, request, strike_id):\n \"\"\"Edits an existing Strike process and returns the updated details :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.version == 'v6':\n return StrikeDetailsSerializerV6\n elif self.request.version == 'v7':\n return StrikeDetailsSerializerV6\n<|end_body_0|>\n\n<|body_start_1|>\n if request.version == 'v6':\n return self.get_impl(request, strike_id)\n elif request.version == 'v7':\n return self.get_impl(request, strike_id)\n raise Http404()\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n is_staff = False\n if request.user:\n is_staff = request.user.is_staff\n strike = Strike.objects.get_details(strike_id, is_staff)\n except Strike.DoesNotExist:\n raise Http404\n serializer = self.get_serializer(strike)\n return Response(serializer.data)\n<|end_body_2|>\n\n<|body_start_3|>\n if request.version == 'v6':\n return self.patch_impl_v6(request, strike_id)\n elif request.version == 'v7':\n return self.patch_impl_v6(request, strike_id)\n raise Http404()\n<|end_body_3|>\n\n<|body_start_4|>\n title = rest_util.parse_string(request, 'title', required=False)\n description = rest_util.parse_string(request, 'description', required=False)\n configuration = rest_util.parse_dict(request, 'configuration', required=False)\n config = None\n try:\n if configuration:\n config = StrikeConfigurationV6(configuration, do_validate=True).get_configuration()\n except InvalidStrikeConfiguration as ex:\n raise BadParameter('Strike configuration invalid: %s' % unicode(ex))\n try:\n if config:\n new_config = config.get_dict()\n old_config = Strike.objects.get_details(strike_id)\n 
Strike.objects.edit_strike(strike_id, title, description, config)\n if config and old_config.configuration['workspace'] != new_config['workspace']:\n strike_job = old_config.job\n Job.objects.update_jobs_to_canceled([strike_job], timezone.now())\n requeue_jobs = []\n requeue_jobs.append(QueuedJob(strike_job.id, strike_job.num_exes))\n msg = create_requeue_jobs_messages(requeue_jobs)\n CommandMessageManager().send_messages(msg)\n except Strike.DoesNotExist:\n raise Http404\n except InvalidStrikeConfiguration as ex:\n logger.exception('Unable to edit Strike process: %s', strike_id)\n raise BadParameter(unicode(ex))\n return Response(status=status.HTTP_204_NO_CONTENT)\n<|end_body_4|>\n", "revision_id": "28618aee07ceed9e4a6eb7b8d0e6f05b31d8fd6b", "skeleton": "<|skeleton|>\nclass StrikeDetailsView:\n \"\"\"This view is the endpoint for retrieving/updating details of a Strike process.\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API\"\"\"\n <|body_0|>\n\n def get(self, request, strike_id):\n \"\"\"Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_1|>\n\n def get_impl(self, request, strike_id):\n \"\"\"Retrieves the details for a Strike process and return them in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_2|>\n\n def patch(self, request, strike_id):\n \"\"\"Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_3|>\n\n def patch_impl_v6(self, request, strike_id):\n \"\"\"Edits an existing Strike process and returns the updated details :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class StrikeDetailsView:\n \"\"\"This view is the endpoint for retrieving/updating details of a Strike process.\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API\"\"\"\n if self.request.version == 'v6':\n return StrikeDetailsSerializerV6\n elif self.request.version == 'v7':\n return StrikeDetailsSerializerV6\n\n def get(self, request, strike_id):\n \"\"\"Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP 
response to send back to the user\"\"\"\n if request.version == 'v6':\n return self.get_impl(request, strike_id)\n elif request.version == 'v7':\n return self.get_impl(request, strike_id)\n raise Http404()\n\n def get_impl(self, request, strike_id):\n \"\"\"Retrieves the details for a Strike process and return them in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n try:\n is_staff = False\n if request.user:\n is_staff = request.user.is_staff\n strike = Strike.objects.get_details(strike_id, is_staff)\n except Strike.DoesNotExist:\n raise Http404\n serializer = self.get_serializer(strike)\n return Response(serializer.data)\n\n def patch(self, request, strike_id):\n \"\"\"Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n if request.version == 'v6':\n return self.patch_impl_v6(request, strike_id)\n elif request.version == 'v7':\n return self.patch_impl_v6(request, strike_id)\n raise Http404()\n\n def patch_impl_v6(self, request, strike_id):\n \"\"\"Edits an existing Strike process and returns the updated details :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :param strike_id: The ID of the Strike process :type strike_id: int encoded as a str :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n title = rest_util.parse_string(request, 'title', required=False)\n description = rest_util.parse_string(request, 'description', required=False)\n configuration = rest_util.parse_dict(request, 'configuration', required=False)\n config = None\n try:\n if configuration:\n config = StrikeConfigurationV6(configuration, do_validate=True).get_configuration()\n except InvalidStrikeConfiguration as ex:\n raise BadParameter('Strike configuration invalid: %s' % unicode(ex))\n try:\n if config:\n new_config = config.get_dict()\n old_config = Strike.objects.get_details(strike_id)\n Strike.objects.edit_strike(strike_id, title, description, config)\n if config and old_config.configuration['workspace'] != new_config['workspace']:\n strike_job = old_config.job\n Job.objects.update_jobs_to_canceled([strike_job], timezone.now())\n requeue_jobs = []\n requeue_jobs.append(QueuedJob(strike_job.id, strike_job.num_exes))\n msg = create_requeue_jobs_messages(requeue_jobs)\n CommandMessageManager().send_messages(msg)\n except Strike.DoesNotExist:\n raise Http404\n except InvalidStrikeConfiguration as ex:\n logger.exception('Unable to edit Strike process: %s', strike_id)\n raise BadParameter(unicode(ex))\n return Response(status=status.HTTP_204_NO_CONTENT)\n", "source": "the_stack_v2_python_sparse", "source_path": "scale/ingest/views.py", "source_repo": "kfconsultant/scale", "split": "val", "star_events_count": 0} {"blob_id": "712d0fca96e172d2e310644e49925035d3cfb08b", "bodies": ["H, edges = np.histogramdd(data, bins=bins, normed=True)\nHmasked = np.ma.masked_where(H == 0, H)\nxedges, yedges = edges[:2]\nif 'levels' not in kwargs:\n kwargs['levels'] = np.linspace(0, 10)\nax.contourf(xedges[1:], 
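Both `get` and `patch` in the `StrikeDetailsView` record dispatch v6 and v7 to the same implementation and 404 on anything else; note that the captured `patch_impl_v6` calls `unicode()`, so this is Python 2 code and would need `str` under Python 3. The dispatch idiom reduces to a membership test, sketched here without the Django pieces:

SUPPORTED_VERSIONS = {'v6', 'v7'}   # both versions currently share one implementation

def dispatch(version, impl, *args):
    """Route a request to impl when the API version is supported."""
    if version in SUPPORTED_VERSIONS:
        return impl(*args)
    raise LookupError('unsupported API version: %r' % version)

print(dispatch('v6', lambda strike_id: 'details for %d' % strike_id, 42))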
yedges[1:], Hmasked.T[where], **kwargs)\nax.set_xlabel('time after front passage')\nax.set_ylabel('vertical velocity')\nreturn ax", "iZ = None\nq = quantity\nhist_kwargs = {'range': range, 'bins': bins}\nq_bins = np.histogram(q[0], **hist_kwargs)[1]\nhist_z = np.vstack((np.histogram(q[z], **hist_kwargs)[0] for z in iZ))\nlevels = np.linspace(0, 500, 100)\ncontourf = ax.contourf(q_bins[1:], iZ, hist_z, levels=levels)\nax.set_title('distribution function')\nax.set_xlabel('quantity')\nax.set_ylabel('z')\nreturn contourf"], "bodies_text": "<|body_start_0|>\n H, edges = np.histogramdd(data, bins=bins, normed=True)\n Hmasked = np.ma.masked_where(H == 0, H)\n xedges, yedges = edges[:2]\n if 'levels' not in kwargs:\n kwargs['levels'] = np.linspace(0, 10)\n ax.contourf(xedges[1:], yedges[1:], Hmasked.T[where], **kwargs)\n ax.set_xlabel('time after front passage')\n ax.set_ylabel('vertical velocity')\n return ax\n<|end_body_0|>\n\n<|body_start_1|>\n iZ = None\n q = quantity\n hist_kwargs = {'range': range, 'bins': bins}\n q_bins = np.histogram(q[0], **hist_kwargs)[1]\n hist_z = np.vstack((np.histogram(q[z], **hist_kwargs)[0] for z in iZ))\n levels = np.linspace(0, 500, 100)\n contourf = ax.contourf(q_bins[1:], iZ, hist_z, levels=levels)\n ax.set_title('distribution function')\n ax.set_xlabel('quantity')\n ax.set_ylabel('z')\n return contourf\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Histograms", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Histograms:\n\n def plot_time_histogram(ax, data, bins, where=np.s_[:], **kwargs):\n \"\"\"Plot a histogram of a quantity through time. bins - edges of the histogram bins where - z index or slice object to use\"\"\"\n <|body_0|>\n\n def vertical_histogram(self, ax, quantity, bins, levels):\n \"\"\"Make a contour plot of the vertical distribution of some quantity. Creates a histogram (over time and space) for each vertical level and then concatenates these together.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n H, edges = np.histogramdd(data, bins=bins, normed=True)\n Hmasked = np.ma.masked_where(H == 0, H)\n xedges, yedges = edges[:2]\n if 'levels' not in kwargs:\n kwargs['levels'] = np.linspace(0, 10)\n ax.contourf(xedges[1:], yedges[1:], Hmasked.T[where], **kwargs)\n ax.set_xlabel('time after front passage')\n ax.set_ylabel('vertical velocity')\n return ax\n<|end_body_0|>\n\n<|body_start_1|>\n iZ = None\n q = quantity\n hist_kwargs = {'range': range, 'bins': bins}\n q_bins = np.histogram(q[0], **hist_kwargs)[1]\n hist_z = np.vstack((np.histogram(q[z], **hist_kwargs)[0] for z in iZ))\n levels = np.linspace(0, 500, 100)\n contourf = ax.contourf(q_bins[1:], iZ, hist_z, levels=levels)\n ax.set_title('distribution function')\n ax.set_xlabel('quantity')\n ax.set_ylabel('z')\n return contourf\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000145", "length_bytes": 7603, "license_type": "no_license", "methods": [{"docstring": "Plot a histogram of a quantity through time. bins - edges of the histogram bins where - z index or slice object to use", "name": "plot_time_histogram", "signature": "def plot_time_histogram(ax, data, bins, where=np.s_[:], **kwargs)"}, {"docstring": "Make a contour plot of the vertical distribution of some quantity. 
Creates a histogram (over time and space) for each vertical level and then concatenates these together.", "name": "vertical_histogram", "signature": "def vertical_histogram(self, ax, quantity, bins, levels)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006941", "prompt": "Implement the Python class `Histograms` described below.\n\nClass description:\nImplement the Histograms class.\n\nMethod signatures and docstrings:\n- def plot_time_histogram(ax, data, bins, where=np.s_[:], **kwargs): Plot a histogram of a quantity through time. bins - edges of the histogram bins where - z index or slice object to use\n- def vertical_histogram(self, ax, quantity, bins, levels): Make a contour plot of the vertical distribution of some quantity. Creates a histogram (over time and space) for each vertical level and then concatenates these together.", "prompted_full_text": "Implement the Python class `Histograms` described below.\n\nClass description:\nImplement the Histograms class.\n\nMethod signatures and docstrings:\n- def plot_time_histogram(ax, data, bins, where=np.s_[:], **kwargs): Plot a histogram of a quantity through time. bins - edges of the histogram bins where - z index or slice object to use\n- def vertical_histogram(self, ax, quantity, bins, levels): Make a contour plot of the vertical distribution of some quantity. Creates a histogram (over time and space) for each vertical level and then concatenates these together.\n\n<|skeleton|>\nclass Histograms:\n\n def plot_time_histogram(ax, data, bins, where=np.s_[:], **kwargs):\n \"\"\"Plot a histogram of a quantity through time. bins - edges of the histogram bins where - z index or slice object to use\"\"\"\n <|body_0|>\n\n def vertical_histogram(self, ax, quantity, bins, levels):\n \"\"\"Make a contour plot of the vertical distribution of some quantity. Creates a histogram (over time and space) for each vertical level and then concatenates these together.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n H, edges = np.histogramdd(data, bins=bins, normed=True)\n Hmasked = np.ma.masked_where(H == 0, H)\n xedges, yedges = edges[:2]\n if 'levels' not in kwargs:\n kwargs['levels'] = np.linspace(0, 10)\n ax.contourf(xedges[1:], yedges[1:], Hmasked.T[where], **kwargs)\n ax.set_xlabel('time after front passage')\n ax.set_ylabel('vertical velocity')\n return ax\n<|end_body_0|>\n\n<|body_start_1|>\n iZ = None\n q = quantity\n hist_kwargs = {'range': range, 'bins': bins}\n q_bins = np.histogram(q[0], **hist_kwargs)[1]\n hist_z = np.vstack((np.histogram(q[z], **hist_kwargs)[0] for z in iZ))\n levels = np.linspace(0, 500, 100)\n contourf = ax.contourf(q_bins[1:], iZ, hist_z, levels=levels)\n ax.set_title('distribution function')\n ax.set_xlabel('quantity')\n ax.set_ylabel('z')\n return contourf\n<|end_body_1|>\n", "revision_id": "4e1f20ecd979810a2f9f744e51b1eaf304b64bb6", "skeleton": "<|skeleton|>\nclass Histograms:\n\n def plot_time_histogram(ax, data, bins, where=np.s_[:], **kwargs):\n \"\"\"Plot a histogram of a quantity through time. bins - edges of the histogram bins where - z index or slice object to use\"\"\"\n <|body_0|>\n\n def vertical_histogram(self, ax, quantity, bins, levels):\n \"\"\"Make a contour plot of the vertical distribution of some quantity. 
Creates a histogram (over time and space) for each vertical level and then concatenates these together.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Histograms:\n def plot_time_histogram(ax, data, bins, where=np.s_[:], **kwargs):\n \"\"\"Plot a histogram of a quantity through time. bins - edges of the histogram bins where - z index or slice object to use\"\"\"\n H, edges = np.histogramdd(data, bins=bins, normed=True)\n Hmasked = np.ma.masked_where(H == 0, H)\n xedges, yedges = edges[:2]\n if 'levels' not in kwargs:\n kwargs['levels'] = np.linspace(0, 10)\n ax.contourf(xedges[1:], yedges[1:], Hmasked.T[where], **kwargs)\n ax.set_xlabel('time after front passage')\n ax.set_ylabel('vertical velocity')\n return ax\n\n def vertical_histogram(self, ax, quantity, bins, levels):\n \"\"\"Make a contour plot of the vertical distribution of some quantity. Creates a histogram (over time and space) for each vertical level and then concatenates these together.\"\"\"\n iZ = None\n q = quantity\n hist_kwargs = {'range': range, 'bins': bins}\n q_bins = np.histogram(q[0], **hist_kwargs)[1]\n hist_z = np.vstack((np.histogram(q[z], **hist_kwargs)[0] for z in iZ))\n levels = np.linspace(0, 500, 100)\n contourf = ax.contourf(q_bins[1:], iZ, hist_z, levels=levels)\n ax.set_title('distribution function')\n ax.set_xlabel('quantity')\n ax.set_ylabel('z')\n return contourf\n", "source": "the_stack_v2_python_sparse", "source_path": "gc_turbulence/analysis.py", "source_repo": "aaren/lab_turbulence", "split": "val", "star_events_count": 0} {"blob_id": "0885954fe9fdf1e8326c860ea50ae4150914aadf", "bodies": ["def dfs(node):\n if node is None:\n return ['null']\n res = []\n res.append(str(node.val))\n res.extend(dfs(node.left))\n res.extend(dfs(node.right))\n return res\nreturn ','.join(dfs(root))", "nodes = deque(data.split(','))\n\ndef dfs():\n s = nodes.popleft()\n if s == 'null':\n return None\n node = TreeNode(int(s))\n node.left = dfs()\n node.right = dfs()\n return node\nreturn dfs()"], "bodies_text": "<|body_start_0|>\n def dfs(node):\n if node is None:\n return ['null']\n res = []\n res.append(str(node.val))\n res.extend(dfs(node.left))\n res.extend(dfs(node.right))\n return res\n return ','.join(dfs(root))\n<|end_body_0|>\n\n<|body_start_1|>\n nodes = deque(data.split(','))\n\n def dfs():\n s = nodes.popleft()\n if s == 'null':\n return None\n node = TreeNode(int(s))\n node.left = dfs()\n node.right = dfs()\n return node\n return dfs()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. 
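The `Histograms` record above is not runnable as captured: `vertical_histogram` iterates over `iZ = None`, passes the `range` builtin as the histogram range, and `plot_time_histogram` uses `normed=True`, which NumPy has since removed in favor of `density=True`. A corrected sketch of the vertical histogram, under the assumption that `iZ` was meant to be the array of vertical level indices and that the value range is supplied by the caller:

import numpy as np
import matplotlib.pyplot as plt

def vertical_histogram(ax, quantity, bins, value_range, iZ):
    """Contour the per-level distribution of quantity[z] for z in iZ."""
    q_bins = np.histogram(quantity[iZ[0]], range=value_range, bins=bins)[1]
    hist_z = np.vstack([np.histogram(quantity[z], range=value_range, bins=bins)[0]
                        for z in iZ])                  # one histogram per level
    contour = ax.contourf(q_bins[1:], iZ, hist_z)
    ax.set_xlabel('quantity')
    ax.set_ylabel('z')
    return contour

rng = np.random.default_rng(0)
data = rng.normal(size=(20, 1000))                     # 20 vertical levels
fig, ax = plt.subplots()
vertical_histogram(ax, data, bins=30, value_range=(-3.0, 3.0), iZ=np.arange(20))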
:type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def dfs(node):\n if node is None:\n return ['null']\n res = []\n res.append(str(node.val))\n res.extend(dfs(node.left))\n res.extend(dfs(node.right))\n return res\n return ','.join(dfs(root))\n<|end_body_0|>\n\n<|body_start_1|>\n nodes = deque(data.split(','))\n\n def dfs():\n s = nodes.popleft()\n if s == 'null':\n return None\n node = TreeNode(int(s))\n node.left = dfs()\n node.right = dfs()\n return node\n return dfs()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000146", "length_bytes": 1512, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: TreeNode :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def dfs(node):\n if node is None:\n return ['null']\n res = []\n res.append(str(node.val))\n res.extend(dfs(node.left))\n res.extend(dfs(node.right))\n return res\n return ','.join(dfs(root))\n<|end_body_0|>\n\n<|body_start_1|>\n nodes = deque(data.split(','))\n\n def dfs():\n s = nodes.popleft()\n if s == 'null':\n return None\n node = TreeNode(int(s))\n node.left = dfs()\n node.right = dfs()\n return node\n return dfs()\n<|end_body_1|>\n", "revision_id": "84b35ec9a4e4319b29eb5f0f226543c9f3f47630", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n def dfs(node):\n if node is None:\n return ['null']\n res = []\n res.append(str(node.val))\n res.extend(dfs(node.left))\n res.extend(dfs(node.right))\n return res\n return ','.join(dfs(root))\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: TreeNode\"\"\"\n nodes = deque(data.split(','))\n\n def dfs():\n s = nodes.popleft()\n if s == 'null':\n return None\n node = TreeNode(int(s))\n node.left = dfs()\n node.right = dfs()\n return node\n return dfs()\n", "source": "the_stack_v2_python_sparse", "source_path": "serialize-and-deserialize-binary-tree.py", "source_repo": "maomao905/algo", "split": "val", "star_events_count": 0} {"blob_id": "a02aae8b0ad9829c94253ecbd7d633c80ff9b73a", "bodies": ["super().__init__(config)\nself.in_proj_weight = nn.Parameter(torch.cat([vit_layer.attention.attention.query.weight, vit_layer.attention.attention.key.weight, vit_layer.attention.attention.value.weight]))\nself.in_proj_bias = nn.Parameter(torch.cat([vit_layer.attention.attention.query.bias, vit_layer.attention.attention.key.bias, vit_layer.attention.attention.value.bias]))\nself.out_proj_weight = vit_layer.attention.output.dense.weight\nself.out_proj_bias = vit_layer.attention.output.dense.bias\nself.linear1_weight = vit_layer.intermediate.dense.weight\nself.linear1_bias = vit_layer.intermediate.dense.bias\nself.linear2_weight = vit_layer.output.dense.weight\nself.linear2_bias = vit_layer.output.dense.bias\nself.norm1_eps = vit_layer.layernorm_before.eps\nself.norm1_weight = vit_layer.layernorm_before.weight\nself.norm1_bias = vit_layer.layernorm_before.bias\nself.norm2_eps = vit_layer.layernorm_after.eps\nself.norm2_weight = vit_layer.layernorm_after.weight\nself.norm2_bias = vit_layer.layernorm_after.bias\nself.num_heads = vit_layer.attention.attention.num_attention_heads\nself.embed_dim = int(vit_layer.attention.attention.attention_head_size * self.num_heads)\nself.is_last_layer = False\nself.norm_first = True\nself.validate_bettertransformer()", "super().forward_checker()\nattention_mask = None\nhidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask)\nif hidden_states.is_nested and self.is_last_layer:\n hidden_states = hidden_states.to_padded_tensor(0.0)\nreturn (hidden_states,)"], "bodies_text": "<|body_start_0|>\n super().__init__(config)\n self.in_proj_weight = nn.Parameter(torch.cat([vit_layer.attention.attention.query.weight, vit_layer.attention.attention.key.weight, vit_layer.attention.attention.value.weight]))\n self.in_proj_bias = nn.Parameter(torch.cat([vit_layer.attention.attention.query.bias, vit_layer.attention.attention.key.bias, vit_layer.attention.attention.value.bias]))\n self.out_proj_weight = vit_layer.attention.output.dense.weight\n self.out_proj_bias = vit_layer.attention.output.dense.bias\n self.linear1_weight = vit_layer.intermediate.dense.weight\n self.linear1_bias = vit_layer.intermediate.dense.bias\n self.linear2_weight = vit_layer.output.dense.weight\n self.linear2_bias = vit_layer.output.dense.bias\n self.norm1_eps = vit_layer.layernorm_before.eps\n self.norm1_weight = vit_layer.layernorm_before.weight\n self.norm1_bias = vit_layer.layernorm_before.bias\n self.norm2_eps = vit_layer.layernorm_after.eps\n self.norm2_weight = vit_layer.layernorm_after.weight\n self.norm2_bias = vit_layer.layernorm_after.bias\n self.num_heads = vit_layer.attention.attention.num_attention_heads\n self.embed_dim = int(vit_layer.attention.attention.attention_head_size * 
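The `Codec` record uses a preorder DFS with explicit 'null' markers, which makes deserialization a single left-to-right pass over the tokens. A self-contained round-trip, with the usual three-field `TreeNode` the exercise assumes and the two methods inlined as functions:

from collections import deque

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def serialize(root):
    # preorder walk; 'null' marks an absent child
    if root is None:
        return 'null'
    return '%s,%s,%s' % (root.val, serialize(root.left), serialize(root.right))

def deserialize(data):
    nodes = deque(data.split(','))
    def build():
        s = nodes.popleft()
        if s == 'null':
            return None
        node = TreeNode(int(s))
        node.left = build()
        node.right = build()
        return node
    return build()

root = TreeNode(1, TreeNode(2), TreeNode(3, None, TreeNode(4)))
encoded = serialize(root)
assert serialize(deserialize(encoded)) == encoded
print(encoded)   # 1,2,null,null,3,null,4,null,null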
self.num_heads)\n self.is_last_layer = False\n self.norm_first = True\n self.validate_bettertransformer()\n<|end_body_0|>\n\n<|body_start_1|>\n super().forward_checker()\n attention_mask = None\n hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask)\n if hidden_states.is_nested and self.is_last_layer:\n hidden_states = hidden_states.to_padded_tensor(0.0)\n return (hidden_states,)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ViTLayerBetterTransformer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ViTLayerBetterTransformer:\n\n def __init__(self, vit_layer, config):\n \"\"\"A simple conversion of the ViTLayer to its `BetterTransformer` implementation. Args: vit_layer (`torch.nn.Module`): The original `ViTLayer` where the weights needs to be retrieved.\"\"\"\n <|body_0|>\n\n def forward(self, hidden_states, *_, **__):\n \"\"\"This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(config)\n self.in_proj_weight = nn.Parameter(torch.cat([vit_layer.attention.attention.query.weight, vit_layer.attention.attention.key.weight, vit_layer.attention.attention.value.weight]))\n self.in_proj_bias = nn.Parameter(torch.cat([vit_layer.attention.attention.query.bias, vit_layer.attention.attention.key.bias, vit_layer.attention.attention.value.bias]))\n self.out_proj_weight = vit_layer.attention.output.dense.weight\n self.out_proj_bias = vit_layer.attention.output.dense.bias\n self.linear1_weight = vit_layer.intermediate.dense.weight\n self.linear1_bias = vit_layer.intermediate.dense.bias\n self.linear2_weight = vit_layer.output.dense.weight\n self.linear2_bias = vit_layer.output.dense.bias\n self.norm1_eps = vit_layer.layernorm_before.eps\n self.norm1_weight = vit_layer.layernorm_before.weight\n self.norm1_bias = vit_layer.layernorm_before.bias\n self.norm2_eps = vit_layer.layernorm_after.eps\n self.norm2_weight = vit_layer.layernorm_after.weight\n self.norm2_bias = vit_layer.layernorm_after.bias\n self.num_heads = vit_layer.attention.attention.num_attention_heads\n self.embed_dim = int(vit_layer.attention.attention.attention_head_size * self.num_heads)\n self.is_last_layer = False\n self.norm_first = True\n self.validate_bettertransformer()\n<|end_body_0|>\n\n<|body_start_1|>\n super().forward_checker()\n attention_mask = None\n hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask)\n if hidden_states.is_nested and self.is_last_layer:\n hidden_states = hidden_states.to_padded_tensor(0.0)\n return (hidden_states,)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000147", "length_bytes": 43670, "license_type": "no_license", "methods": [{"docstring": "A simple conversion of the ViTLayer to its 
`BetterTransformer` implementation. Args: vit_layer (`torch.nn.Module`): The original `ViTLayer` where the weights needs to be retrieved.", "name": "__init__", "signature": "def __init__(self, vit_layer, config)"}, {"docstring": "This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553", "name": "forward", "signature": "def forward(self, hidden_states, *_, **__)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001196", "prompt": "Implement the Python class `ViTLayerBetterTransformer` described below.\n\nClass description:\nImplement the ViTLayerBetterTransformer class.\n\nMethod signatures and docstrings:\n- def __init__(self, vit_layer, config): A simple conversion of the ViTLayer to its `BetterTransformer` implementation. Args: vit_layer (`torch.nn.Module`): The original `ViTLayer` where the weights needs to be retrieved.\n- def forward(self, hidden_states, *_, **__): This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553", "prompted_full_text": "Implement the Python class `ViTLayerBetterTransformer` described below.\n\nClass description:\nImplement the ViTLayerBetterTransformer class.\n\nMethod signatures and docstrings:\n- def __init__(self, vit_layer, config): A simple conversion of the ViTLayer to its `BetterTransformer` implementation. Args: vit_layer (`torch.nn.Module`): The original `ViTLayer` where the weights needs to be retrieved.\n- def forward(self, hidden_states, *_, **__): This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553\n\n<|skeleton|>\nclass ViTLayerBetterTransformer:\n\n def __init__(self, vit_layer, config):\n \"\"\"A simple conversion of the ViTLayer to its `BetterTransformer` implementation. 
Args: vit_layer (`torch.nn.Module`): The original `ViTLayer` where the weights needs to be retrieved.\"\"\"\n <|body_0|>\n\n def forward(self, hidden_states, *_, **__):\n \"\"\"This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(config)\n self.in_proj_weight = nn.Parameter(torch.cat([vit_layer.attention.attention.query.weight, vit_layer.attention.attention.key.weight, vit_layer.attention.attention.value.weight]))\n self.in_proj_bias = nn.Parameter(torch.cat([vit_layer.attention.attention.query.bias, vit_layer.attention.attention.key.bias, vit_layer.attention.attention.value.bias]))\n self.out_proj_weight = vit_layer.attention.output.dense.weight\n self.out_proj_bias = vit_layer.attention.output.dense.bias\n self.linear1_weight = vit_layer.intermediate.dense.weight\n self.linear1_bias = vit_layer.intermediate.dense.bias\n self.linear2_weight = vit_layer.output.dense.weight\n self.linear2_bias = vit_layer.output.dense.bias\n self.norm1_eps = vit_layer.layernorm_before.eps\n self.norm1_weight = vit_layer.layernorm_before.weight\n self.norm1_bias = vit_layer.layernorm_before.bias\n self.norm2_eps = vit_layer.layernorm_after.eps\n self.norm2_weight = vit_layer.layernorm_after.weight\n self.norm2_bias = vit_layer.layernorm_after.bias\n self.num_heads = vit_layer.attention.attention.num_attention_heads\n self.embed_dim = int(vit_layer.attention.attention.attention_head_size * self.num_heads)\n self.is_last_layer = False\n self.norm_first = True\n self.validate_bettertransformer()\n<|end_body_0|>\n\n<|body_start_1|>\n super().forward_checker()\n attention_mask = None\n hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask)\n if hidden_states.is_nested and self.is_last_layer:\n hidden_states = hidden_states.to_padded_tensor(0.0)\n return (hidden_states,)\n<|end_body_1|>\n", "revision_id": "7e55a422588c1d1e00f35a3d3a3ff896cce59e18", "skeleton": "<|skeleton|>\nclass ViTLayerBetterTransformer:\n\n def __init__(self, vit_layer, config):\n \"\"\"A simple conversion of the ViTLayer to its `BetterTransformer` implementation. Args: vit_layer (`torch.nn.Module`): The original `ViTLayer` where the weights needs to be retrieved.\"\"\"\n <|body_0|>\n\n def forward(self, hidden_states, *_, **__):\n \"\"\"This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ViTLayerBetterTransformer:\n def __init__(self, vit_layer, config):\n \"\"\"A simple conversion of the ViTLayer to its `BetterTransformer` implementation. 
Args: vit_layer (`torch.nn.Module`): The original `ViTLayer` where the weights needs to be retrieved.\"\"\"\n super().__init__(config)\n self.in_proj_weight = nn.Parameter(torch.cat([vit_layer.attention.attention.query.weight, vit_layer.attention.attention.key.weight, vit_layer.attention.attention.value.weight]))\n self.in_proj_bias = nn.Parameter(torch.cat([vit_layer.attention.attention.query.bias, vit_layer.attention.attention.key.bias, vit_layer.attention.attention.value.bias]))\n self.out_proj_weight = vit_layer.attention.output.dense.weight\n self.out_proj_bias = vit_layer.attention.output.dense.bias\n self.linear1_weight = vit_layer.intermediate.dense.weight\n self.linear1_bias = vit_layer.intermediate.dense.bias\n self.linear2_weight = vit_layer.output.dense.weight\n self.linear2_bias = vit_layer.output.dense.bias\n self.norm1_eps = vit_layer.layernorm_before.eps\n self.norm1_weight = vit_layer.layernorm_before.weight\n self.norm1_bias = vit_layer.layernorm_before.bias\n self.norm2_eps = vit_layer.layernorm_after.eps\n self.norm2_weight = vit_layer.layernorm_after.weight\n self.norm2_bias = vit_layer.layernorm_after.bias\n self.num_heads = vit_layer.attention.attention.num_attention_heads\n self.embed_dim = int(vit_layer.attention.attention.attention_head_size * self.num_heads)\n self.is_last_layer = False\n self.norm_first = True\n self.validate_bettertransformer()\n\n def forward(self, hidden_states, *_, **__):\n \"\"\"This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553\"\"\"\n super().forward_checker()\n attention_mask = None\n hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask)\n if hidden_states.is_nested and self.is_last_layer:\n hidden_states = hidden_states.to_padded_tensor(0.0)\n return (hidden_states,)\n", "source": "the_stack_v2_python_sparse", "source_path": "generated/test_huggingface_optimum.py", "source_repo": "jansel/pytorch-jit-paritybench", "split": "val", "star_events_count": 35} {"blob_id": "9e38d3e014609ec828d0c145f69e16d67b0298bb", "bodies": ["if data['type'] == 'personal':\n if not (data.get('given_name') or data.get('family_name')):\n messages = [_('Family name or given name must be filled.')]\n raise ValidationError({'given_name': messages, 'family_name': messages})\nelif data['type'] == 'organizational':\n if not data.get('name'):\n messages = [_('Name cannot be blank.')]\n raise ValidationError({'name': messages})", "if data['type'] == 'personal':\n names = [data.get('family_name'), data.get('given_name')]\n data['name'] = ', '.join([n for n in names if n])\nelif data['type'] == 'organizational':\n if 'family_name' in data:\n del data['family_name']\n if 'given_name' in data:\n del data['given_name']\nreturn data"], "bodies_text": "<|body_start_0|>\n if data['type'] == 'personal':\n if not (data.get('given_name') or data.get('family_name')):\n messages = [_('Family name or given name must be filled.')]\n raise ValidationError({'given_name': messages, 'family_name': messages})\n elif data['type'] == 'organizational':\n if not data.get('name'):\n messages = [_('Name cannot be blank.')]\n raise ValidationError({'name': 
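The interesting move in the ViT record's `__init__` is fusing the separate query/key/value projections into one `in_proj` by concatenating their weights along the output dimension; the forward pass then relies on the private `torch._transformer_encoder_layer_fwd` fast path, which is why everything is pre-packed as flat parameters. The fusion step itself is public API and easy to verify on random data:

import torch

d = 8
q = torch.nn.Linear(d, d)
k = torch.nn.Linear(d, d)
v = torch.nn.Linear(d, d)

# Stack the three (d, d) weight matrices into one (3d, d) projection,
# exactly as the record does with torch.cat on weights and biases.
in_proj_weight = torch.cat([q.weight, k.weight, v.weight])
in_proj_bias = torch.cat([q.bias, k.bias, v.bias])

x = torch.randn(5, d)
fused = x @ in_proj_weight.t() + in_proj_bias
separate = torch.cat([q(x), k(x), v(x)], dim=-1)
print(torch.allclose(fused, separate, atol=1e-6))   # True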
messages})\n<|end_body_0|>\n\n<|body_start_1|>\n if data['type'] == 'personal':\n names = [data.get('family_name'), data.get('given_name')]\n data['name'] = ', '.join([n for n in names if n])\n elif data['type'] == 'organizational':\n if 'family_name' in data:\n del data['family_name']\n if 'given_name' in data:\n del data['given_name']\n return data\n<|end_body_1|>\n", "class_docstring": "Person or Organization schema.", "class_name": "PersonOrOrganizationSchema", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PersonOrOrganizationSchema:\n \"\"\"Person or Organization schema.\"\"\"\n\n def validate_names(self, data, **kwargs):\n \"\"\"Validate names based on type.\"\"\"\n <|body_0|>\n\n def update_names(self, data, **kwargs):\n \"\"\"Update names for organization / person. Fill name from given_name and family_name if person. Remove given_name and family_name if organization.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if data['type'] == 'personal':\n if not (data.get('given_name') or data.get('family_name')):\n messages = [_('Family name or given name must be filled.')]\n raise ValidationError({'given_name': messages, 'family_name': messages})\n elif data['type'] == 'organizational':\n if not data.get('name'):\n messages = [_('Name cannot be blank.')]\n raise ValidationError({'name': messages})\n<|end_body_0|>\n\n<|body_start_1|>\n if data['type'] == 'personal':\n names = [data.get('family_name'), data.get('given_name')]\n data['name'] = ', '.join([n for n in names if n])\n elif data['type'] == 'organizational':\n if 'family_name' in data:\n del data['family_name']\n if 'given_name' in data:\n del data['given_name']\n return data\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000148", "length_bytes": 13845, "license_type": "permissive", "methods": [{"docstring": "Validate names based on type.", "name": "validate_names", "signature": "def validate_names(self, data, **kwargs)"}, {"docstring": "Update names for organization / person. Fill name from given_name and family_name if person. Remove given_name and family_name if organization.", "name": "update_names", "signature": "def update_names(self, data, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003570", "prompt": "Implement the Python class `PersonOrOrganizationSchema` described below.\n\nClass description:\nPerson or Organization schema.\n\nMethod signatures and docstrings:\n- def validate_names(self, data, **kwargs): Validate names based on type.\n- def update_names(self, data, **kwargs): Update names for organization / person. Fill name from given_name and family_name if person. Remove given_name and family_name if organization.", "prompted_full_text": "Implement the Python class `PersonOrOrganizationSchema` described below.\n\nClass description:\nPerson or Organization schema.\n\nMethod signatures and docstrings:\n- def validate_names(self, data, **kwargs): Validate names based on type.\n- def update_names(self, data, **kwargs): Update names for organization / person. Fill name from given_name and family_name if person. 
Remove given_name and family_name if organization.\n\n<|skeleton|>\nclass PersonOrOrganizationSchema:\n \"\"\"Person or Organization schema.\"\"\"\n\n def validate_names(self, data, **kwargs):\n \"\"\"Validate names based on type.\"\"\"\n <|body_0|>\n\n def update_names(self, data, **kwargs):\n \"\"\"Update names for organization / person. Fill name from given_name and family_name if person. Remove given_name and family_name if organization.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if data['type'] == 'personal':\n if not (data.get('given_name') or data.get('family_name')):\n messages = [_('Family name or given name must be filled.')]\n raise ValidationError({'given_name': messages, 'family_name': messages})\n elif data['type'] == 'organizational':\n if not data.get('name'):\n messages = [_('Name cannot be blank.')]\n raise ValidationError({'name': messages})\n<|end_body_0|>\n\n<|body_start_1|>\n if data['type'] == 'personal':\n names = [data.get('family_name'), data.get('given_name')]\n data['name'] = ', '.join([n for n in names if n])\n elif data['type'] == 'organizational':\n if 'family_name' in data:\n del data['family_name']\n if 'given_name' in data:\n del data['given_name']\n return data\n<|end_body_1|>\n", "revision_id": "78ad536dbb95494967bf8de248cf922e5040e844", "skeleton": "<|skeleton|>\nclass PersonOrOrganizationSchema:\n \"\"\"Person or Organization schema.\"\"\"\n\n def validate_names(self, data, **kwargs):\n \"\"\"Validate names based on type.\"\"\"\n <|body_0|>\n\n def update_names(self, data, **kwargs):\n \"\"\"Update names for organization / person. Fill name from given_name and family_name if person. Remove given_name and family_name if organization.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PersonOrOrganizationSchema:\n \"\"\"Person or Organization schema.\"\"\"\n\n def validate_names(self, data, **kwargs):\n \"\"\"Validate names based on type.\"\"\"\n if data['type'] == 'personal':\n if not (data.get('given_name') or data.get('family_name')):\n messages = [_('Family name or given name must be filled.')]\n raise ValidationError({'given_name': messages, 'family_name': messages})\n elif data['type'] == 'organizational':\n if not data.get('name'):\n messages = [_('Name cannot be blank.')]\n raise ValidationError({'name': messages})\n\n def update_names(self, data, **kwargs):\n \"\"\"Update names for organization / person. Fill name from given_name and family_name if person. 
Remove given_name and family_name if organization.\"\"\"\n if data['type'] == 'personal':\n names = [data.get('family_name'), data.get('given_name')]\n data['name'] = ', '.join([n for n in names if n])\n elif data['type'] == 'organizational':\n if 'family_name' in data:\n del data['family_name']\n if 'given_name' in data:\n del data['given_name']\n return data\n", "source": "the_stack_v2_python_sparse", "source_path": "invenio_rdm_records/services/schemas/metadata.py", "source_repo": "tu-graz-library/invenio-rdm-records", "split": "val", "star_events_count": 0} {"blob_id": "45468e7a7f60f0a25a453b7be12ab91a254defda", "bodies": ["Button.__init__(self, 1, file, (0, 0), resize=size)\nself.pic = pygame.Surface(self.image.get_size())\nself.pic.blit(self.image, (0, 0))\nself.shades = {}\nself.initialize_shade('blue', (0, 0, 255), 150)\nself.initialize_shade('red', (255, 0, 0), 150)", "self.shades[shade_name] = [0, pygame.Surface(self.image.get_size())]\nself.shades[shade_name][1].fill(shade_color)\nself.shades[shade_name][1].set_alpha(alpha)", "if self.shades[shade][0]:\n self.shades[shade][0] = 0\nelse:\n self.shades[shade][0] = 1\nself.image.blit(self.pic, (0, 0))\nfor key in self.shades:\n if self.shades[key][0]:\n self.image.blit(self.shades[key][1], (0, 0))"], "bodies_text": "<|body_start_0|>\n Button.__init__(self, 1, file, (0, 0), resize=size)\n self.pic = pygame.Surface(self.image.get_size())\n self.pic.blit(self.image, (0, 0))\n self.shades = {}\n self.initialize_shade('blue', (0, 0, 255), 150)\n self.initialize_shade('red', (255, 0, 0), 150)\n<|end_body_0|>\n\n<|body_start_1|>\n self.shades[shade_name] = [0, pygame.Surface(self.image.get_size())]\n self.shades[shade_name][1].fill(shade_color)\n self.shades[shade_name][1].set_alpha(alpha)\n<|end_body_1|>\n\n<|body_start_2|>\n if self.shades[shade][0]:\n self.shades[shade][0] = 0\n else:\n self.shades[shade][0] = 1\n self.image.blit(self.pic, (0, 0))\n for key in self.shades:\n if self.shades[key][0]:\n self.image.blit(self.shades[key][1], (0, 0))\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Tile", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Tile:\n\n def __init__(self, file, size):\n \"\"\"This will load an image and resize it as specified. The class comes with shading features and can be used as a parent class for board game like tiles that need additional attributes. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: file - This is a string of the picture file name including extension size - This is a tuple containing the length and height of the tile (doc string updated ver 0.5)\"\"\"\n <|body_0|>\n\n def initialize_shade(self, shade_name, shade_color, alpha):\n \"\"\"This method will create semi-transparent surfaces with a specified color. The surface can be toggled on and off. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: Shade_name - String of the name that you want to associate with the surface Shade_color - An rgb tuple of the color of the shade Alpha - Level of transparency of the shade (0-255 with 150 being a good middle value) (doc string updated ver 0.6)\"\"\"\n <|body_1|>\n\n def toggle_shade(self, shade):\n \"\"\"This method will overlay a semi-transparent shade on top of the tile's image. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: shade - This will designate which shade you wish to turn on or off. 
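
The update_names logic in the PersonOrOrganizationSchema record above is small enough to exercise standalone. The sketch below re-creates just the name-combination rule with hypothetical inputs; it leaves out whatever schema machinery the real class presumably sits on:

def combine_name(data):
    # Personal entries derive 'name' from the two name parts; organizational
    # entries keep their own 'name' and drop the person-specific fields.
    if data['type'] == 'personal':
        names = [data.get('family_name'), data.get('given_name')]
        data['name'] = ', '.join(n for n in names if n)
    elif data['type'] == 'organizational':
        data.pop('family_name', None)
        data.pop('given_name', None)
    return data

print(combine_name({'type': 'personal', 'family_name': 'Curie', 'given_name': 'Marie'}))
# -> {'type': 'personal', 'family_name': 'Curie', 'given_name': 'Marie', 'name': 'Curie, Marie'}
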
Blue and red shades are available by default. (doc string updated ver 0.6)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Button.__init__(self, 1, file, (0, 0), resize=size)\n self.pic = pygame.Surface(self.image.get_size())\n self.pic.blit(self.image, (0, 0))\n self.shades = {}\n self.initialize_shade('blue', (0, 0, 255), 150)\n self.initialize_shade('red', (255, 0, 0), 150)\n<|end_body_0|>\n\n<|body_start_1|>\n self.shades[shade_name] = [0, pygame.Surface(self.image.get_size())]\n self.shades[shade_name][1].fill(shade_color)\n self.shades[shade_name][1].set_alpha(alpha)\n<|end_body_1|>\n\n<|body_start_2|>\n if self.shades[shade][0]:\n self.shades[shade][0] = 0\n else:\n self.shades[shade][0] = 1\n self.image.blit(self.pic, (0, 0))\n for key in self.shades:\n if self.shades[key][0]:\n self.image.blit(self.shades[key][1], (0, 0))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000149", "length_bytes": 35070, "license_type": "no_license", "methods": [{"docstring": "This will load an image and resize it as specified. The class comes with shading features and can be used as a parent class for board game like tiles that need additional attributes. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: file - This is a string of the picture file name including extension size - This is a tuple containing the length and height of the tile (doc string updated ver 0.5)", "name": "__init__", "signature": "def __init__(self, file, size)"}, {"docstring": "This method will create semi-transparent surfaces with a specified color. The surface can be toggled on and off. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: Shade_name - String of the name that you want to associate with the surface Shade_color - An rgb tuple of the color of the shade Alpha - Level of transparency of the shade (0-255 with 150 being a good middle value) (doc string updated ver 0.6)", "name": "initialize_shade", "signature": "def initialize_shade(self, shade_name, shade_color, alpha)"}, {"docstring": "This method will overlay a semi-transparent shade on top of the tile's image. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: shade - This will designate which shade you wish to turn on or off. Blue and red shades are available by default. (doc string updated ver 0.6)", "name": "toggle_shade", "signature": "def toggle_shade(self, shade)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000049", "prompt": "Implement the Python class `Tile` described below.\n\nClass description:\nImplement the Tile class.\n\nMethod signatures and docstrings:\n- def __init__(self, file, size): This will load an image and resize it as specified. The class comes with shading features and can be used as a parent class for board game like tiles that need additional attributes. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: file - This is a string of the picture file name including extension size - This is a tuple containing the length and height of the tile (doc string updated ver 0.5)\n- def initialize_shade(self, shade_name, shade_color, alpha): This method will create semi-transparent surfaces with a specified color. The surface can be toggled on and off. 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: Shade_name - String of the name that you want to associate with the surface Shade_color - An rgb tuple of the color of the shade Alpha - Level of transparency of the shade (0-255 with 150 being a good middle value) (doc string updated ver 0.6)\n- def toggle_shade(self, shade): This method will overlay a semi-transparent shade on top of the tile's image. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: shade - This will designate which shade you wish to turn on or off. Blue and red shades are available by default. (doc string updated ver 0.6)", "prompted_full_text": "Implement the Python class `Tile` described below.\n\nClass description:\nImplement the Tile class.\n\nMethod signatures and docstrings:\n- def __init__(self, file, size): This will load an image and resize it as specified. The class comes with shading features and can be used as a parent class for board game like tiles that need additional attributes. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: file - This is a string of the picture file name including extension size - This is a tuple containing the length and height of the tile (doc string updated ver 0.5)\n- def initialize_shade(self, shade_name, shade_color, alpha): This method will create semi-transparent surfaces with a specified color. The surface can be toggled on and off. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: Shade_name - String of the name that you want to associate with the surface Shade_color - An rgb tuple of the color of the shade Alpha - Level of transparency of the shade (0-255 with 150 being a good middle value) (doc string updated ver 0.6)\n- def toggle_shade(self, shade): This method will overlay a semi-transparent shade on top of the tile's image. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: shade - This will designate which shade you wish to turn on or off. Blue and red shades are available by default. (doc string updated ver 0.6)\n\n<|skeleton|>\nclass Tile:\n\n def __init__(self, file, size):\n \"\"\"This will load an image and resize it as specified. The class comes with shading features and can be used as a parent class for board game like tiles that need additional attributes. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: file - This is a string of the picture file name including extension size - This is a tuple containing the length and height of the tile (doc string updated ver 0.5)\"\"\"\n <|body_0|>\n\n def initialize_shade(self, shade_name, shade_color, alpha):\n \"\"\"This method will create semi-transparent surfaces with a specified color. The surface can be toggled on and off. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: Shade_name - String of the name that you want to associate with the surface Shade_color - An rgb tuple of the color of the shade Alpha - Level of transparency of the shade (0-255 with 150 being a good middle value) (doc string updated ver 0.6)\"\"\"\n <|body_1|>\n\n def toggle_shade(self, shade):\n \"\"\"This method will overlay a semi-transparent shade on top of the tile's image. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: shade - This will designate which shade you wish to turn on or off. Blue and red shades are available by default. 
(doc string updated ver 0.6)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Button.__init__(self, 1, file, (0, 0), resize=size)\n self.pic = pygame.Surface(self.image.get_size())\n self.pic.blit(self.image, (0, 0))\n self.shades = {}\n self.initialize_shade('blue', (0, 0, 255), 150)\n self.initialize_shade('red', (255, 0, 0), 150)\n<|end_body_0|>\n\n<|body_start_1|>\n self.shades[shade_name] = [0, pygame.Surface(self.image.get_size())]\n self.shades[shade_name][1].fill(shade_color)\n self.shades[shade_name][1].set_alpha(alpha)\n<|end_body_1|>\n\n<|body_start_2|>\n if self.shades[shade][0]:\n self.shades[shade][0] = 0\n else:\n self.shades[shade][0] = 1\n self.image.blit(self.pic, (0, 0))\n for key in self.shades:\n if self.shades[key][0]:\n self.image.blit(self.shades[key][1], (0, 0))\n<|end_body_2|>\n", "revision_id": "3eae1428fdd30fddc66669d40b8bb0a715d5595a", "skeleton": "<|skeleton|>\nclass Tile:\n\n def __init__(self, file, size):\n \"\"\"This will load an image and resize it as specified. The class comes with shading features and can be used as a parent class for board game like tiles that need additional attributes. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: file - This is a string of the picture file name including extension size - This is a tuple containing the length and height of the tile (doc string updated ver 0.5)\"\"\"\n <|body_0|>\n\n def initialize_shade(self, shade_name, shade_color, alpha):\n \"\"\"This method will create semi-transparent surfaces with a specified color. The surface can be toggled on and off. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: Shade_name - String of the name that you want to associate with the surface Shade_color - An rgb tuple of the color of the shade Alpha - Level of transparency of the shade (0-255 with 150 being a good middle value) (doc string updated ver 0.6)\"\"\"\n <|body_1|>\n\n def toggle_shade(self, shade):\n \"\"\"This method will overlay a semi-transparent shade on top of the tile's image. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: shade - This will designate which shade you wish to turn on or off. Blue and red shades are available by default. (doc string updated ver 0.6)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Tile:\n def __init__(self, file, size):\n \"\"\"This will load an image and resize it as specified. The class comes with shading features and can be used as a parent class for board game like tiles that need additional attributes. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: file - This is a string of the picture file name including extension size - This is a tuple containing the length and height of the tile (doc string updated ver 0.5)\"\"\"\n Button.__init__(self, 1, file, (0, 0), resize=size)\n self.pic = pygame.Surface(self.image.get_size())\n self.pic.blit(self.image, (0, 0))\n self.shades = {}\n self.initialize_shade('blue', (0, 0, 255), 150)\n self.initialize_shade('red', (255, 0, 0), 150)\n\n def initialize_shade(self, shade_name, shade_color, alpha):\n \"\"\"This method will create semi-transparent surfaces with a specified color. The surface can be toggled on and off. 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: Shade_name - String of the name that you want to associate with the surface Shade_color - An rgb tuple of the color of the shade Alpha - Level of transparency of the shade (0-255 with 150 being a good middle value) (doc string updated ver 0.6)\"\"\"\n self.shades[shade_name] = [0, pygame.Surface(self.image.get_size())]\n self.shades[shade_name][1].fill(shade_color)\n self.shades[shade_name][1].set_alpha(alpha)\n\n def toggle_shade(self, shade):\n \"\"\"This method will overlay a semi-transparent shade on top of the tile's image. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: shade - This will designate which shade you wish to turn on or off. Blue and red shades are available by default. (doc string updated ver 0.6)\"\"\"\n if self.shades[shade][0]:\n self.shades[shade][0] = 0\n else:\n self.shades[shade][0] = 1\n self.image.blit(self.pic, (0, 0))\n for key in self.shades:\n if self.shades[key][0]:\n self.image.blit(self.shades[key][1], (0, 0))\n", "source": "the_stack_v2_python_sparse", "source_path": "Games/Torric's Quest/pygametools.py", "source_repo": "jbm950/personal_projects", "split": "val", "star_events_count": 0} {"blob_id": "27305e336eaf5d1bc015c9fdde7861eafcaec402", "bodies": ["super().__init__(cost_func)\nself.support_for_bounds = True\nself._popt = None\nself._status = None\nself._maxiter = None", "if self.minimizer == 'shgo':\n self._maxiter = 100\nelse:\n self._maxiter = 1000\nif self.value_ranges is None or np.any(np.isinf(self.value_ranges)):\n raise MissingBoundsError('SciPy GO requires finite bounds on all parameters')", "if self.minimizer == 'differential_evolution':\n kwargs = {'maxiter': self._maxiter}\nelif self.minimizer == 'shgo':\n kwargs = {'options': {'maxiter': self._maxiter, 'jac': self.cost_func.jac_cost}}\nelif self.minimizer == 'dual_annealing':\n kwargs = {'maxiter': self._maxiter, 'local_search_options': {'jac': self.cost_func.jac_cost}}\nfun = self.cost_func.eval_cost\nbounds = self.value_ranges\nalgorithm = getattr(optimize, self.minimizer)\nresult = algorithm(fun, bounds, **kwargs)\nself._popt = result.x\nif result.success:\n self._status = 0\nelif 'Maximum number of iteration' in result.message:\n self._status = 1\nelse:\n self._status = 2", "if self._status == 0:\n self.flag = 0\nelif self._status == 1:\n self.flag = 1\nelse:\n self.flag = 2\nself.final_params = self._popt"], "bodies_text": "<|body_start_0|>\n super().__init__(cost_func)\n self.support_for_bounds = True\n self._popt = None\n self._status = None\n self._maxiter = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.minimizer == 'shgo':\n self._maxiter = 100\n else:\n self._maxiter = 1000\n if self.value_ranges is None or np.any(np.isinf(self.value_ranges)):\n raise MissingBoundsError('SciPy GO requires finite bounds on all parameters')\n<|end_body_1|>\n\n<|body_start_2|>\n if self.minimizer == 'differential_evolution':\n kwargs = {'maxiter': self._maxiter}\n elif self.minimizer == 'shgo':\n kwargs = {'options': {'maxiter': self._maxiter, 'jac': self.cost_func.jac_cost}}\n elif self.minimizer == 'dual_annealing':\n kwargs = {'maxiter': self._maxiter, 'local_search_options': {'jac': self.cost_func.jac_cost}}\n fun = self.cost_func.eval_cost\n bounds = self.value_ranges\n algorithm = getattr(optimize, self.minimizer)\n result = algorithm(fun, bounds, **kwargs)\n self._popt = result.x\n if result.success:\n self._status = 0\n elif 'Maximum number of iteration' in 
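
The Tile bodies above keep a clean copy of the picture and re-composite it with every active semi-transparent shade on each toggle. A self-contained sketch of that compositing with plain pygame surfaces, using an arbitrary solid color in place of the image file and Button base class the original assumes:

import pygame

pygame.init()
base = pygame.Surface((64, 64))
base.fill((200, 200, 200))            # stand-in for the loaded tile picture

shade = pygame.Surface(base.get_size())
shade.fill((0, 0, 255))               # blue shade, as in initialize_shade
shade.set_alpha(150)                  # per-surface alpha: semi-transparent

image = pygame.Surface(base.get_size())
image.blit(base, (0, 0))              # always redraw the clean picture first
image.blit(shade, (0, 0))             # then overlay each active shade
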
result.message:\n self._status = 1\n else:\n self._status = 2\n<|end_body_2|>\n\n<|body_start_3|>\n if self._status == 0:\n self.flag = 0\n elif self._status == 1:\n self.flag = 1\n else:\n self.flag = 2\n self.final_params = self._popt\n<|end_body_3|>\n", "class_docstring": "Controller for the Scipy fitting software.", "class_name": "ScipyGOController", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ScipyGOController:\n \"\"\"Controller for the Scipy fitting software.\"\"\"\n\n def __init__(self, cost_func):\n \"\"\"Initialises variable used for temporary storage. :param cost_func: Cost function object selected from options. :type cost_func: subclass of :class:`~fitbenchmarking.cost_func.base_cost_func.CostFunc`\"\"\"\n <|body_0|>\n\n def setup(self):\n \"\"\"Setup problem ready to be run with SciPy GO\"\"\"\n <|body_1|>\n\n def fit(self):\n \"\"\"Run problem with Scipy GO.\"\"\"\n <|body_2|>\n\n def cleanup(self):\n \"\"\"Convert the result to a numpy array and populate the variables results will be read from.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(cost_func)\n self.support_for_bounds = True\n self._popt = None\n self._status = None\n self._maxiter = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.minimizer == 'shgo':\n self._maxiter = 100\n else:\n self._maxiter = 1000\n if self.value_ranges is None or np.any(np.isinf(self.value_ranges)):\n raise MissingBoundsError('SciPy GO requires finite bounds on all parameters')\n<|end_body_1|>\n\n<|body_start_2|>\n if self.minimizer == 'differential_evolution':\n kwargs = {'maxiter': self._maxiter}\n elif self.minimizer == 'shgo':\n kwargs = {'options': {'maxiter': self._maxiter, 'jac': self.cost_func.jac_cost}}\n elif self.minimizer == 'dual_annealing':\n kwargs = {'maxiter': self._maxiter, 'local_search_options': {'jac': self.cost_func.jac_cost}}\n fun = self.cost_func.eval_cost\n bounds = self.value_ranges\n algorithm = getattr(optimize, self.minimizer)\n result = algorithm(fun, bounds, **kwargs)\n self._popt = result.x\n if result.success:\n self._status = 0\n elif 'Maximum number of iteration' in result.message:\n self._status = 1\n else:\n self._status = 2\n<|end_body_2|>\n\n<|body_start_3|>\n if self._status == 0:\n self.flag = 0\n elif self._status == 1:\n self.flag = 1\n else:\n self.flag = 2\n self.final_params = self._popt\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000150", "length_bytes": 3214, "license_type": "permissive", "methods": [{"docstring": "Initialises variable used for temporary storage. :param cost_func: Cost function object selected from options. :type cost_func: subclass of :class:`~fitbenchmarking.cost_func.base_cost_func.CostFunc`", "name": "__init__", "signature": "def __init__(self, cost_func)"}, {"docstring": "Setup problem ready to be run with SciPy GO", "name": "setup", "signature": "def setup(self)"}, {"docstring": "Run problem with Scipy GO.", "name": "fit", "signature": "def fit(self)"}, {"docstring": "Convert the result to a numpy array and populate the variables results will be read from.", "name": "cleanup", "signature": "def cleanup(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_004169", "prompt": "Implement the Python class `ScipyGOController` described below.\n\nClass description:\nController for the Scipy fitting software.\n\nMethod signatures and docstrings:\n- def __init__(self, cost_func): Initialises variable used for temporary storage. 
:param cost_func: Cost function object selected from options. :type cost_func: subclass of :class:`~fitbenchmarking.cost_func.base_cost_func.CostFunc`\n- def setup(self): Setup problem ready to be run with SciPy GO\n- def fit(self): Run problem with Scipy GO.\n- def cleanup(self): Convert the result to a numpy array and populate the variables results will be read from.", "prompted_full_text": "Implement the Python class `ScipyGOController` described below.\n\nClass description:\nController for the Scipy fitting software.\n\nMethod signatures and docstrings:\n- def __init__(self, cost_func): Initialises variable used for temporary storage. :param cost_func: Cost function object selected from options. :type cost_func: subclass of :class:`~fitbenchmarking.cost_func.base_cost_func.CostFunc`\n- def setup(self): Setup problem ready to be run with SciPy GO\n- def fit(self): Run problem with Scipy GO.\n- def cleanup(self): Convert the result to a numpy array and populate the variables results will be read from.\n\n<|skeleton|>\nclass ScipyGOController:\n \"\"\"Controller for the Scipy fitting software.\"\"\"\n\n def __init__(self, cost_func):\n \"\"\"Initialises variable used for temporary storage. :param cost_func: Cost function object selected from options. :type cost_func: subclass of :class:`~fitbenchmarking.cost_func.base_cost_func.CostFunc`\"\"\"\n <|body_0|>\n\n def setup(self):\n \"\"\"Setup problem ready to be run with SciPy GO\"\"\"\n <|body_1|>\n\n def fit(self):\n \"\"\"Run problem with Scipy GO.\"\"\"\n <|body_2|>\n\n def cleanup(self):\n \"\"\"Convert the result to a numpy array and populate the variables results will be read from.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(cost_func)\n self.support_for_bounds = True\n self._popt = None\n self._status = None\n self._maxiter = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.minimizer == 'shgo':\n self._maxiter = 100\n else:\n self._maxiter = 1000\n if self.value_ranges is None or np.any(np.isinf(self.value_ranges)):\n raise MissingBoundsError('SciPy GO requires finite bounds on all parameters')\n<|end_body_1|>\n\n<|body_start_2|>\n if self.minimizer == 'differential_evolution':\n kwargs = {'maxiter': self._maxiter}\n elif self.minimizer == 'shgo':\n kwargs = {'options': {'maxiter': self._maxiter, 'jac': self.cost_func.jac_cost}}\n elif self.minimizer == 'dual_annealing':\n kwargs = {'maxiter': self._maxiter, 'local_search_options': {'jac': self.cost_func.jac_cost}}\n fun = self.cost_func.eval_cost\n bounds = self.value_ranges\n algorithm = getattr(optimize, self.minimizer)\n result = algorithm(fun, bounds, **kwargs)\n self._popt = result.x\n if result.success:\n self._status = 0\n elif 'Maximum number of iteration' in result.message:\n self._status = 1\n else:\n self._status = 2\n<|end_body_2|>\n\n<|body_start_3|>\n if self._status == 0:\n self.flag = 0\n elif self._status == 1:\n self.flag = 1\n else:\n self.flag = 2\n self.final_params = self._popt\n<|end_body_3|>\n", "revision_id": "5ee7e66d963ebe9296c0a62c24b9616f6c65537e", "skeleton": "<|skeleton|>\nclass ScipyGOController:\n \"\"\"Controller for the Scipy fitting software.\"\"\"\n\n def __init__(self, cost_func):\n \"\"\"Initialises variable used for temporary storage. :param cost_func: Cost function object selected from options. 
:type cost_func: subclass of :class:`~fitbenchmarking.cost_func.base_cost_func.CostFunc`\"\"\"\n <|body_0|>\n\n def setup(self):\n \"\"\"Setup problem ready to be run with SciPy GO\"\"\"\n <|body_1|>\n\n def fit(self):\n \"\"\"Run problem with Scipy GO.\"\"\"\n <|body_2|>\n\n def cleanup(self):\n \"\"\"Convert the result to a numpy array and populate the variables results will be read from.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ScipyGOController:\n \"\"\"Controller for the Scipy fitting software.\"\"\"\n\n def __init__(self, cost_func):\n \"\"\"Initialises variable used for temporary storage. :param cost_func: Cost function object selected from options. :type cost_func: subclass of :class:`~fitbenchmarking.cost_func.base_cost_func.CostFunc`\"\"\"\n super().__init__(cost_func)\n self.support_for_bounds = True\n self._popt = None\n self._status = None\n self._maxiter = None\n\n def setup(self):\n \"\"\"Setup problem ready to be run with SciPy GO\"\"\"\n if self.minimizer == 'shgo':\n self._maxiter = 100\n else:\n self._maxiter = 1000\n if self.value_ranges is None or np.any(np.isinf(self.value_ranges)):\n raise MissingBoundsError('SciPy GO requires finite bounds on all parameters')\n\n def fit(self):\n \"\"\"Run problem with Scipy GO.\"\"\"\n if self.minimizer == 'differential_evolution':\n kwargs = {'maxiter': self._maxiter}\n elif self.minimizer == 'shgo':\n kwargs = {'options': {'maxiter': self._maxiter, 'jac': self.cost_func.jac_cost}}\n elif self.minimizer == 'dual_annealing':\n kwargs = {'maxiter': self._maxiter, 'local_search_options': {'jac': self.cost_func.jac_cost}}\n fun = self.cost_func.eval_cost\n bounds = self.value_ranges\n algorithm = getattr(optimize, self.minimizer)\n result = algorithm(fun, bounds, **kwargs)\n self._popt = result.x\n if result.success:\n self._status = 0\n elif 'Maximum number of iteration' in result.message:\n self._status = 1\n else:\n self._status = 2\n\n def cleanup(self):\n \"\"\"Convert the result to a numpy array and populate the variables results will be read from.\"\"\"\n if self._status == 0:\n self.flag = 0\n elif self._status == 1:\n self.flag = 1\n else:\n self.flag = 2\n self.final_params = self._popt\n", "source": "the_stack_v2_python_sparse", "source_path": "fitbenchmarking/controllers/scipy_go_controller.py", "source_repo": "fitbenchmarking/fitbenchmarking", "split": "val", "star_events_count": 15} {"blob_id": "69824654e35c61f2ccc415e647a04a23fdc4edf7", "bodies": ["count = 1\nfor k in range(2, int(math.sqrt(2 * N)) + 1):\n if (N - k * (k - 1) / 2) % k == 0:\n count += 1\nreturn count", "count, k = (1, 2)\nwhile N > k * (k - 1) // 2:\n if (N - k * (k - 1) / 2) % k == 0:\n count += 1\n k += 1\nreturn count"], "bodies_text": "<|body_start_0|>\n count = 1\n for k in range(2, int(math.sqrt(2 * N)) + 1):\n if (N - k * (k - 1) / 2) % k == 0:\n count += 1\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n count, k = (1, 2)\n while N > k * (k - 1) // 2:\n if (N - k * (k - 1) / 2) % k == 0:\n count += 1\n k += 1\n return count\n<|end_body_1|>\n", "class_docstring": "The thought process goes like this- Given a number N, we can possibly write it as a sum of 2 numbers, 3 numbers, 4 numbers and so on. Let's assume the fist number in this series be x. 
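
ScipyGOController.fit above resolves the chosen global minimizer by name with getattr(optimize, ...) and requires finite bounds. The snippet below runs the same dispatch pattern on a toy bounded quadratic; the objective and bounds are illustrative stand-ins, not a fitbenchmarking cost function:

import numpy as np
from scipy import optimize

def cost(x):
    # Toy bounded objective with its minimum at (0.5, 0.5).
    return np.sum((x - 0.5) ** 2)

bounds = [(-1.0, 1.0), (-1.0, 1.0)]
algorithm = getattr(optimize, 'differential_evolution')
result = algorithm(cost, bounds, maxiter=1000)
print(result.x, result.success)       # approximately [0.5, 0.5], True
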
Hence, we should have x + (x+1) + (x+2)+...+ k terms = N kx + k*(k-1)/2 = N implies kx = N - k*(k-1)/2 So, we can calculate the RHS for every value of k and if it is a multiple of k then we can construct a sum of N using k terms starting from x. Now, the question arises, till what value of k should we loop for? That's easy. In the worst case, RHS should be greater than 0. That is N - k*(k-1)/2 > 0 which implies k*(k-1) < 2N which can be approximated to k*k < 2N ==> k < sqrt(2N) Hence the overall complexity of t", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n \"\"\"The thought process goes like this- Given a number N, we can possibly write it as a sum of 2 numbers, 3 numbers, 4 numbers and so on. Let's assume the fist number in this series be x. Hence, we should have x + (x+1) + (x+2)+...+ k terms = N kx + k*(k-1)/2 = N implies kx = N - k*(k-1)/2 So, we can calculate the RHS for every value of k and if it is a multiple of k then we can construct a sum of N using k terms starting from x. Now, the question arises, till what value of k should we loop for? That's easy. In the worst case, RHS should be greater than 0. That is N - k*(k-1)/2 > 0 which implies k*(k-1) < 2N which can be approximated to k*k < 2N ==> k < sqrt(2N) Hence the overall complexity of t\"\"\"\n\n def consecutiveNumbersSum(self, N):\n \"\"\":type N: int :rtype: int\"\"\"\n <|body_0|>\n\n def consecutiveNumbersSum(self, N):\n \"\"\":type N: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n count = 1\n for k in range(2, int(math.sqrt(2 * N)) + 1):\n if (N - k * (k - 1) / 2) % k == 0:\n count += 1\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n count, k = (1, 2)\n while N > k * (k - 1) // 2:\n if (N - k * (k - 1) / 2) % k == 0:\n count += 1\n k += 1\n return count\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000151", "length_bytes": 1644, "license_type": "no_license", "methods": [{"docstring": ":type N: int :rtype: int", "name": "consecutiveNumbersSum", "signature": "def consecutiveNumbersSum(self, N)"}, {"docstring": ":type N: int :rtype: int", "name": "consecutiveNumbersSum", "signature": "def consecutiveNumbersSum(self, N)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006487", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nThe thought process goes like this- Given a number N, we can possibly write it as a sum of 2 numbers, 3 numbers, 4 numbers and so on. Let's assume the fist number in this series be x. Hence, we should have x + (x+1) + (x+2)+...+ k terms = N kx + k*(k-1)/2 = N implies kx = N - k*(k-1)/2 So, we can calculate the RHS for every value of k and if it is a multiple of k then we can construct a sum of N using k terms starting from x. Now, the question arises, till what value of k should we loop for? That's easy. In the worst case, RHS should be greater than 0. That is N - k*(k-1)/2 > 0 which implies k*(k-1) < 2N which can be approximated to k*k < 2N ==> k < sqrt(2N) Hence the overall complexity of t\n\nMethod signatures and docstrings:\n- def consecutiveNumbersSum(self, N): :type N: int :rtype: int\n- def consecutiveNumbersSum(self, N): :type N: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nThe thought process goes like this- Given a number N, we can possibly write it as a sum of 2 numbers, 3 numbers, 4 numbers and so on. 
Let's assume the fist number in this series be x. Hence, we should have x + (x+1) + (x+2)+...+ k terms = N kx + k*(k-1)/2 = N implies kx = N - k*(k-1)/2 So, we can calculate the RHS for every value of k and if it is a multiple of k then we can construct a sum of N using k terms starting from x. Now, the question arises, till what value of k should we loop for? That's easy. In the worst case, RHS should be greater than 0. That is N - k*(k-1)/2 > 0 which implies k*(k-1) < 2N which can be approximated to k*k < 2N ==> k < sqrt(2N) Hence the overall complexity of t\n\nMethod signatures and docstrings:\n- def consecutiveNumbersSum(self, N): :type N: int :rtype: int\n- def consecutiveNumbersSum(self, N): :type N: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n \"\"\"The thought process goes like this- Given a number N, we can possibly write it as a sum of 2 numbers, 3 numbers, 4 numbers and so on. Let's assume the fist number in this series be x. Hence, we should have x + (x+1) + (x+2)+...+ k terms = N kx + k*(k-1)/2 = N implies kx = N - k*(k-1)/2 So, we can calculate the RHS for every value of k and if it is a multiple of k then we can construct a sum of N using k terms starting from x. Now, the question arises, till what value of k should we loop for? That's easy. In the worst case, RHS should be greater than 0. That is N - k*(k-1)/2 > 0 which implies k*(k-1) < 2N which can be approximated to k*k < 2N ==> k < sqrt(2N) Hence the overall complexity of t\"\"\"\n\n def consecutiveNumbersSum(self, N):\n \"\"\":type N: int :rtype: int\"\"\"\n <|body_0|>\n\n def consecutiveNumbersSum(self, N):\n \"\"\":type N: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n count = 1\n for k in range(2, int(math.sqrt(2 * N)) + 1):\n if (N - k * (k - 1) / 2) % k == 0:\n count += 1\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n count, k = (1, 2)\n while N > k * (k - 1) // 2:\n if (N - k * (k - 1) / 2) % k == 0:\n count += 1\n k += 1\n return count\n<|end_body_1|>\n", "revision_id": "d953abe2c9680f636563e76287d2f907e90ced63", "skeleton": "<|skeleton|>\nclass Solution:\n \"\"\"The thought process goes like this- Given a number N, we can possibly write it as a sum of 2 numbers, 3 numbers, 4 numbers and so on. Let's assume the fist number in this series be x. Hence, we should have x + (x+1) + (x+2)+...+ k terms = N kx + k*(k-1)/2 = N implies kx = N - k*(k-1)/2 So, we can calculate the RHS for every value of k and if it is a multiple of k then we can construct a sum of N using k terms starting from x. Now, the question arises, till what value of k should we loop for? That's easy. In the worst case, RHS should be greater than 0. That is N - k*(k-1)/2 > 0 which implies k*(k-1) < 2N which can be approximated to k*k < 2N ==> k < sqrt(2N) Hence the overall complexity of t\"\"\"\n\n def consecutiveNumbersSum(self, N):\n \"\"\":type N: int :rtype: int\"\"\"\n <|body_0|>\n\n def consecutiveNumbersSum(self, N):\n \"\"\":type N: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n \"\"\"The thought process goes like this- Given a number N, we can possibly write it as a sum of 2 numbers, 3 numbers, 4 numbers and so on. Let's assume the fist number in this series be x. 
Hence, we should have x + (x+1) + (x+2)+...+ k terms = N kx + k*(k-1)/2 = N implies kx = N - k*(k-1)/2 So, we can calculate the RHS for every value of k and if it is a multiple of k then we can construct a sum of N using k terms starting from x. Now, the question arises, till what value of k should we loop for? That's easy. In the worst case, RHS should be greater than 0. That is N - k*(k-1)/2 > 0 which implies k*(k-1) < 2N which can be approximated to k*k < 2N ==> k < sqrt(2N) Hence the overall complexity of t\"\"\"\n\n def consecutiveNumbersSum(self, N):\n \"\"\":type N: int :rtype: int\"\"\"\n count = 1\n for k in range(2, int(math.sqrt(2 * N)) + 1):\n if (N - k * (k - 1) / 2) % k == 0:\n count += 1\n return count\n\n def consecutiveNumbersSum(self, N):\n \"\"\":type N: int :rtype: int\"\"\"\n count, k = (1, 2)\n while N > k * (k - 1) // 2:\n if (N - k * (k - 1) / 2) % k == 0:\n count += 1\n k += 1\n return count\n", "source": "the_stack_v2_python_sparse", "source_path": "python_leetcode_2020/Python_Leetcode_2020/829_consecutive_numbers_sum.py", "source_repo": "xiangcao/Leetcode", "split": "val", "star_events_count": 0} {"blob_id": "d45662f4dd4be5a127e11579b8d510877b610a82", "bodies": ["self.ss = ss\nself.n_step = n_step\nself.mu = mu\nself.sigma = sigma\nself.step_time = step_time", "step_vector = np.abs([round(gauss(self.mu, self.sigma), 1) for _ in range(self.n_step)])\nu = np.zeros(shape=dim)\nj = 0\nfor i in range(len(t)):\n if t[i] % self.step_time == 0 and t[i] != 0 and (j + 1 != len(step_vector)) and (i != len(t) - 1):\n j += 1\n if self.ss is not None and j == 0:\n u[i, :] = self.ss\n else:\n u[i, :] = step_vector[j]\nreturn u"], "bodies_text": "<|body_start_0|>\n self.ss = ss\n self.n_step = n_step\n self.mu = mu\n self.sigma = sigma\n self.step_time = step_time\n<|end_body_0|>\n\n<|body_start_1|>\n step_vector = np.abs([round(gauss(self.mu, self.sigma), 1) for _ in range(self.n_step)])\n u = np.zeros(shape=dim)\n j = 0\n for i in range(len(t)):\n if t[i] % self.step_time == 0 and t[i] != 0 and (j + 1 != len(step_vector)) and (i != len(t) - 1):\n j += 1\n if self.ss is not None and j == 0:\n u[i, :] = self.ss\n else:\n u[i, :] = step_vector[j]\n return u\n<|end_body_1|>\n", "class_docstring": "", "class_name": "GaussStep", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GaussStep:\n\n def __init__(self, step_time, mu=None, sigma=None, n_step=None, ss=None):\n \"\"\"Settings for a Gauss step sequence Args: mu (float) sigma (float) step_time: Time to perform step change n_step (int): Number of steps Notes: Preferred signal for closed-loop control training data set.\"\"\"\n <|body_0|>\n\n def out(self, t: any, dim=(None, None)) -> any:\n \"\"\"Generate a Gauss sequence Args: dim: Dimension tuple in form (samples, params) t: Time vector Returns: array_like: Signal sequence corresponding to the time vector.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.ss = ss\n self.n_step = n_step\n self.mu = mu\n self.sigma = sigma\n self.step_time = step_time\n<|end_body_0|>\n\n<|body_start_1|>\n step_vector = np.abs([round(gauss(self.mu, self.sigma), 1) for _ in range(self.n_step)])\n u = np.zeros(shape=dim)\n j = 0\n for i in range(len(t)):\n if t[i] % self.step_time == 0 and t[i] != 0 and (j + 1 != len(step_vector)) and (i != len(t) - 1):\n j += 1\n if self.ss is not None and j == 0:\n u[i, :] = self.ss\n else:\n u[i, :] = step_vector[j]\n return u\n<|end_body_1|>\n", "id": 
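
The counting argument in the Solution docstring above is easy to sanity-check: for each window length k, kx = N - k(k-1)/2 must be a positive multiple of k, and k only needs to run up to sqrt(2N). A worked check for N = 15, whose four representations are 15, 7+8, 4+5+6, and 1+2+3+4+5, compared against brute force:

import math

N = 15
count = 1                              # k = 1 always works (N itself)
for k in range(2, int(math.sqrt(2 * N)) + 1):
    if (N - k * (k - 1) / 2) % k == 0:
        count += 1

# Brute force over every run of consecutive positive integers summing to N.
brute = sum(
    1
    for start in range(1, N + 1)
    for length in range(1, N + 1)
    if length * start + length * (length - 1) // 2 == N
)
assert count == brute == 4
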
"stack_v2_sparse_classes_10k_val_000152", "length_bytes": 8036, "license_type": "no_license", "methods": [{"docstring": "Settings for a Gauss step sequence Args: mu (float) sigma (float) step_time: Time to perform step change n_step (int): Number of steps Notes: Preferred signal for closed-loop control training data set.", "name": "__init__", "signature": "def __init__(self, step_time, mu=None, sigma=None, n_step=None, ss=None)"}, {"docstring": "Generate a Gauss sequence Args: dim: Dimension tuple in form (samples, params) t: Time vector Returns: array_like: Signal sequence corresponding to the time vector.", "name": "out", "signature": "def out(self, t: any, dim=(None, None)) -> any"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000356", "prompt": "Implement the Python class `GaussStep` described below.\n\nClass description:\nImplement the GaussStep class.\n\nMethod signatures and docstrings:\n- def __init__(self, step_time, mu=None, sigma=None, n_step=None, ss=None): Settings for a Gauss step sequence Args: mu (float) sigma (float) step_time: Time to perform step change n_step (int): Number of steps Notes: Preferred signal for closed-loop control training data set.\n- def out(self, t: any, dim=(None, None)) -> any: Generate a Gauss sequence Args: dim: Dimension tuple in form (samples, params) t: Time vector Returns: array_like: Signal sequence corresponding to the time vector.", "prompted_full_text": "Implement the Python class `GaussStep` described below.\n\nClass description:\nImplement the GaussStep class.\n\nMethod signatures and docstrings:\n- def __init__(self, step_time, mu=None, sigma=None, n_step=None, ss=None): Settings for a Gauss step sequence Args: mu (float) sigma (float) step_time: Time to perform step change n_step (int): Number of steps Notes: Preferred signal for closed-loop control training data set.\n- def out(self, t: any, dim=(None, None)) -> any: Generate a Gauss sequence Args: dim: Dimension tuple in form (samples, params) t: Time vector Returns: array_like: Signal sequence corresponding to the time vector.\n\n<|skeleton|>\nclass GaussStep:\n\n def __init__(self, step_time, mu=None, sigma=None, n_step=None, ss=None):\n \"\"\"Settings for a Gauss step sequence Args: mu (float) sigma (float) step_time: Time to perform step change n_step (int): Number of steps Notes: Preferred signal for closed-loop control training data set.\"\"\"\n <|body_0|>\n\n def out(self, t: any, dim=(None, None)) -> any:\n \"\"\"Generate a Gauss sequence Args: dim: Dimension tuple in form (samples, params) t: Time vector Returns: array_like: Signal sequence corresponding to the time vector.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.ss = ss\n self.n_step = n_step\n self.mu = mu\n self.sigma = sigma\n self.step_time = step_time\n<|end_body_0|>\n\n<|body_start_1|>\n step_vector = np.abs([round(gauss(self.mu, self.sigma), 1) for _ in range(self.n_step)])\n u = np.zeros(shape=dim)\n j = 0\n for i in range(len(t)):\n if t[i] % self.step_time == 0 and t[i] != 0 and (j + 1 != len(step_vector)) and (i != len(t) - 1):\n j += 1\n if self.ss is not None and j == 0:\n u[i, :] = self.ss\n else:\n u[i, :] = step_vector[j]\n return u\n<|end_body_1|>\n", "revision_id": "cf548475295f25407ba968546c2fc85c26f9343c", "skeleton": "<|skeleton|>\nclass GaussStep:\n\n def __init__(self, step_time, mu=None, sigma=None, n_step=None, ss=None):\n \"\"\"Settings for a Gauss step sequence Args: mu (float) sigma (float) step_time: Time to perform step change n_step (int): Number 
of steps Notes: Preferred signal for closed-loop control training data set.\"\"\"\n <|body_0|>\n\n def out(self, t: any, dim=(None, None)) -> any:\n \"\"\"Generate a Gauss sequence Args: dim: Dimension tuple in form (samples, params) t: Time vector Returns: array_like: Signal sequence corresponding to the time vector.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GaussStep:\n def __init__(self, step_time, mu=None, sigma=None, n_step=None, ss=None):\n \"\"\"Settings for a Gauss step sequence Args: mu (float) sigma (float) step_time: Time to perform step change n_step (int): Number of steps Notes: Preferred signal for closed-loop control training data set.\"\"\"\n self.ss = ss\n self.n_step = n_step\n self.mu = mu\n self.sigma = sigma\n self.step_time = step_time\n\n def out(self, t: any, dim=(None, None)) -> any:\n \"\"\"Generate a Gauss sequence Args: dim: Dimension tuple in form (samples, params) t: Time vector Returns: array_like: Signal sequence corresponding to the time vector.\"\"\"\n step_vector = np.abs([round(gauss(self.mu, self.sigma), 1) for _ in range(self.n_step)])\n u = np.zeros(shape=dim)\n j = 0\n for i in range(len(t)):\n if t[i] % self.step_time == 0 and t[i] != 0 and (j + 1 != len(step_vector)) and (i != len(t) - 1):\n j += 1\n if self.ss is not None and j == 0:\n u[i, :] = self.ss\n else:\n u[i, :] = step_vector[j]\n return u\n", "source": "the_stack_v2_python_sparse", "source_path": "SourceCode/simulation/signal.py", "source_repo": "martin-bachorik/Master-Thesis-Project", "split": "val", "star_events_count": 0} {"blob_id": "11a346f67d9c8e1eb6bd6abc070e6be9524159da", "bodies": ["if self.has_key(item):\n self[item].insert(nr, issue)\nelse:\n self[item] = [issue]\nreturn 1", "if self.has_key(item):\n self[item].append(issue)\nelse:\n self[item] = [issue]\nreturn 1", "if self.has_key(item):\n if issue in self[item]:\n return 0\nself.add(item, issue)\nreturn 1", "number = int(number)\nif self.has_key(item):\n try:\n del self[item][number]\n return 1\n except IndexError:\n return None", "try:\n self[item].remove(issue)\n return 1\nexcept ValueError:\n pass", "try:\n if issue in self[item]:\n return 1\n else:\n return None\nexcept KeyError:\n pass"], "bodies_text": "<|body_start_0|>\n if self.has_key(item):\n self[item].insert(nr, issue)\n else:\n self[item] = [issue]\n return 1\n<|end_body_0|>\n\n<|body_start_1|>\n if self.has_key(item):\n self[item].append(issue)\n else:\n self[item] = [issue]\n return 1\n<|end_body_1|>\n\n<|body_start_2|>\n if self.has_key(item):\n if issue in self[item]:\n return 0\n self.add(item, issue)\n return 1\n<|end_body_2|>\n\n<|body_start_3|>\n number = int(number)\n if self.has_key(item):\n try:\n del self[item][number]\n return 1\n except IndexError:\n return None\n<|end_body_3|>\n\n<|body_start_4|>\n try:\n self[item].remove(issue)\n return 1\n except ValueError:\n pass\n<|end_body_4|>\n\n<|body_start_5|>\n try:\n if issue in self[item]:\n return 1\n else:\n return None\n except KeyError:\n pass\n<|end_body_5|>\n", "class_docstring": "dol is dict of lists", "class_name": "Dol", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Dol:\n \"\"\"dol is dict of lists\"\"\"\n\n def insert(self, nr, item, issue):\n \"\"\"add issue to item entry\"\"\"\n <|body_0|>\n\n def add(self, item, issue):\n \"\"\"add issue to item entry\"\"\"\n <|body_1|>\n\n def adduniq(self, item, 
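
GaussStep.out above draws n_step absolute Gaussian levels and holds each one for step_time units along the time vector. A compact usage sketch, under the assumption (implied by the bodies) that gauss comes from the random module and np is numpy:

import numpy as np
from random import gauss

t = np.arange(0.0, 10.0, 1.0)          # ten samples, one per time unit
step_time, n_step = 2, 5               # a new random level every 2 units
levels = np.abs([round(gauss(1.0, 0.3), 1) for _ in range(n_step)])

u = np.zeros((len(t), 1))
j = 0
for i in range(len(t)):
    # Advance to the next level at each step boundary, except at t = 0,
    # at the final sample, and once the levels are exhausted.
    if t[i] % step_time == 0 and t[i] != 0 and j + 1 != len(levels) and i != len(t) - 1:
        j += 1
    u[i, :] = levels[j]
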
issue):\n \"\"\"only add issue to item if it is not already there\"\"\"\n <|body_2|>\n\n def delete(self, item, number):\n \"\"\"del self[item][number]\"\"\"\n <|body_3|>\n\n def remove(self, item, issue):\n \"\"\"remove issue from item\"\"\"\n <|body_4|>\n\n def has(self, item, issue):\n \"\"\"check if item has issue\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.has_key(item):\n self[item].insert(nr, issue)\n else:\n self[item] = [issue]\n return 1\n<|end_body_0|>\n\n<|body_start_1|>\n if self.has_key(item):\n self[item].append(issue)\n else:\n self[item] = [issue]\n return 1\n<|end_body_1|>\n\n<|body_start_2|>\n if self.has_key(item):\n if issue in self[item]:\n return 0\n self.add(item, issue)\n return 1\n<|end_body_2|>\n\n<|body_start_3|>\n number = int(number)\n if self.has_key(item):\n try:\n del self[item][number]\n return 1\n except IndexError:\n return None\n<|end_body_3|>\n\n<|body_start_4|>\n try:\n self[item].remove(issue)\n return 1\n except ValueError:\n pass\n<|end_body_4|>\n\n<|body_start_5|>\n try:\n if issue in self[item]:\n return 1\n else:\n return None\n except KeyError:\n pass\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_val_000153", "length_bytes": 1526, "license_type": "no_license", "methods": [{"docstring": "add issue to item entry", "name": "insert", "signature": "def insert(self, nr, item, issue)"}, {"docstring": "add issue to item entry", "name": "add", "signature": "def add(self, item, issue)"}, {"docstring": "only add issue to item if it is not already there", "name": "adduniq", "signature": "def adduniq(self, item, issue)"}, {"docstring": "del self[item][number]", "name": "delete", "signature": "def delete(self, item, number)"}, {"docstring": "remove issue from item", "name": "remove", "signature": "def remove(self, item, issue)"}, {"docstring": "check if item has issue", "name": "has", "signature": "def has(self, item, issue)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_005049", "prompt": "Implement the Python class `Dol` described below.\n\nClass description:\ndol is dict of lists\n\nMethod signatures and docstrings:\n- def insert(self, nr, item, issue): add issue to item entry\n- def add(self, item, issue): add issue to item entry\n- def adduniq(self, item, issue): only add issue to item if it is not already there\n- def delete(self, item, number): del self[item][number]\n- def remove(self, item, issue): remove issue from item\n- def has(self, item, issue): check if item has issue", "prompted_full_text": "Implement the Python class `Dol` described below.\n\nClass description:\ndol is dict of lists\n\nMethod signatures and docstrings:\n- def insert(self, nr, item, issue): add issue to item entry\n- def add(self, item, issue): add issue to item entry\n- def adduniq(self, item, issue): only add issue to item if it is not already there\n- def delete(self, item, number): del self[item][number]\n- def remove(self, item, issue): remove issue from item\n- def has(self, item, issue): check if item has issue\n\n<|skeleton|>\nclass Dol:\n \"\"\"dol is dict of lists\"\"\"\n\n def insert(self, nr, item, issue):\n \"\"\"add issue to item entry\"\"\"\n <|body_0|>\n\n def add(self, item, issue):\n \"\"\"add issue to item entry\"\"\"\n <|body_1|>\n\n def adduniq(self, item, issue):\n \"\"\"only add issue to item if it is not already there\"\"\"\n <|body_2|>\n\n def delete(self, item, number):\n \"\"\"del self[item][number]\"\"\"\n <|body_3|>\n\n def remove(self, item, issue):\n \"\"\"remove issue from item\"\"\"\n 
<|body_4|>\n\n def has(self, item, issue):\n \"\"\"check if item has issue\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.has_key(item):\n self[item].insert(nr, issue)\n else:\n self[item] = [issue]\n return 1\n<|end_body_0|>\n\n<|body_start_1|>\n if self.has_key(item):\n self[item].append(issue)\n else:\n self[item] = [issue]\n return 1\n<|end_body_1|>\n\n<|body_start_2|>\n if self.has_key(item):\n if issue in self[item]:\n return 0\n self.add(item, issue)\n return 1\n<|end_body_2|>\n\n<|body_start_3|>\n number = int(number)\n if self.has_key(item):\n try:\n del self[item][number]\n return 1\n except IndexError:\n return None\n<|end_body_3|>\n\n<|body_start_4|>\n try:\n self[item].remove(issue)\n return 1\n except ValueError:\n pass\n<|end_body_4|>\n\n<|body_start_5|>\n try:\n if issue in self[item]:\n return 1\n else:\n return None\n except KeyError:\n pass\n<|end_body_5|>\n", "revision_id": "ea86f2b7713457fc7a73f1227b969b230debda48", "skeleton": "<|skeleton|>\nclass Dol:\n \"\"\"dol is dict of lists\"\"\"\n\n def insert(self, nr, item, issue):\n \"\"\"add issue to item entry\"\"\"\n <|body_0|>\n\n def add(self, item, issue):\n \"\"\"add issue to item entry\"\"\"\n <|body_1|>\n\n def adduniq(self, item, issue):\n \"\"\"only add issue to item if it is not already there\"\"\"\n <|body_2|>\n\n def delete(self, item, number):\n \"\"\"del self[item][number]\"\"\"\n <|body_3|>\n\n def remove(self, item, issue):\n \"\"\"remove issue from item\"\"\"\n <|body_4|>\n\n def has(self, item, issue):\n \"\"\"check if item has issue\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Dol:\n \"\"\"dol is dict of lists\"\"\"\n\n def insert(self, nr, item, issue):\n \"\"\"add issue to item entry\"\"\"\n if self.has_key(item):\n self[item].insert(nr, issue)\n else:\n self[item] = [issue]\n return 1\n\n def add(self, item, issue):\n \"\"\"add issue to item entry\"\"\"\n if self.has_key(item):\n self[item].append(issue)\n else:\n self[item] = [issue]\n return 1\n\n def adduniq(self, item, issue):\n \"\"\"only add issue to item if it is not already there\"\"\"\n if self.has_key(item):\n if issue in self[item]:\n return 0\n self.add(item, issue)\n return 1\n\n def delete(self, item, number):\n \"\"\"del self[item][number]\"\"\"\n number = int(number)\n if self.has_key(item):\n try:\n del self[item][number]\n return 1\n except IndexError:\n return None\n\n def remove(self, item, issue):\n \"\"\"remove issue from item\"\"\"\n try:\n self[item].remove(issue)\n return 1\n except ValueError:\n pass\n\n def has(self, item, issue):\n \"\"\"check if item has issue\"\"\"\n try:\n if issue in self[item]:\n return 1\n else:\n return None\n except KeyError:\n pass\n", "source": "the_stack_v2_python_sparse", "source_path": "gozerbot/utils/dol.py", "source_repo": "polichism/my-gozerbot", "split": "val", "star_events_count": 0} {"blob_id": "fa7d858f7fee681225a3beb988b0c75302557181", "bodies": ["self.env.revert_snapshot('ready_with_3_slaves')\nself.prepare_plugin()\nself.helpers.create_cluster(name=self.__class__.__name__)\nself.activate_plugin()", "self.check_run('deploy_influxdb_grafana')\nself.env.revert_snapshot('ready_with_3_slaves')\nself.prepare_plugin()\nself.helpers.create_cluster(name=self.__class__.__name__)\nself.activate_plugin()\nself.helpers.deploy_cluster(self.base_nodes)\nself.check_plugin_online()\nself.helpers.run_ostf()\nself.env.make_snapshot('deploy_influxdb_grafana', 
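
The Dol bodies above index self directly and call self.has_key, so the class evidently subclasses dict and targets Python 2, where dict.has_key still existed (it was removed in Python 3). A Python 3 rendering of the same dict-of-lists idea, with membership tests replacing has_key, is sketched below:

class Dol(dict):
    """Dict of lists (Python 3 sketch of the record above)."""

    def add(self, item, issue):
        if item in self:
            self[item].append(issue)
        else:
            self[item] = [issue]
        return 1

    def adduniq(self, item, issue):
        # Only add the issue if the item does not already hold it.
        if item in self and issue in self[item]:
            return 0
        return self.add(item, issue)

d = Dol()
d.add('bugs', 'issue-1')
d.adduniq('bugs', 'issue-1')          # duplicate: not added
assert d['bugs'] == ['issue-1']
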
is_make=True)", "self.check_run('deploy_ha_influxdb_grafana')\nself.env.revert_snapshot('ready_with_9_slaves')\nself.prepare_plugin()\nself.helpers.create_cluster(name=self.__class__.__name__)\nself.activate_plugin()\nself.helpers.deploy_cluster(self.full_ha_nodes)\nself.check_plugin_online()\nself.helpers.run_ostf()\nself.env.make_snapshot('deploy_ha_influxdb_grafana', is_make=True)", "self.env.revert_snapshot('ready_with_3_slaves')\nself.prepare_plugin()\nself.uninstall_plugin()", "self.env.revert_snapshot('deploy_influxdb_grafana')\nself.check_uninstall_failure()\nself.fuel_web.delete_env_wait(self.helpers.cluster_id)\nself.uninstall_plugin()"], "bodies_text": "<|body_start_0|>\n self.env.revert_snapshot('ready_with_3_slaves')\n self.prepare_plugin()\n self.helpers.create_cluster(name=self.__class__.__name__)\n self.activate_plugin()\n<|end_body_0|>\n\n<|body_start_1|>\n self.check_run('deploy_influxdb_grafana')\n self.env.revert_snapshot('ready_with_3_slaves')\n self.prepare_plugin()\n self.helpers.create_cluster(name=self.__class__.__name__)\n self.activate_plugin()\n self.helpers.deploy_cluster(self.base_nodes)\n self.check_plugin_online()\n self.helpers.run_ostf()\n self.env.make_snapshot('deploy_influxdb_grafana', is_make=True)\n<|end_body_1|>\n\n<|body_start_2|>\n self.check_run('deploy_ha_influxdb_grafana')\n self.env.revert_snapshot('ready_with_9_slaves')\n self.prepare_plugin()\n self.helpers.create_cluster(name=self.__class__.__name__)\n self.activate_plugin()\n self.helpers.deploy_cluster(self.full_ha_nodes)\n self.check_plugin_online()\n self.helpers.run_ostf()\n self.env.make_snapshot('deploy_ha_influxdb_grafana', is_make=True)\n<|end_body_2|>\n\n<|body_start_3|>\n self.env.revert_snapshot('ready_with_3_slaves')\n self.prepare_plugin()\n self.uninstall_plugin()\n<|end_body_3|>\n\n<|body_start_4|>\n self.env.revert_snapshot('deploy_influxdb_grafana')\n self.check_uninstall_failure()\n self.fuel_web.delete_env_wait(self.helpers.cluster_id)\n self.uninstall_plugin()\n<|end_body_4|>\n", "class_docstring": "Class for smoke testing the InfluxDB-Grafana plugin.", "class_name": "TestInfluxdbPlugin", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestInfluxdbPlugin:\n \"\"\"Class for smoke testing the InfluxDB-Grafana plugin.\"\"\"\n\n def install_influxdb_grafana(self):\n \"\"\"Install InfluxDB-Grafana plugin and check it exists Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create a cluster 4. Check that the plugin can be enabled Duration 20m\"\"\"\n <|body_0|>\n\n def deploy_influxdb_grafana(self):\n \"\"\"Deploy a cluster with the InfluxDB-Grafana plugin Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create the cluster 4. Add 1 node with controller role 5. Add 1 node with compute and cinder roles 6. Add 1 node with influxdb_grafana role 7. Deploy the cluster 8. Check that InfluxDB/Grafana are running 9. Run OSTF Duration 60m Snapshot deploy_influxdb_grafana\"\"\"\n <|body_1|>\n\n def deploy_ha_influxdb_grafana(self):\n \"\"\"Deploy a cluster with the InfluxDB-Grafana plugin in HA mode Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create the cluster 4. Add 3 nodes with controller role 5. Add 3 nodes with compute and cinder roles 6. Add 3 nodes with influxdb_grafana role 7. Deploy the cluster 8. Check that InfluxDB/Grafana are running 9. 
Run OSTF Duration 120m Snapshot deploy_ha_influxdb_grafana\"\"\"\n <|body_2|>\n\n def uninstall_influxdb_grafana(self):\n \"\"\"Uninstall the InfluxDB-Grafana plugin Scenario: 1. Install the plugin. 2. Remove the plugin. Duration 5m\"\"\"\n <|body_3|>\n\n def uninstall_deployed_influxdb_grafana(self):\n \"\"\"Uninstall the InfluxDB-Grafana plugin with a deployed environment Scenario: 1. Try to remove the plugin using the Fuel CLI 2. Check plugin can't be uninstalled on deployed cluster. 3. Remove the environment. 4. Remove the plugin. Duration 20m\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.env.revert_snapshot('ready_with_3_slaves')\n self.prepare_plugin()\n self.helpers.create_cluster(name=self.__class__.__name__)\n self.activate_plugin()\n<|end_body_0|>\n\n<|body_start_1|>\n self.check_run('deploy_influxdb_grafana')\n self.env.revert_snapshot('ready_with_3_slaves')\n self.prepare_plugin()\n self.helpers.create_cluster(name=self.__class__.__name__)\n self.activate_plugin()\n self.helpers.deploy_cluster(self.base_nodes)\n self.check_plugin_online()\n self.helpers.run_ostf()\n self.env.make_snapshot('deploy_influxdb_grafana', is_make=True)\n<|end_body_1|>\n\n<|body_start_2|>\n self.check_run('deploy_ha_influxdb_grafana')\n self.env.revert_snapshot('ready_with_9_slaves')\n self.prepare_plugin()\n self.helpers.create_cluster(name=self.__class__.__name__)\n self.activate_plugin()\n self.helpers.deploy_cluster(self.full_ha_nodes)\n self.check_plugin_online()\n self.helpers.run_ostf()\n self.env.make_snapshot('deploy_ha_influxdb_grafana', is_make=True)\n<|end_body_2|>\n\n<|body_start_3|>\n self.env.revert_snapshot('ready_with_3_slaves')\n self.prepare_plugin()\n self.uninstall_plugin()\n<|end_body_3|>\n\n<|body_start_4|>\n self.env.revert_snapshot('deploy_influxdb_grafana')\n self.check_uninstall_failure()\n self.fuel_web.delete_env_wait(self.helpers.cluster_id)\n self.uninstall_plugin()\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000154", "length_bytes": 5396, "license_type": "no_license", "methods": [{"docstring": "Install InfluxDB-Grafana plugin and check it exists Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create a cluster 4. Check that the plugin can be enabled Duration 20m", "name": "install_influxdb_grafana", "signature": "def install_influxdb_grafana(self)"}, {"docstring": "Deploy a cluster with the InfluxDB-Grafana plugin Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create the cluster 4. Add 1 node with controller role 5. Add 1 node with compute and cinder roles 6. Add 1 node with influxdb_grafana role 7. Deploy the cluster 8. Check that InfluxDB/Grafana are running 9. Run OSTF Duration 60m Snapshot deploy_influxdb_grafana", "name": "deploy_influxdb_grafana", "signature": "def deploy_influxdb_grafana(self)"}, {"docstring": "Deploy a cluster with the InfluxDB-Grafana plugin in HA mode Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create the cluster 4. Add 3 nodes with controller role 5. Add 3 nodes with compute and cinder roles 6. Add 3 nodes with influxdb_grafana role 7. Deploy the cluster 8. Check that InfluxDB/Grafana are running 9. Run OSTF Duration 120m Snapshot deploy_ha_influxdb_grafana", "name": "deploy_ha_influxdb_grafana", "signature": "def deploy_ha_influxdb_grafana(self)"}, {"docstring": "Uninstall the InfluxDB-Grafana plugin Scenario: 1. Install the plugin. 2. Remove the plugin. 
Duration 5m", "name": "uninstall_influxdb_grafana", "signature": "def uninstall_influxdb_grafana(self)"}, {"docstring": "Uninstall the InfluxDB-Grafana plugin with a deployed environment Scenario: 1. Try to remove the plugin using the Fuel CLI 2. Check plugin can't be uninstalled on deployed cluster. 3. Remove the environment. 4. Remove the plugin. Duration 20m", "name": "uninstall_deployed_influxdb_grafana", "signature": "def uninstall_deployed_influxdb_grafana(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_val_000201", "prompt": "Implement the Python class `TestInfluxdbPlugin` described below.\n\nClass description:\nClass for smoke testing the InfluxDB-Grafana plugin.\n\nMethod signatures and docstrings:\n- def install_influxdb_grafana(self): Install InfluxDB-Grafana plugin and check it exists Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create a cluster 4. Check that the plugin can be enabled Duration 20m\n- def deploy_influxdb_grafana(self): Deploy a cluster with the InfluxDB-Grafana plugin Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create the cluster 4. Add 1 node with controller role 5. Add 1 node with compute and cinder roles 6. Add 1 node with influxdb_grafana role 7. Deploy the cluster 8. Check that InfluxDB/Grafana are running 9. Run OSTF Duration 60m Snapshot deploy_influxdb_grafana\n- def deploy_ha_influxdb_grafana(self): Deploy a cluster with the InfluxDB-Grafana plugin in HA mode Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create the cluster 4. Add 3 nodes with controller role 5. Add 3 nodes with compute and cinder roles 6. Add 3 nodes with influxdb_grafana role 7. Deploy the cluster 8. Check that InfluxDB/Grafana are running 9. Run OSTF Duration 120m Snapshot deploy_ha_influxdb_grafana\n- def uninstall_influxdb_grafana(self): Uninstall the InfluxDB-Grafana plugin Scenario: 1. Install the plugin. 2. Remove the plugin. Duration 5m\n- def uninstall_deployed_influxdb_grafana(self): Uninstall the InfluxDB-Grafana plugin with a deployed environment Scenario: 1. Try to remove the plugin using the Fuel CLI 2. Check plugin can't be uninstalled on deployed cluster. 3. Remove the environment. 4. Remove the plugin. Duration 20m", "prompted_full_text": "Implement the Python class `TestInfluxdbPlugin` described below.\n\nClass description:\nClass for smoke testing the InfluxDB-Grafana plugin.\n\nMethod signatures and docstrings:\n- def install_influxdb_grafana(self): Install InfluxDB-Grafana plugin and check it exists Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create a cluster 4. Check that the plugin can be enabled Duration 20m\n- def deploy_influxdb_grafana(self): Deploy a cluster with the InfluxDB-Grafana plugin Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create the cluster 4. Add 1 node with controller role 5. Add 1 node with compute and cinder roles 6. Add 1 node with influxdb_grafana role 7. Deploy the cluster 8. Check that InfluxDB/Grafana are running 9. Run OSTF Duration 60m Snapshot deploy_influxdb_grafana\n- def deploy_ha_influxdb_grafana(self): Deploy a cluster with the InfluxDB-Grafana plugin in HA mode Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create the cluster 4. Add 3 nodes with controller role 5. Add 3 nodes with compute and cinder roles 6. 
Add 3 nodes with influxdb_grafana role 7. Deploy the cluster 8. Check that InfluxDB/Grafana are running 9. Run OSTF Duration 120m Snapshot deploy_ha_influxdb_grafana\n- def uninstall_influxdb_grafana(self): Uninstall the InfluxDB-Grafana plugin Scenario: 1. Install the plugin. 2. Remove the plugin. Duration 5m\n- def uninstall_deployed_influxdb_grafana(self): Uninstall the InfluxDB-Grafana plugin with a deployed environment Scenario: 1. Try to remove the plugin using the Fuel CLI 2. Check plugin can't be uninstalled on deployed cluster. 3. Remove the environment. 4. Remove the plugin. Duration 20m\n\n<|skeleton|>\nclass TestInfluxdbPlugin:\n \"\"\"Class for smoke testing the InfluxDB-Grafana plugin.\"\"\"\n\n def install_influxdb_grafana(self):\n \"\"\"Install InfluxDB-Grafana plugin and check it exists Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create a cluster 4. Check that the plugin can be enabled Duration 20m\"\"\"\n <|body_0|>\n\n def deploy_influxdb_grafana(self):\n \"\"\"Deploy a cluster with the InfluxDB-Grafana plugin Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create the cluster 4. Add 1 node with controller role 5. Add 1 node with compute and cinder roles 6. Add 1 node with influxdb_grafana role 7. Deploy the cluster 8. Check that InfluxDB/Grafana are running 9. Run OSTF Duration 60m Snapshot deploy_influxdb_grafana\"\"\"\n <|body_1|>\n\n def deploy_ha_influxdb_grafana(self):\n \"\"\"Deploy a cluster with the InfluxDB-Grafana plugin in HA mode Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create the cluster 4. Add 3 nodes with controller role 5. Add 3 nodes with compute and cinder roles 6. Add 3 nodes with influxdb_grafana role 7. Deploy the cluster 8. Check that InfluxDB/Grafana are running 9. Run OSTF Duration 120m Snapshot deploy_ha_influxdb_grafana\"\"\"\n <|body_2|>\n\n def uninstall_influxdb_grafana(self):\n \"\"\"Uninstall the InfluxDB-Grafana plugin Scenario: 1. Install the plugin. 2. Remove the plugin. Duration 5m\"\"\"\n <|body_3|>\n\n def uninstall_deployed_influxdb_grafana(self):\n \"\"\"Uninstall the InfluxDB-Grafana plugin with a deployed environment Scenario: 1. Try to remove the plugin using the Fuel CLI 2. Check plugin can't be uninstalled on deployed cluster. 3. Remove the environment. 4. Remove the plugin. 
Duration 20m\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.env.revert_snapshot('ready_with_3_slaves')\n self.prepare_plugin()\n self.helpers.create_cluster(name=self.__class__.__name__)\n self.activate_plugin()\n<|end_body_0|>\n\n<|body_start_1|>\n self.check_run('deploy_influxdb_grafana')\n self.env.revert_snapshot('ready_with_3_slaves')\n self.prepare_plugin()\n self.helpers.create_cluster(name=self.__class__.__name__)\n self.activate_plugin()\n self.helpers.deploy_cluster(self.base_nodes)\n self.check_plugin_online()\n self.helpers.run_ostf()\n self.env.make_snapshot('deploy_influxdb_grafana', is_make=True)\n<|end_body_1|>\n\n<|body_start_2|>\n self.check_run('deploy_ha_influxdb_grafana')\n self.env.revert_snapshot('ready_with_9_slaves')\n self.prepare_plugin()\n self.helpers.create_cluster(name=self.__class__.__name__)\n self.activate_plugin()\n self.helpers.deploy_cluster(self.full_ha_nodes)\n self.check_plugin_online()\n self.helpers.run_ostf()\n self.env.make_snapshot('deploy_ha_influxdb_grafana', is_make=True)\n<|end_body_2|>\n\n<|body_start_3|>\n self.env.revert_snapshot('ready_with_3_slaves')\n self.prepare_plugin()\n self.uninstall_plugin()\n<|end_body_3|>\n\n<|body_start_4|>\n self.env.revert_snapshot('deploy_influxdb_grafana')\n self.check_uninstall_failure()\n self.fuel_web.delete_env_wait(self.helpers.cluster_id)\n self.uninstall_plugin()\n<|end_body_4|>\n", "revision_id": "179249df2d206eeabb3955c9dc8cb78cac3c36c6", "skeleton": "<|skeleton|>\nclass TestInfluxdbPlugin:\n \"\"\"Class for smoke testing the InfluxDB-Grafana plugin.\"\"\"\n\n def install_influxdb_grafana(self):\n \"\"\"Install InfluxDB-Grafana plugin and check it exists Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create a cluster 4. Check that the plugin can be enabled Duration 20m\"\"\"\n <|body_0|>\n\n def deploy_influxdb_grafana(self):\n \"\"\"Deploy a cluster with the InfluxDB-Grafana plugin Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create the cluster 4. Add 1 node with controller role 5. Add 1 node with compute and cinder roles 6. Add 1 node with influxdb_grafana role 7. Deploy the cluster 8. Check that InfluxDB/Grafana are running 9. Run OSTF Duration 60m Snapshot deploy_influxdb_grafana\"\"\"\n <|body_1|>\n\n def deploy_ha_influxdb_grafana(self):\n \"\"\"Deploy a cluster with the InfluxDB-Grafana plugin in HA mode Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create the cluster 4. Add 3 nodes with controller role 5. Add 3 nodes with compute and cinder roles 6. Add 3 nodes with influxdb_grafana role 7. Deploy the cluster 8. Check that InfluxDB/Grafana are running 9. Run OSTF Duration 120m Snapshot deploy_ha_influxdb_grafana\"\"\"\n <|body_2|>\n\n def uninstall_influxdb_grafana(self):\n \"\"\"Uninstall the InfluxDB-Grafana plugin Scenario: 1. Install the plugin. 2. Remove the plugin. Duration 5m\"\"\"\n <|body_3|>\n\n def uninstall_deployed_influxdb_grafana(self):\n \"\"\"Uninstall the InfluxDB-Grafana plugin with a deployed environment Scenario: 1. Try to remove the plugin using the Fuel CLI 2. Check plugin can't be uninstalled on deployed cluster. 3. Remove the environment. 4. Remove the plugin. 
Duration 20m\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestInfluxdbPlugin:\n \"\"\"Class for smoke testing the InfluxDB-Grafana plugin.\"\"\"\n\n def install_influxdb_grafana(self):\n \"\"\"Install InfluxDB-Grafana plugin and check it exists Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create a cluster 4. Check that the plugin can be enabled Duration 20m\"\"\"\n self.env.revert_snapshot('ready_with_3_slaves')\n self.prepare_plugin()\n self.helpers.create_cluster(name=self.__class__.__name__)\n self.activate_plugin()\n\n def deploy_influxdb_grafana(self):\n \"\"\"Deploy a cluster with the InfluxDB-Grafana plugin Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create the cluster 4. Add 1 node with controller role 5. Add 1 node with compute and cinder roles 6. Add 1 node with influxdb_grafana role 7. Deploy the cluster 8. Check that InfluxDB/Grafana are running 9. Run OSTF Duration 60m Snapshot deploy_influxdb_grafana\"\"\"\n self.check_run('deploy_influxdb_grafana')\n self.env.revert_snapshot('ready_with_3_slaves')\n self.prepare_plugin()\n self.helpers.create_cluster(name=self.__class__.__name__)\n self.activate_plugin()\n self.helpers.deploy_cluster(self.base_nodes)\n self.check_plugin_online()\n self.helpers.run_ostf()\n self.env.make_snapshot('deploy_influxdb_grafana', is_make=True)\n\n def deploy_ha_influxdb_grafana(self):\n \"\"\"Deploy a cluster with the InfluxDB-Grafana plugin in HA mode Scenario: 1. Upload the InfluxDB/Grafana plugin to the master node 2. Install the plugin 3. Create the cluster 4. Add 3 nodes with controller role 5. Add 3 nodes with compute and cinder roles 6. Add 3 nodes with influxdb_grafana role 7. Deploy the cluster 8. Check that InfluxDB/Grafana are running 9. Run OSTF Duration 120m Snapshot deploy_ha_influxdb_grafana\"\"\"\n self.check_run('deploy_ha_influxdb_grafana')\n self.env.revert_snapshot('ready_with_9_slaves')\n self.prepare_plugin()\n self.helpers.create_cluster(name=self.__class__.__name__)\n self.activate_plugin()\n self.helpers.deploy_cluster(self.full_ha_nodes)\n self.check_plugin_online()\n self.helpers.run_ostf()\n self.env.make_snapshot('deploy_ha_influxdb_grafana', is_make=True)\n\n def uninstall_influxdb_grafana(self):\n \"\"\"Uninstall the InfluxDB-Grafana plugin Scenario: 1. Install the plugin. 2. Remove the plugin. Duration 5m\"\"\"\n self.env.revert_snapshot('ready_with_3_slaves')\n self.prepare_plugin()\n self.uninstall_plugin()\n\n def uninstall_deployed_influxdb_grafana(self):\n \"\"\"Uninstall the InfluxDB-Grafana plugin with a deployed environment Scenario: 1. Try to remove the plugin using the Fuel CLI 2. Check plugin can't be uninstalled on deployed cluster. 3. Remove the environment. 4. Remove the plugin. 
Duration 20m\"\"\"\n self.env.revert_snapshot('deploy_influxdb_grafana')\n self.check_uninstall_failure()\n self.fuel_web.delete_env_wait(self.helpers.cluster_id)\n self.uninstall_plugin()\n", "source": "the_stack_v2_python_sparse", "source_path": "stacklight_tests/influxdb_grafana/test_smoke_bvt.py", "source_repo": "rkhozinov/stacklight-integration-tests", "split": "val", "star_events_count": 1} {"blob_id": "1ae7e8e16ce85d382e8cb412e4f166a8f0ce2602", "bodies": ["cnt = 0\np = -1\ns = -1\nfor i, n in enumerate(A):\n if n > R:\n if p >= 0:\n cnt += (i - p - 1) * (p - s)\n s = i\n p = -1\n elif L <= n <= R:\n if p >= 0:\n cnt += (i - p - 1) * (p - s)\n cnt += i - s\n p = i\nif p >= 0:\n cnt += (len(A) - p - 1) * (p - s)\nreturn cnt", "if not nums:\n return 0\ntree = SegmentTree(nums, merge=max)\ncnt = 0\nfor i in range(len(nums)):\n for j in range(i, len(nums)):\n if left <= tree.query(i, j) <= right:\n cnt += 1\nreturn cnt", "cnt = 0\np = -1\ns = -1\nfor i, n in enumerate(nums):\n if n < left:\n if p >= 0:\n cnt += p - s\n elif left <= n <= right:\n cnt += i - s\n p = i\n else:\n p = -1\n s = i\nreturn cnt"], "bodies_text": "<|body_start_0|>\n cnt = 0\n p = -1\n s = -1\n for i, n in enumerate(A):\n if n > R:\n if p >= 0:\n cnt += (i - p - 1) * (p - s)\n s = i\n p = -1\n elif L <= n <= R:\n if p >= 0:\n cnt += (i - p - 1) * (p - s)\n cnt += i - s\n p = i\n if p >= 0:\n cnt += (len(A) - p - 1) * (p - s)\n return cnt\n<|end_body_0|>\n\n<|body_start_1|>\n if not nums:\n return 0\n tree = SegmentTree(nums, merge=max)\n cnt = 0\n for i in range(len(nums)):\n for j in range(i, len(nums)):\n if left <= tree.query(i, j) <= right:\n cnt += 1\n return cnt\n<|end_body_1|>\n\n<|body_start_2|>\n cnt = 0\n p = -1\n s = -1\n for i, n in enumerate(nums):\n if n < left:\n if p >= 0:\n cnt += p - s\n elif left <= n <= right:\n cnt += i - s\n p = i\n else:\n p = -1\n s = i\n return cnt\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def numSubarrayBoundedMax(self, A: List[int], L: int, R: int) -> int:\n \"\"\"09/17/2020 16:36\"\"\"\n <|body_0|>\n\n def numSubarrayBoundedMax(self, nums: List[int], left: int, right: int) -> int:\n \"\"\"Segment tree. TLE Time complexity: O(n^2 * log(n))\"\"\"\n <|body_1|>\n\n def numSubarrayBoundedMax(self, nums: List[int], left: int, right: int) -> int:\n \"\"\"Count the number of valid intervals that end at i If ith number is smaller than `left`, possible starting indexes are between the last index whose value is larger than the `right` and the last index whose value is between `left` and `right`. If ith number is between `left` and `right`, any indexes before i can be starting index. If ith number is larget than `right`, non is possible. 
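The `TestInfluxdbPlugin` record that closes above never shows where `env`, `helpers`, or `check_run` come from; in the source repo they are presumably inherited from fuel-qa base classes, which is an inference, not something the record states. One way to sanity-check the deploy flow in isolation is to replay it against a `unittest.mock.Mock`, which records every call so the expected ordering can be asserted:

from unittest import mock

t = mock.Mock()  # stand-in for a TestInfluxdbPlugin instance

# replay the body of deploy_influxdb_grafana from the record
t.check_run('deploy_influxdb_grafana')
t.env.revert_snapshot('ready_with_3_slaves')
t.prepare_plugin()
t.helpers.create_cluster(name='TestInfluxdbPlugin')
t.activate_plugin()
t.helpers.deploy_cluster(t.base_nodes)
t.check_plugin_online()
t.helpers.run_ostf()
t.env.make_snapshot('deploy_influxdb_grafana', is_make=True)

# the snapshot guard must run first and the snapshot must be taken last
assert t.mock_calls[0] == mock.call.check_run('deploy_influxdb_grafana')
assert t.mock_calls[-1] == mock.call.env.make_snapshot(
    'deploy_influxdb_grafana', is_make=True)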
Need to keep the last index whose value is between `left` and `right` and the last index whoe value is larger than `right` Time complexity: O(n)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cnt = 0\n p = -1\n s = -1\n for i, n in enumerate(A):\n if n > R:\n if p >= 0:\n cnt += (i - p - 1) * (p - s)\n s = i\n p = -1\n elif L <= n <= R:\n if p >= 0:\n cnt += (i - p - 1) * (p - s)\n cnt += i - s\n p = i\n if p >= 0:\n cnt += (len(A) - p - 1) * (p - s)\n return cnt\n<|end_body_0|>\n\n<|body_start_1|>\n if not nums:\n return 0\n tree = SegmentTree(nums, merge=max)\n cnt = 0\n for i in range(len(nums)):\n for j in range(i, len(nums)):\n if left <= tree.query(i, j) <= right:\n cnt += 1\n return cnt\n<|end_body_1|>\n\n<|body_start_2|>\n cnt = 0\n p = -1\n s = -1\n for i, n in enumerate(nums):\n if n < left:\n if p >= 0:\n cnt += p - s\n elif left <= n <= right:\n cnt += i - s\n p = i\n else:\n p = -1\n s = i\n return cnt\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000155", "length_bytes": 4112, "license_type": "no_license", "methods": [{"docstring": "09/17/2020 16:36", "name": "numSubarrayBoundedMax", "signature": "def numSubarrayBoundedMax(self, A: List[int], L: int, R: int) -> int"}, {"docstring": "Segment tree. TLE Time complexity: O(n^2 * log(n))", "name": "numSubarrayBoundedMax", "signature": "def numSubarrayBoundedMax(self, nums: List[int], left: int, right: int) -> int"}, {"docstring": "Count the number of valid intervals that end at i If ith number is smaller than `left`, possible starting indexes are between the last index whose value is larger than the `right` and the last index whose value is between `left` and `right`. If ith number is between `left` and `right`, any indexes before i can be starting index. If ith number is larget than `right`, non is possible. Need to keep the last index whose value is between `left` and `right` and the last index whoe value is larger than `right` Time complexity: O(n)", "name": "numSubarrayBoundedMax", "signature": "def numSubarrayBoundedMax(self, nums: List[int], left: int, right: int) -> int"}], "n_methods": 3, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numSubarrayBoundedMax(self, A: List[int], L: int, R: int) -> int: 09/17/2020 16:36\n- def numSubarrayBoundedMax(self, nums: List[int], left: int, right: int) -> int: Segment tree. TLE Time complexity: O(n^2 * log(n))\n- def numSubarrayBoundedMax(self, nums: List[int], left: int, right: int) -> int: Count the number of valid intervals that end at i If ith number is smaller than `left`, possible starting indexes are between the last index whose value is larger than the `right` and the last index whose value is between `left` and `right`. If ith number is between `left` and `right`, any indexes before i can be starting index. If ith number is larget than `right`, non is possible. Need to keep the last index whose value is between `left` and `right` and the last index whoe value is larger than `right` Time complexity: O(n)", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numSubarrayBoundedMax(self, A: List[int], L: int, R: int) -> int: 09/17/2020 16:36\n- def numSubarrayBoundedMax(self, nums: List[int], left: int, right: int) -> int: Segment tree. 
TLE Time complexity: O(n^2 * log(n))\n- def numSubarrayBoundedMax(self, nums: List[int], left: int, right: int) -> int: Count the number of valid intervals that end at i If ith number is smaller than `left`, possible starting indexes are between the last index whose value is larger than the `right` and the last index whose value is between `left` and `right`. If ith number is between `left` and `right`, any indexes before i can be starting index. If ith number is larget than `right`, non is possible. Need to keep the last index whose value is between `left` and `right` and the last index whoe value is larger than `right` Time complexity: O(n)\n\n<|skeleton|>\nclass Solution:\n\n def numSubarrayBoundedMax(self, A: List[int], L: int, R: int) -> int:\n \"\"\"09/17/2020 16:36\"\"\"\n <|body_0|>\n\n def numSubarrayBoundedMax(self, nums: List[int], left: int, right: int) -> int:\n \"\"\"Segment tree. TLE Time complexity: O(n^2 * log(n))\"\"\"\n <|body_1|>\n\n def numSubarrayBoundedMax(self, nums: List[int], left: int, right: int) -> int:\n \"\"\"Count the number of valid intervals that end at i If ith number is smaller than `left`, possible starting indexes are between the last index whose value is larger than the `right` and the last index whose value is between `left` and `right`. If ith number is between `left` and `right`, any indexes before i can be starting index. If ith number is larget than `right`, non is possible. Need to keep the last index whose value is between `left` and `right` and the last index whoe value is larger than `right` Time complexity: O(n)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cnt = 0\n p = -1\n s = -1\n for i, n in enumerate(A):\n if n > R:\n if p >= 0:\n cnt += (i - p - 1) * (p - s)\n s = i\n p = -1\n elif L <= n <= R:\n if p >= 0:\n cnt += (i - p - 1) * (p - s)\n cnt += i - s\n p = i\n if p >= 0:\n cnt += (len(A) - p - 1) * (p - s)\n return cnt\n<|end_body_0|>\n\n<|body_start_1|>\n if not nums:\n return 0\n tree = SegmentTree(nums, merge=max)\n cnt = 0\n for i in range(len(nums)):\n for j in range(i, len(nums)):\n if left <= tree.query(i, j) <= right:\n cnt += 1\n return cnt\n<|end_body_1|>\n\n<|body_start_2|>\n cnt = 0\n p = -1\n s = -1\n for i, n in enumerate(nums):\n if n < left:\n if p >= 0:\n cnt += p - s\n elif left <= n <= right:\n cnt += i - s\n p = i\n else:\n p = -1\n s = i\n return cnt\n<|end_body_2|>\n", "revision_id": "1389a009a02e90e8700a7a00e0b7f797c129cdf4", "skeleton": "<|skeleton|>\nclass Solution:\n\n def numSubarrayBoundedMax(self, A: List[int], L: int, R: int) -> int:\n \"\"\"09/17/2020 16:36\"\"\"\n <|body_0|>\n\n def numSubarrayBoundedMax(self, nums: List[int], left: int, right: int) -> int:\n \"\"\"Segment tree. TLE Time complexity: O(n^2 * log(n))\"\"\"\n <|body_1|>\n\n def numSubarrayBoundedMax(self, nums: List[int], left: int, right: int) -> int:\n \"\"\"Count the number of valid intervals that end at i If ith number is smaller than `left`, possible starting indexes are between the last index whose value is larger than the `right` and the last index whose value is between `left` and `right`. If ith number is between `left` and `right`, any indexes before i can be starting index. If ith number is larget than `right`, non is possible. 
Need to keep the last index whose value is between `left` and `right` and the last index whoe value is larger than `right` Time complexity: O(n)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def numSubarrayBoundedMax(self, A: List[int], L: int, R: int) -> int:\n \"\"\"09/17/2020 16:36\"\"\"\n cnt = 0\n p = -1\n s = -1\n for i, n in enumerate(A):\n if n > R:\n if p >= 0:\n cnt += (i - p - 1) * (p - s)\n s = i\n p = -1\n elif L <= n <= R:\n if p >= 0:\n cnt += (i - p - 1) * (p - s)\n cnt += i - s\n p = i\n if p >= 0:\n cnt += (len(A) - p - 1) * (p - s)\n return cnt\n\n def numSubarrayBoundedMax(self, nums: List[int], left: int, right: int) -> int:\n \"\"\"Segment tree. TLE Time complexity: O(n^2 * log(n))\"\"\"\n if not nums:\n return 0\n tree = SegmentTree(nums, merge=max)\n cnt = 0\n for i in range(len(nums)):\n for j in range(i, len(nums)):\n if left <= tree.query(i, j) <= right:\n cnt += 1\n return cnt\n\n def numSubarrayBoundedMax(self, nums: List[int], left: int, right: int) -> int:\n \"\"\"Count the number of valid intervals that end at i If ith number is smaller than `left`, possible starting indexes are between the last index whose value is larger than the `right` and the last index whose value is between `left` and `right`. If ith number is between `left` and `right`, any indexes before i can be starting index. If ith number is larget than `right`, non is possible. Need to keep the last index whose value is between `left` and `right` and the last index whoe value is larger than `right` Time complexity: O(n)\"\"\"\n cnt = 0\n p = -1\n s = -1\n for i, n in enumerate(nums):\n if n < left:\n if p >= 0:\n cnt += p - s\n elif left <= n <= right:\n cnt += i - s\n p = i\n else:\n p = -1\n s = i\n return cnt\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/solved/811_Number_of_Subarrays_with_Bounded_Maximum/solution.py", "source_repo": "sungminoh/algorithms", "split": "val", "star_events_count": 0} {"blob_id": "a0554d2ddbd412dedde8bf2210634a9f14447df1", "bodies": ["pre = None\ncur = head\nwhile cur:\n tmp = cur.next\n cur.next = pre\n pre = cur\n cur = tmp\nreturn pre", "n_l1, n_l2 = (self.reverseList(l1), self.reverseList(l2))\ncarry = 0\ndummy = cur = ListNode(0)\nwhile n_l1 or n_l2:\n if not n_l1:\n val1 = 0\n else:\n val1 = n_l1.val\n if not n_l2:\n val2 = 0\n else:\n val2 = n_l2.val\n val = val1 + val2 + carry\n if val >= 10:\n carry = 1\n else:\n carry = 0\n val = val % 10\n cur.next = ListNode(val)\n cur = cur.next\n if n_l1:\n n_l1 = n_l1.next\n if n_l2:\n n_l2 = n_l2.next\nif carry:\n cur.next = ListNode(1)\nres = self.reverseList(dummy.next)\nreturn res"], "bodies_text": "<|body_start_0|>\n pre = None\n cur = head\n while cur:\n tmp = cur.next\n cur.next = pre\n pre = cur\n cur = tmp\n return pre\n<|end_body_0|>\n\n<|body_start_1|>\n n_l1, n_l2 = (self.reverseList(l1), self.reverseList(l2))\n carry = 0\n dummy = cur = ListNode(0)\n while n_l1 or n_l2:\n if not n_l1:\n val1 = 0\n else:\n val1 = n_l1.val\n if not n_l2:\n val2 = 0\n else:\n val2 = n_l2.val\n val = val1 + val2 + carry\n if val >= 10:\n carry = 1\n else:\n carry = 0\n val = val % 10\n cur.next = ListNode(val)\n cur = cur.next\n if n_l1:\n n_l1 = n_l1.next\n if n_l2:\n n_l2 = n_l2.next\n if carry:\n cur.next = ListNode(1)\n res = self.reverseList(dummy.next)\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], 
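The third `numSubarrayBoundedMax` body in the record above is the O(n) counter its docstring describes (read "larget", "non", and "whoe" there as "larger", "none", and "whose"): `p` tracks the last index whose value lies in [left, right], `s` the last index whose value exceeds `right`, and each position contributes the number of valid start indexes. A standalone version, cross-checked against a brute-force count on random inputs:

import random
from typing import List

def num_subarray_bounded_max(nums: List[int], left: int, right: int) -> int:
    # p: last index with left <= nums[p] <= right
    # s: last index with nums[s] > right
    cnt, p, s = 0, -1, -1
    for i, n in enumerate(nums):
        if n > right:
            p, s = -1, i            # no valid subarray can end here
        elif n >= left:
            cnt += i - s            # any start in (s, i] puts the max in range
            p = i
        elif p >= 0:
            cnt += p - s            # small value: reuse starts up to p
    return cnt

def brute(nums, left, right):
    # direct definition: count subarrays whose maximum is in [left, right]
    return sum(left <= max(nums[i:j]) <= right
               for i in range(len(nums))
               for j in range(i + 1, len(nums) + 1))

for _ in range(300):
    a = [random.randint(0, 6) for _ in range(random.randint(0, 12))]
    assert num_subarray_bounded_max(a, 2, 4) == brute(a, 2, 4)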
"format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def reverseList(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_0|>\n\n def addTwoNumbers(self, l1, l2):\n \"\"\":type l1: ListNode :type l2: ListNode :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pre = None\n cur = head\n while cur:\n tmp = cur.next\n cur.next = pre\n pre = cur\n cur = tmp\n return pre\n<|end_body_0|>\n\n<|body_start_1|>\n n_l1, n_l2 = (self.reverseList(l1), self.reverseList(l2))\n carry = 0\n dummy = cur = ListNode(0)\n while n_l1 or n_l2:\n if not n_l1:\n val1 = 0\n else:\n val1 = n_l1.val\n if not n_l2:\n val2 = 0\n else:\n val2 = n_l2.val\n val = val1 + val2 + carry\n if val >= 10:\n carry = 1\n else:\n carry = 0\n val = val % 10\n cur.next = ListNode(val)\n cur = cur.next\n if n_l1:\n n_l1 = n_l1.next\n if n_l2:\n n_l2 = n_l2.next\n if carry:\n cur.next = ListNode(1)\n res = self.reverseList(dummy.next)\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000156", "length_bytes": 1941, "license_type": "no_license", "methods": [{"docstring": ":type head: ListNode :rtype: ListNode", "name": "reverseList", "signature": "def reverseList(self, head)"}, {"docstring": ":type l1: ListNode :type l2: ListNode :rtype: ListNode", "name": "addTwoNumbers", "signature": "def addTwoNumbers(self, l1, l2)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000945", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def reverseList(self, head): :type head: ListNode :rtype: ListNode\n- def addTwoNumbers(self, l1, l2): :type l1: ListNode :type l2: ListNode :rtype: ListNode", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def reverseList(self, head): :type head: ListNode :rtype: ListNode\n- def addTwoNumbers(self, l1, l2): :type l1: ListNode :type l2: ListNode :rtype: ListNode\n\n<|skeleton|>\nclass Solution:\n\n def reverseList(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_0|>\n\n def addTwoNumbers(self, l1, l2):\n \"\"\":type l1: ListNode :type l2: ListNode :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pre = None\n cur = head\n while cur:\n tmp = cur.next\n cur.next = pre\n pre = cur\n cur = tmp\n return pre\n<|end_body_0|>\n\n<|body_start_1|>\n n_l1, n_l2 = (self.reverseList(l1), self.reverseList(l2))\n carry = 0\n dummy = cur = ListNode(0)\n while n_l1 or n_l2:\n if not n_l1:\n val1 = 0\n else:\n val1 = n_l1.val\n if not n_l2:\n val2 = 0\n else:\n val2 = n_l2.val\n val = val1 + val2 + carry\n if val >= 10:\n carry = 1\n else:\n carry = 0\n val = val % 10\n cur.next = ListNode(val)\n cur = cur.next\n if n_l1:\n n_l1 = n_l1.next\n if n_l2:\n n_l2 = n_l2.next\n if carry:\n cur.next = ListNode(1)\n res = self.reverseList(dummy.next)\n return res\n<|end_body_1|>\n", "revision_id": "6e18c5d257840489cc3fb1079ae3804c743982a4", "skeleton": "<|skeleton|>\nclass Solution:\n\n def reverseList(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_0|>\n\n def addTwoNumbers(self, l1, l2):\n \"\"\":type l1: ListNode :type l2: ListNode :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", 
"solution": "class Solution:\n def reverseList(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n pre = None\n cur = head\n while cur:\n tmp = cur.next\n cur.next = pre\n pre = cur\n cur = tmp\n return pre\n\n def addTwoNumbers(self, l1, l2):\n \"\"\":type l1: ListNode :type l2: ListNode :rtype: ListNode\"\"\"\n n_l1, n_l2 = (self.reverseList(l1), self.reverseList(l2))\n carry = 0\n dummy = cur = ListNode(0)\n while n_l1 or n_l2:\n if not n_l1:\n val1 = 0\n else:\n val1 = n_l1.val\n if not n_l2:\n val2 = 0\n else:\n val2 = n_l2.val\n val = val1 + val2 + carry\n if val >= 10:\n carry = 1\n else:\n carry = 0\n val = val % 10\n cur.next = ListNode(val)\n cur = cur.next\n if n_l1:\n n_l1 = n_l1.next\n if n_l2:\n n_l2 = n_l2.next\n if carry:\n cur.next = ListNode(1)\n res = self.reverseList(dummy.next)\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "剑指 Offer II 025. 链表中的两数相加.py", "source_repo": "yangyuxiang1996/leetcode", "split": "val", "star_events_count": 0} {"blob_id": "669d87757f3be036c1f9421489221e332c9a2d63", "bodies": ["insts = Tenant.objects.all()\nlist_insts = []\nfor tenant in insts:\n list_insts.append({'id': tenant.id, 'name': tenant.name, 'ctime': tenant.create_time, 'space_quota': TenantQuota.objects.get_or_none(tenant=tenant), 'space_usage': get_tenant_space_usage(tenant)})\nreturn api_response(data={'insts': list_insts})", "inst_name = request.POST.get('name').strip()\nif not inst_name:\n return api_error(code=400, msg=_('Tenant name is required.'))\ntry:\n Tenant.objects.get(name=inst_name)\n return api_error(code=400, msg=_('Tenant existed. Please choose another name for the tenant.'))\nexcept Tenant.DoesNotExist:\n pass\ntry:\n Tenant.objects.create(name=inst_name)\n return api_response(code=200, msg=_('New tenant created successfully.'))\nexcept Exception as e:\n logger.error(e)\n return api_error(code=500, msg=_('Internal server error.'))"], "bodies_text": "<|body_start_0|>\n insts = Tenant.objects.all()\n list_insts = []\n for tenant in insts:\n list_insts.append({'id': tenant.id, 'name': tenant.name, 'ctime': tenant.create_time, 'space_quota': TenantQuota.objects.get_or_none(tenant=tenant), 'space_usage': get_tenant_space_usage(tenant)})\n return api_response(data={'insts': list_insts})\n<|end_body_0|>\n\n<|body_start_1|>\n inst_name = request.POST.get('name').strip()\n if not inst_name:\n return api_error(code=400, msg=_('Tenant name is required.'))\n try:\n Tenant.objects.get(name=inst_name)\n return api_error(code=400, msg=_('Tenant existed. 
Please choose another name for the tenant.'))\n except Tenant.DoesNotExist:\n pass\n try:\n Tenant.objects.create(name=inst_name)\n return api_response(code=200, msg=_('New tenant created successfully.'))\n except Exception as e:\n logger.error(e)\n return api_error(code=500, msg=_('Internal server error.'))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "AdminTenants", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AdminTenants:\n\n def get(self, request):\n \"\"\"Get all tenants\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Create a new tenant\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n insts = Tenant.objects.all()\n list_insts = []\n for tenant in insts:\n list_insts.append({'id': tenant.id, 'name': tenant.name, 'ctime': tenant.create_time, 'space_quota': TenantQuota.objects.get_or_none(tenant=tenant), 'space_usage': get_tenant_space_usage(tenant)})\n return api_response(data={'insts': list_insts})\n<|end_body_0|>\n\n<|body_start_1|>\n inst_name = request.POST.get('name').strip()\n if not inst_name:\n return api_error(code=400, msg=_('Tenant name is required.'))\n try:\n Tenant.objects.get(name=inst_name)\n return api_error(code=400, msg=_('Tenant existed. Please choose another name for the tenant.'))\n except Tenant.DoesNotExist:\n pass\n try:\n Tenant.objects.create(name=inst_name)\n return api_response(code=200, msg=_('New tenant created successfully.'))\n except Exception as e:\n logger.error(e)\n return api_error(code=500, msg=_('Internal server error.'))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000157", "length_bytes": 34523, "license_type": "permissive", "methods": [{"docstring": "Get all tenants", "name": "get", "signature": "def get(self, request)"}, {"docstring": "Create a new tenant", "name": "post", "signature": "def post(self, request)"}], "n_methods": 2, "prompt": "Implement the Python class `AdminTenants` described below.\n\nClass description:\nImplement the AdminTenants class.\n\nMethod signatures and docstrings:\n- def get(self, request): Get all tenants\n- def post(self, request): Create a new tenant", "prompted_full_text": "Implement the Python class `AdminTenants` described below.\n\nClass description:\nImplement the AdminTenants class.\n\nMethod signatures and docstrings:\n- def get(self, request): Get all tenants\n- def post(self, request): Create a new tenant\n\n<|skeleton|>\nclass AdminTenants:\n\n def get(self, request):\n \"\"\"Get all tenants\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Create a new tenant\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n insts = Tenant.objects.all()\n list_insts = []\n for tenant in insts:\n list_insts.append({'id': tenant.id, 'name': tenant.name, 'ctime': tenant.create_time, 'space_quota': TenantQuota.objects.get_or_none(tenant=tenant), 'space_usage': get_tenant_space_usage(tenant)})\n return api_response(data={'insts': list_insts})\n<|end_body_0|>\n\n<|body_start_1|>\n inst_name = request.POST.get('name').strip()\n if not inst_name:\n return api_error(code=400, msg=_('Tenant name is required.'))\n try:\n Tenant.objects.get(name=inst_name)\n return api_error(code=400, msg=_('Tenant existed. 
Please choose another name for the tenant.'))\n except Tenant.DoesNotExist:\n pass\n try:\n Tenant.objects.create(name=inst_name)\n return api_response(code=200, msg=_('New tenant created successfully.'))\n except Exception as e:\n logger.error(e)\n return api_error(code=500, msg=_('Internal server error.'))\n<|end_body_1|>\n", "revision_id": "13b3ed26a04248211ef91ca70dccc617be27a3c3", "skeleton": "<|skeleton|>\nclass AdminTenants:\n\n def get(self, request):\n \"\"\"Get all tenants\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"Create a new tenant\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AdminTenants:\n def get(self, request):\n \"\"\"Get all tenants\"\"\"\n insts = Tenant.objects.all()\n list_insts = []\n for tenant in insts:\n list_insts.append({'id': tenant.id, 'name': tenant.name, 'ctime': tenant.create_time, 'space_quota': TenantQuota.objects.get_or_none(tenant=tenant), 'space_usage': get_tenant_space_usage(tenant)})\n return api_response(data={'insts': list_insts})\n\n def post(self, request):\n \"\"\"Create a new tenant\"\"\"\n inst_name = request.POST.get('name').strip()\n if not inst_name:\n return api_error(code=400, msg=_('Tenant name is required.'))\n try:\n Tenant.objects.get(name=inst_name)\n return api_error(code=400, msg=_('Tenant existed. Please choose another name for the tenant.'))\n except Tenant.DoesNotExist:\n pass\n try:\n Tenant.objects.create(name=inst_name)\n return api_response(code=200, msg=_('New tenant created successfully.'))\n except Exception as e:\n logger.error(e)\n return api_error(code=500, msg=_('Internal server error.'))\n", "source": "the_stack_v2_python_sparse", "source_path": "fhs/usr/share/python/syncwerk/restapi/restapi/api3/custom/admin/tenants.py", "source_repo": "syncwerk/syncwerk-server-restapi", "split": "val", "star_events_count": 0} {"blob_id": "dca124a6c768faea689a05d8c1dffc3f670b8467", "bodies": ["super(DecoderBlock, self).__init__()\nself.mha1 = MultiHeadAttention(dm, h)\nself.mha2 = MultiHeadAttention(dm, h)\nself.dense_hidden = tf.keras.layers.Dense(units=hidden, activation='relu')\nself.dense_output = tf.keras.layers.Dense(units=dm)\nself.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\nself.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\nself.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\nself.dropout1 = tf.keras.layers.Dropout(drop_rate)\nself.dropout2 = tf.keras.layers.Dropout(drop_rate)\nself.dropout3 = tf.keras.layers.Dropout(drop_rate)", "attn_out, _ = self.mha1(x, x, x, look_ahead_mask)\nattn_out = self.dropout1(attn_out, training=training)\nout1 = self.layernorm1(x + attn_out)\nattn_out, _ = self.mha2(out1, encoder_output, encoder_output, padding_mask)\nattn_out = self.dropout2(attn_out, training=training)\nout2 = self.layernorm2(out1 + attn_out)\ndense_output = self.dense_hidden(out2)\ndense_output = self.dense_output(dense_output)\ndense_output = self.dropout3(dense_output, training=training)\nout3 = self.layernorm3(out2 + dense_output)\nreturn out3"], "bodies_text": "<|body_start_0|>\n super(DecoderBlock, self).__init__()\n self.mha1 = MultiHeadAttention(dm, h)\n self.mha2 = MultiHeadAttention(dm, h)\n self.dense_hidden = tf.keras.layers.Dense(units=hidden, activation='relu')\n self.dense_output = tf.keras.layers.Dense(units=dm)\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.layernorm2 = 
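Two details in the `AdminTenants` record above are worth flagging. `TenantQuota.objects.get_or_none(tenant=tenant)` is not a stock Django manager method, so the project presumably defines it on a custom manager; a common sketch is below (an assumption, the real project may implement it differently). Also, `request.POST.get('name').strip()` raises `AttributeError` when the field is absent; `request.POST.get('name', '').strip()` is the defensive spelling.

from django.db import models

class GetOrNoneManager(models.Manager):
    # returns the matching row, or None instead of raising DoesNotExist
    def get_or_none(self, **kwargs):
        try:
            return self.get(**kwargs)
        except self.model.DoesNotExist:
            return None

# attached on the model as:  objects = GetOrNoneManager()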
tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.dropout1 = tf.keras.layers.Dropout(drop_rate)\n self.dropout2 = tf.keras.layers.Dropout(drop_rate)\n self.dropout3 = tf.keras.layers.Dropout(drop_rate)\n<|end_body_0|>\n\n<|body_start_1|>\n attn_out, _ = self.mha1(x, x, x, look_ahead_mask)\n attn_out = self.dropout1(attn_out, training=training)\n out1 = self.layernorm1(x + attn_out)\n attn_out, _ = self.mha2(out1, encoder_output, encoder_output, padding_mask)\n attn_out = self.dropout2(attn_out, training=training)\n out2 = self.layernorm2(out1 + attn_out)\n dense_output = self.dense_hidden(out2)\n dense_output = self.dense_output(dense_output)\n dense_output = self.dropout3(dense_output, training=training)\n out3 = self.layernorm3(out2 + dense_output)\n return out3\n<|end_body_1|>\n", "class_docstring": "Class representation of a decoder block for a transformer", "class_name": "DecoderBlock", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DecoderBlock:\n \"\"\"Class representation of a decoder block for a transformer\"\"\"\n\n def __init__(self, dm, h, hidden, drop_rate=0.1):\n \"\"\"dm: Dimensionality of the model h: Number of heads hidden: Number of hidden units in the fully connected layer drop_rate: Dropout rate\"\"\"\n <|body_0|>\n\n def call(self, x, encoder_output, training, look_ahead_mask, padding_mask):\n \"\"\"x: tensor of shape (batch, target_seq_len, dm)containing the input to the decoder block encoder_output: tensor of shape (batch, input_seq_len, dm)containing the output of the encoder training: boolean to determine if the model is training look_ahead_mask: mask to be applied to the first multi head attention layer padding_mask: mask to be applied to the second multi head attention layer Returns: tensor of shape (batch, target_seq_len, dm) containing the block's output\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(DecoderBlock, self).__init__()\n self.mha1 = MultiHeadAttention(dm, h)\n self.mha2 = MultiHeadAttention(dm, h)\n self.dense_hidden = tf.keras.layers.Dense(units=hidden, activation='relu')\n self.dense_output = tf.keras.layers.Dense(units=dm)\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.dropout1 = tf.keras.layers.Dropout(drop_rate)\n self.dropout2 = tf.keras.layers.Dropout(drop_rate)\n self.dropout3 = tf.keras.layers.Dropout(drop_rate)\n<|end_body_0|>\n\n<|body_start_1|>\n attn_out, _ = self.mha1(x, x, x, look_ahead_mask)\n attn_out = self.dropout1(attn_out, training=training)\n out1 = self.layernorm1(x + attn_out)\n attn_out, _ = self.mha2(out1, encoder_output, encoder_output, padding_mask)\n attn_out = self.dropout2(attn_out, training=training)\n out2 = self.layernorm2(out1 + attn_out)\n dense_output = self.dense_hidden(out2)\n dense_output = self.dense_output(dense_output)\n dense_output = self.dropout3(dense_output, training=training)\n out3 = self.layernorm3(out2 + dense_output)\n return out3\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000158", "length_bytes": 2892, "license_type": "no_license", "methods": [{"docstring": "dm: Dimensionality of the model h: Number of heads hidden: Number of hidden units in the fully connected layer drop_rate: Dropout rate", "name": "__init__", "signature": "def __init__(self, dm, h, 
hidden, drop_rate=0.1)"}, {"docstring": "x: tensor of shape (batch, target_seq_len, dm)containing the input to the decoder block encoder_output: tensor of shape (batch, input_seq_len, dm)containing the output of the encoder training: boolean to determine if the model is training look_ahead_mask: mask to be applied to the first multi head attention layer padding_mask: mask to be applied to the second multi head attention layer Returns: tensor of shape (batch, target_seq_len, dm) containing the block's output", "name": "call", "signature": "def call(self, x, encoder_output, training, look_ahead_mask, padding_mask)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006806", "prompt": "Implement the Python class `DecoderBlock` described below.\n\nClass description:\nClass representation of a decoder block for a transformer\n\nMethod signatures and docstrings:\n- def __init__(self, dm, h, hidden, drop_rate=0.1): dm: Dimensionality of the model h: Number of heads hidden: Number of hidden units in the fully connected layer drop_rate: Dropout rate\n- def call(self, x, encoder_output, training, look_ahead_mask, padding_mask): x: tensor of shape (batch, target_seq_len, dm)containing the input to the decoder block encoder_output: tensor of shape (batch, input_seq_len, dm)containing the output of the encoder training: boolean to determine if the model is training look_ahead_mask: mask to be applied to the first multi head attention layer padding_mask: mask to be applied to the second multi head attention layer Returns: tensor of shape (batch, target_seq_len, dm) containing the block's output", "prompted_full_text": "Implement the Python class `DecoderBlock` described below.\n\nClass description:\nClass representation of a decoder block for a transformer\n\nMethod signatures and docstrings:\n- def __init__(self, dm, h, hidden, drop_rate=0.1): dm: Dimensionality of the model h: Number of heads hidden: Number of hidden units in the fully connected layer drop_rate: Dropout rate\n- def call(self, x, encoder_output, training, look_ahead_mask, padding_mask): x: tensor of shape (batch, target_seq_len, dm)containing the input to the decoder block encoder_output: tensor of shape (batch, input_seq_len, dm)containing the output of the encoder training: boolean to determine if the model is training look_ahead_mask: mask to be applied to the first multi head attention layer padding_mask: mask to be applied to the second multi head attention layer Returns: tensor of shape (batch, target_seq_len, dm) containing the block's output\n\n<|skeleton|>\nclass DecoderBlock:\n \"\"\"Class representation of a decoder block for a transformer\"\"\"\n\n def __init__(self, dm, h, hidden, drop_rate=0.1):\n \"\"\"dm: Dimensionality of the model h: Number of heads hidden: Number of hidden units in the fully connected layer drop_rate: Dropout rate\"\"\"\n <|body_0|>\n\n def call(self, x, encoder_output, training, look_ahead_mask, padding_mask):\n \"\"\"x: tensor of shape (batch, target_seq_len, dm)containing the input to the decoder block encoder_output: tensor of shape (batch, input_seq_len, dm)containing the output of the encoder training: boolean to determine if the model is training look_ahead_mask: mask to be applied to the first multi head attention layer padding_mask: mask to be applied to the second multi head attention layer Returns: tensor of shape (batch, target_seq_len, dm) containing the block's output\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(DecoderBlock, self).__init__()\n 
self.mha1 = MultiHeadAttention(dm, h)\n self.mha2 = MultiHeadAttention(dm, h)\n self.dense_hidden = tf.keras.layers.Dense(units=hidden, activation='relu')\n self.dense_output = tf.keras.layers.Dense(units=dm)\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.dropout1 = tf.keras.layers.Dropout(drop_rate)\n self.dropout2 = tf.keras.layers.Dropout(drop_rate)\n self.dropout3 = tf.keras.layers.Dropout(drop_rate)\n<|end_body_0|>\n\n<|body_start_1|>\n attn_out, _ = self.mha1(x, x, x, look_ahead_mask)\n attn_out = self.dropout1(attn_out, training=training)\n out1 = self.layernorm1(x + attn_out)\n attn_out, _ = self.mha2(out1, encoder_output, encoder_output, padding_mask)\n attn_out = self.dropout2(attn_out, training=training)\n out2 = self.layernorm2(out1 + attn_out)\n dense_output = self.dense_hidden(out2)\n dense_output = self.dense_output(dense_output)\n dense_output = self.dropout3(dense_output, training=training)\n out3 = self.layernorm3(out2 + dense_output)\n return out3\n<|end_body_1|>\n", "revision_id": "2757c8526290197d45a4de33cda71e686ddcbf1c", "skeleton": "<|skeleton|>\nclass DecoderBlock:\n \"\"\"Class representation of a decoder block for a transformer\"\"\"\n\n def __init__(self, dm, h, hidden, drop_rate=0.1):\n \"\"\"dm: Dimensionality of the model h: Number of heads hidden: Number of hidden units in the fully connected layer drop_rate: Dropout rate\"\"\"\n <|body_0|>\n\n def call(self, x, encoder_output, training, look_ahead_mask, padding_mask):\n \"\"\"x: tensor of shape (batch, target_seq_len, dm)containing the input to the decoder block encoder_output: tensor of shape (batch, input_seq_len, dm)containing the output of the encoder training: boolean to determine if the model is training look_ahead_mask: mask to be applied to the first multi head attention layer padding_mask: mask to be applied to the second multi head attention layer Returns: tensor of shape (batch, target_seq_len, dm) containing the block's output\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DecoderBlock:\n \"\"\"Class representation of a decoder block for a transformer\"\"\"\n\n def __init__(self, dm, h, hidden, drop_rate=0.1):\n \"\"\"dm: Dimensionality of the model h: Number of heads hidden: Number of hidden units in the fully connected layer drop_rate: Dropout rate\"\"\"\n super(DecoderBlock, self).__init__()\n self.mha1 = MultiHeadAttention(dm, h)\n self.mha2 = MultiHeadAttention(dm, h)\n self.dense_hidden = tf.keras.layers.Dense(units=hidden, activation='relu')\n self.dense_output = tf.keras.layers.Dense(units=dm)\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.dropout1 = tf.keras.layers.Dropout(drop_rate)\n self.dropout2 = tf.keras.layers.Dropout(drop_rate)\n self.dropout3 = tf.keras.layers.Dropout(drop_rate)\n\n def call(self, x, encoder_output, training, look_ahead_mask, padding_mask):\n \"\"\"x: tensor of shape (batch, target_seq_len, dm)containing the input to the decoder block encoder_output: tensor of shape (batch, input_seq_len, dm)containing the output of the encoder training: boolean to determine if the model is training look_ahead_mask: mask to be 
applied to the first multi head attention layer padding_mask: mask to be applied to the second multi head attention layer Returns: tensor of shape (batch, target_seq_len, dm) containing the block's output\"\"\"\n attn_out, _ = self.mha1(x, x, x, look_ahead_mask)\n attn_out = self.dropout1(attn_out, training=training)\n out1 = self.layernorm1(x + attn_out)\n attn_out, _ = self.mha2(out1, encoder_output, encoder_output, padding_mask)\n attn_out = self.dropout2(attn_out, training=training)\n out2 = self.layernorm2(out1 + attn_out)\n dense_output = self.dense_hidden(out2)\n dense_output = self.dense_output(dense_output)\n dense_output = self.dropout3(dense_output, training=training)\n out3 = self.layernorm3(out2 + dense_output)\n return out3\n", "source": "the_stack_v2_python_sparse", "source_path": "supervised_learning/0x11-attention/8-transformer_decoder_block.py", "source_repo": "95ktsmith/holbertonschool-machine_learning", "split": "val", "star_events_count": 0} {"blob_id": "52570fc4af2574ce42ffbe00851bb6096dddd85e", "bodies": ["if xml_val not in cls._xml_to_member:\n raise InvalidXmlError(\"attribute value '%s' not valid for this type\" % xml_val)\nreturn cls._xml_to_member[xml_val]", "if enum_val not in cls._member_to_xml:\n raise ValueError(\"value '%s' not in enumeration %s\" % (enum_val, cls.__name__))\nreturn cls._member_to_xml[enum_val]"], "bodies_text": "<|body_start_0|>\n if xml_val not in cls._xml_to_member:\n raise InvalidXmlError(\"attribute value '%s' not valid for this type\" % xml_val)\n return cls._xml_to_member[xml_val]\n<|end_body_0|>\n\n<|body_start_1|>\n if enum_val not in cls._member_to_xml:\n raise ValueError(\"value '%s' not in enumeration %s\" % (enum_val, cls.__name__))\n return cls._member_to_xml[enum_val]\n<|end_body_1|>\n", "class_docstring": "Provides ``to_xml()`` and ``from_xml()`` methods in addition to base enumeration features", "class_name": "XmlEnumeration", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass XmlEnumeration:\n \"\"\"Provides ``to_xml()`` and ``from_xml()`` methods in addition to base enumeration features\"\"\"\n\n def from_xml(cls, xml_val):\n \"\"\"Return the enumeration member corresponding to the XML value *xml_val*.\"\"\"\n <|body_0|>\n\n def to_xml(cls, enum_val):\n \"\"\"Return the XML value of the enumeration value *enum_val*.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if xml_val not in cls._xml_to_member:\n raise InvalidXmlError(\"attribute value '%s' not valid for this type\" % xml_val)\n return cls._xml_to_member[xml_val]\n<|end_body_0|>\n\n<|body_start_1|>\n if enum_val not in cls._member_to_xml:\n raise ValueError(\"value '%s' not in enumeration %s\" % (enum_val, cls.__name__))\n return cls._member_to_xml[enum_val]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000159", "length_bytes": 10958, "license_type": "permissive", "methods": [{"docstring": "Return the enumeration member corresponding to the XML value *xml_val*.", "name": "from_xml", "signature": "def from_xml(cls, xml_val)"}, {"docstring": "Return the XML value of the enumeration value *enum_val*.", "name": "to_xml", "signature": "def to_xml(cls, enum_val)"}], "n_methods": 2, "prompt": "Implement the Python class `XmlEnumeration` described below.\n\nClass description:\nProvides ``to_xml()`` and ``from_xml()`` methods in addition to base enumeration features\n\nMethod signatures and docstrings:\n- def from_xml(cls, xml_val): Return the enumeration member corresponding to the 
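The `DecoderBlock` record above composes masked self-attention, encoder-decoder attention, and a two-layer feed-forward network, each wrapped in dropout, a residual add, and LayerNormalization, so the (batch, target_seq_len, dm) shape is preserved end to end. A shape check, assuming the record's `DecoderBlock` and a course-style `MultiHeadAttention` returning (output, weights) and tolerating None masks are in scope; `.call(...)` is invoked directly so the sketch works whether or not the class subclasses tf.keras.layers.Layer:

import tensorflow as tf

batch, tgt_len, in_len, dm = 2, 7, 9, 64
block = DecoderBlock(dm=dm, h=8, hidden=256)   # from the record above

x = tf.random.uniform((batch, tgt_len, dm))
encoder_output = tf.random.uniform((batch, in_len, dm))
out = block.call(x, encoder_output, training=False,
                 look_ahead_mask=None, padding_mask=None)
assert out.shape == (batch, tgt_len, dm)   # residual paths keep dm fixed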
XML value *xml_val*.\n- def to_xml(cls, enum_val): Return the XML value of the enumeration value *enum_val*.", "prompted_full_text": "Implement the Python class `XmlEnumeration` described below.\n\nClass description:\nProvides ``to_xml()`` and ``from_xml()`` methods in addition to base enumeration features\n\nMethod signatures and docstrings:\n- def from_xml(cls, xml_val): Return the enumeration member corresponding to the XML value *xml_val*.\n- def to_xml(cls, enum_val): Return the XML value of the enumeration value *enum_val*.\n\n<|skeleton|>\nclass XmlEnumeration:\n \"\"\"Provides ``to_xml()`` and ``from_xml()`` methods in addition to base enumeration features\"\"\"\n\n def from_xml(cls, xml_val):\n \"\"\"Return the enumeration member corresponding to the XML value *xml_val*.\"\"\"\n <|body_0|>\n\n def to_xml(cls, enum_val):\n \"\"\"Return the XML value of the enumeration value *enum_val*.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if xml_val not in cls._xml_to_member:\n raise InvalidXmlError(\"attribute value '%s' not valid for this type\" % xml_val)\n return cls._xml_to_member[xml_val]\n<|end_body_0|>\n\n<|body_start_1|>\n if enum_val not in cls._member_to_xml:\n raise ValueError(\"value '%s' not in enumeration %s\" % (enum_val, cls.__name__))\n return cls._member_to_xml[enum_val]\n<|end_body_1|>\n", "revision_id": "cabf6e4f1970dc14302f87414f170de19944bac2", "skeleton": "<|skeleton|>\nclass XmlEnumeration:\n \"\"\"Provides ``to_xml()`` and ``from_xml()`` methods in addition to base enumeration features\"\"\"\n\n def from_xml(cls, xml_val):\n \"\"\"Return the enumeration member corresponding to the XML value *xml_val*.\"\"\"\n <|body_0|>\n\n def to_xml(cls, enum_val):\n \"\"\"Return the XML value of the enumeration value *enum_val*.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class XmlEnumeration:\n \"\"\"Provides ``to_xml()`` and ``from_xml()`` methods in addition to base enumeration features\"\"\"\n\n def from_xml(cls, xml_val):\n \"\"\"Return the enumeration member corresponding to the XML value *xml_val*.\"\"\"\n if xml_val not in cls._xml_to_member:\n raise InvalidXmlError(\"attribute value '%s' not valid for this type\" % xml_val)\n return cls._xml_to_member[xml_val]\n\n def to_xml(cls, enum_val):\n \"\"\"Return the XML value of the enumeration value *enum_val*.\"\"\"\n if enum_val not in cls._member_to_xml:\n raise ValueError(\"value '%s' not in enumeration %s\" % (enum_val, cls.__name__))\n return cls._member_to_xml[enum_val]\n", "source": "the_stack_v2_python_sparse", "source_path": "Pdf_docx_pptx_xlsx_epub_png/source/docx/enum/base.py", "source_repo": "ryfeus/lambda-packs", "split": "val", "star_events_count": 1283} {"blob_id": "c260f98429f2a4ac80419689c1f658c1bd8179b8", "bodies": ["self.assertTrue(anagram('cinema', 'iceman'))\nself.assertTrue(anagram('dormitory', 'dirtyroom'))\nself.assertFalse(anagram('hello', 'lohae'))\nself.assertFalse(anagram('ill', 'like'))\nself.assertFalse(anagram('illness', 'nes'))", "self.assertTrue(anagram_dd('cinema', 'iceman'))\nself.assertTrue(anagram_dd('dormitory', 'dirtyroom'))\nself.assertFalse(anagram_dd('hello', 'lohae'))\nself.assertFalse(anagram_dd('ill', 'like'))\nself.assertFalse(anagram('illness', 'nes'))", "woodchucks = [('how', 3), ('much', 3), ('wood', 3), ('would', 2), ('a', 1), ('woodchuck', 1), ('chuck', 3), ('if', 1), ('a', 1), ('woodchuck', 2), ('could', 2), ('chuck', 1), ('wood', 1)]\nresult1 
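The `XmlEnumeration` mixin above (it appears in python-docx) needs only `_xml_to_member` and `_member_to_xml` lookup tables on each subclass; in the original library both lookups are classmethods, which the `cls` parameter in the record's signatures implies. A self-contained toy showing the round trip; `_Alignment` and its table entries are invented for illustration:

class InvalidXmlError(Exception):
    pass

class _Alignment:
    """Toy stand-in for one XmlEnumeration subclass."""
    _xml_to_member = {'l': 'LEFT', 'c': 'CENTER', 'r': 'RIGHT'}
    _member_to_xml = {'LEFT': 'l', 'CENTER': 'c', 'RIGHT': 'r'}

    @classmethod
    def from_xml(cls, xml_val):
        if xml_val not in cls._xml_to_member:
            raise InvalidXmlError(
                "attribute value '%s' not valid for this type" % xml_val)
        return cls._xml_to_member[xml_val]

    @classmethod
    def to_xml(cls, enum_val):
        if enum_val not in cls._member_to_xml:
            raise ValueError(
                "value '%s' not in enumeration %s" % (enum_val, cls.__name__))
        return cls._member_to_xml[enum_val]

assert _Alignment.from_xml('c') == 'CENTER'
assert _Alignment.to_xml('RIGHT') == 'r'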
= [['a', [1]], ['chuck', [1, 3]], ['could', [2]], ['how', [3]], ['if', [1]], ['much', [3]], ['wood', [1, 3]], ['woodchuck', [1, 2]], ['would', [2]]]\nself.assertEqual(book_index(woodchucks), result1)"], "bodies_text": "<|body_start_0|>\n self.assertTrue(anagram('cinema', 'iceman'))\n self.assertTrue(anagram('dormitory', 'dirtyroom'))\n self.assertFalse(anagram('hello', 'lohae'))\n self.assertFalse(anagram('ill', 'like'))\n self.assertFalse(anagram('illness', 'nes'))\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertTrue(anagram_dd('cinema', 'iceman'))\n self.assertTrue(anagram_dd('dormitory', 'dirtyroom'))\n self.assertFalse(anagram_dd('hello', 'lohae'))\n self.assertFalse(anagram_dd('ill', 'like'))\n self.assertFalse(anagram('illness', 'nes'))\n<|end_body_1|>\n\n<|body_start_2|>\n woodchucks = [('how', 3), ('much', 3), ('wood', 3), ('would', 2), ('a', 1), ('woodchuck', 1), ('chuck', 3), ('if', 1), ('a', 1), ('woodchuck', 2), ('could', 2), ('chuck', 1), ('wood', 1)]\n result1 = [['a', [1]], ['chuck', [1, 3]], ['could', [2]], ['how', [3]], ['if', [1]], ['much', [3]], ['wood', [1, 3]], ['woodchuck', [1, 2]], ['would', [2]]]\n self.assertEqual(book_index(woodchucks), result1)\n<|end_body_2|>\n", "class_docstring": "verify that functions works fine", "class_name": "FunctionTest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FunctionTest:\n \"\"\"verify that functions works fine\"\"\"\n\n def test_anagram(self):\n \"\"\"verify anagram works fine\"\"\"\n <|body_0|>\n\n def test_anagram_dd(self):\n \"\"\"verify anagram_dd works fine\"\"\"\n <|body_1|>\n\n def test_book_index(self):\n \"\"\"verify that book_index works fine\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.assertTrue(anagram('cinema', 'iceman'))\n self.assertTrue(anagram('dormitory', 'dirtyroom'))\n self.assertFalse(anagram('hello', 'lohae'))\n self.assertFalse(anagram('ill', 'like'))\n self.assertFalse(anagram('illness', 'nes'))\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertTrue(anagram_dd('cinema', 'iceman'))\n self.assertTrue(anagram_dd('dormitory', 'dirtyroom'))\n self.assertFalse(anagram_dd('hello', 'lohae'))\n self.assertFalse(anagram_dd('ill', 'like'))\n self.assertFalse(anagram('illness', 'nes'))\n<|end_body_1|>\n\n<|body_start_2|>\n woodchucks = [('how', 3), ('much', 3), ('wood', 3), ('would', 2), ('a', 1), ('woodchuck', 1), ('chuck', 3), ('if', 1), ('a', 1), ('woodchuck', 2), ('could', 2), ('chuck', 1), ('wood', 1)]\n result1 = [['a', [1]], ['chuck', [1, 3]], ['could', [2]], ['how', [3]], ['if', [1]], ['much', [3]], ['wood', [1, 3]], ['woodchuck', [1, 2]], ['would', [2]]]\n self.assertEqual(book_index(woodchucks), result1)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000160", "length_bytes": 2565, "license_type": "no_license", "methods": [{"docstring": "verify anagram works fine", "name": "test_anagram", "signature": "def test_anagram(self)"}, {"docstring": "verify anagram_dd works fine", "name": "test_anagram_dd", "signature": "def test_anagram_dd(self)"}, {"docstring": "verify that book_index works fine", "name": "test_book_index", "signature": "def test_book_index(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002161", "prompt": "Implement the Python class `FunctionTest` described below.\n\nClass description:\nverify that functions works fine\n\nMethod signatures and docstrings:\n- def test_anagram(self): verify anagram works fine\n- def test_anagram_dd(self): verify anagram_dd works 
fine\n- def test_book_index(self): verify that book_index works fine", "prompted_full_text": "Implement the Python class `FunctionTest` described below.\n\nClass description:\nverify that functions works fine\n\nMethod signatures and docstrings:\n- def test_anagram(self): verify anagram works fine\n- def test_anagram_dd(self): verify anagram_dd works fine\n- def test_book_index(self): verify that book_index works fine\n\n<|skeleton|>\nclass FunctionTest:\n \"\"\"verify that functions works fine\"\"\"\n\n def test_anagram(self):\n \"\"\"verify anagram works fine\"\"\"\n <|body_0|>\n\n def test_anagram_dd(self):\n \"\"\"verify anagram_dd works fine\"\"\"\n <|body_1|>\n\n def test_book_index(self):\n \"\"\"verify that book_index works fine\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.assertTrue(anagram('cinema', 'iceman'))\n self.assertTrue(anagram('dormitory', 'dirtyroom'))\n self.assertFalse(anagram('hello', 'lohae'))\n self.assertFalse(anagram('ill', 'like'))\n self.assertFalse(anagram('illness', 'nes'))\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertTrue(anagram_dd('cinema', 'iceman'))\n self.assertTrue(anagram_dd('dormitory', 'dirtyroom'))\n self.assertFalse(anagram_dd('hello', 'lohae'))\n self.assertFalse(anagram_dd('ill', 'like'))\n self.assertFalse(anagram('illness', 'nes'))\n<|end_body_1|>\n\n<|body_start_2|>\n woodchucks = [('how', 3), ('much', 3), ('wood', 3), ('would', 2), ('a', 1), ('woodchuck', 1), ('chuck', 3), ('if', 1), ('a', 1), ('woodchuck', 2), ('could', 2), ('chuck', 1), ('wood', 1)]\n result1 = [['a', [1]], ['chuck', [1, 3]], ['could', [2]], ['how', [3]], ['if', [1]], ['much', [3]], ['wood', [1, 3]], ['woodchuck', [1, 2]], ['would', [2]]]\n self.assertEqual(book_index(woodchucks), result1)\n<|end_body_2|>\n", "revision_id": "f45bd7c20e91584428c90a332173ee9c8fa66a4c", "skeleton": "<|skeleton|>\nclass FunctionTest:\n \"\"\"verify that functions works fine\"\"\"\n\n def test_anagram(self):\n \"\"\"verify anagram works fine\"\"\"\n <|body_0|>\n\n def test_anagram_dd(self):\n \"\"\"verify anagram_dd works fine\"\"\"\n <|body_1|>\n\n def test_book_index(self):\n \"\"\"verify that book_index works fine\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FunctionTest:\n \"\"\"verify that functions works fine\"\"\"\n\n def test_anagram(self):\n \"\"\"verify anagram works fine\"\"\"\n self.assertTrue(anagram('cinema', 'iceman'))\n self.assertTrue(anagram('dormitory', 'dirtyroom'))\n self.assertFalse(anagram('hello', 'lohae'))\n self.assertFalse(anagram('ill', 'like'))\n self.assertFalse(anagram('illness', 'nes'))\n\n def test_anagram_dd(self):\n \"\"\"verify anagram_dd works fine\"\"\"\n self.assertTrue(anagram_dd('cinema', 'iceman'))\n self.assertTrue(anagram_dd('dormitory', 'dirtyroom'))\n self.assertFalse(anagram_dd('hello', 'lohae'))\n self.assertFalse(anagram_dd('ill', 'like'))\n self.assertFalse(anagram('illness', 'nes'))\n\n def test_book_index(self):\n \"\"\"verify that book_index works fine\"\"\"\n woodchucks = [('how', 3), ('much', 3), ('wood', 3), ('would', 2), ('a', 1), ('woodchuck', 1), ('chuck', 3), ('if', 1), ('a', 1), ('woodchuck', 2), ('could', 2), ('chuck', 1), ('wood', 1)]\n result1 = [['a', [1]], ['chuck', [1, 3]], ['could', [2]], ['how', [3]], ['if', [1]], ['much', [3]], ['wood', [1, 3]], ['woodchuck', [1, 2]], ['would', [2]]]\n self.assertEqual(book_index(woodchucks), result1)\n", "source": "the_stack_v2_python_sparse", 
"source_path": "HanrunLiHW07.py", "source_repo": "obleevious/SSW810Final_repo", "split": "val", "star_events_count": 0} {"blob_id": "1abe9da6a20bb1086c0837faf9fd4e7af63f03e8", "bodies": ["self._payment_dates = payment_dates\nself._payment_steps = payment_steps\nself._maturity = payment_dates[len(payment_dates) - 1]\nself._steps = payment_steps[len(payment_steps) - 1]\nself._coupon_rates = coupon_rates\nself._frequency = frequency\nself._bond_tree = {}", "if not hw_tree._is_built:\n hw_tree.hw_prob()\n hw_tree.calibrate()", "self.build_hw_tree(hw_tree)\nfor i in reversed(range(self._steps + 1)):\n for j in range(-i, i + 1):\n if i == self._steps:\n index = self._payment_steps.tolist().index(i)\n self._bond_tree[i, j] = 1 + self._coupon_rates[index] / self._frequency\n continue\n else:\n discounted_expected_value = 0\n for k in range(j - 2, j + 3):\n if k <= i + 1 and k >= -i - 1:\n discounted_expected_value += hw_tree.prob(j, k) * self._bond_tree[i + 1, k]\n else:\n continue\n discounted_expected_value *= hw_tree._discount_factor_tree[i, j]\n if i in self._payment_steps.tolist():\n index = self._payment_steps.tolist().index(i)\n self._bond_tree[i, j] = discounted_expected_value + self._coupon_rates[index] / self._frequency\n else:\n self._bond_tree[i, j] = discounted_expected_value\nreturn self._bond_tree[0, 0]"], "bodies_text": "<|body_start_0|>\n self._payment_dates = payment_dates\n self._payment_steps = payment_steps\n self._maturity = payment_dates[len(payment_dates) - 1]\n self._steps = payment_steps[len(payment_steps) - 1]\n self._coupon_rates = coupon_rates\n self._frequency = frequency\n self._bond_tree = {}\n<|end_body_0|>\n\n<|body_start_1|>\n if not hw_tree._is_built:\n hw_tree.hw_prob()\n hw_tree.calibrate()\n<|end_body_1|>\n\n<|body_start_2|>\n self.build_hw_tree(hw_tree)\n for i in reversed(range(self._steps + 1)):\n for j in range(-i, i + 1):\n if i == self._steps:\n index = self._payment_steps.tolist().index(i)\n self._bond_tree[i, j] = 1 + self._coupon_rates[index] / self._frequency\n continue\n else:\n discounted_expected_value = 0\n for k in range(j - 2, j + 3):\n if k <= i + 1 and k >= -i - 1:\n discounted_expected_value += hw_tree.prob(j, k) * self._bond_tree[i + 1, k]\n else:\n continue\n discounted_expected_value *= hw_tree._discount_factor_tree[i, j]\n if i in self._payment_steps.tolist():\n index = self._payment_steps.tolist().index(i)\n self._bond_tree[i, j] = discounted_expected_value + self._coupon_rates[index] / self._frequency\n else:\n self._bond_tree[i, j] = discounted_expected_value\n return self._bond_tree[0, 0]\n<|end_body_2|>\n", "class_docstring": "Representation of a Bond", "class_name": "Bond", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Bond:\n \"\"\"Representation of a Bond\"\"\"\n\n def __init__(self, payment_dates, payment_steps, coupon_rates, frequency=2):\n \"\"\"Initialize a Bond object Parameters ---------- payment_dates : array_like of shape (M, ) with datetime payment dates payment_steps : array_like of shape (M, ) with integer payment steps that corresponds to the tree coupon_rates : scalar or array_like of shape (M, ) with the coupon rates frequency : integer scalar that specifies the number of payment for each year, default value is 2\"\"\"\n <|body_0|>\n\n def build_hw_tree(self, hw_tree):\n \"\"\"Helper function that builds and calibrate a Hull-White tree Parameters ---------- hw_tree: a HWTree class instance\"\"\"\n <|body_1|>\n\n def get_price(self, hw_tree):\n 
\"\"\"This function computes the bond price Parameters ---------- hw_tree: an HWTree class instance Return ------ out: float scalar that specifies the bond price\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._payment_dates = payment_dates\n self._payment_steps = payment_steps\n self._maturity = payment_dates[len(payment_dates) - 1]\n self._steps = payment_steps[len(payment_steps) - 1]\n self._coupon_rates = coupon_rates\n self._frequency = frequency\n self._bond_tree = {}\n<|end_body_0|>\n\n<|body_start_1|>\n if not hw_tree._is_built:\n hw_tree.hw_prob()\n hw_tree.calibrate()\n<|end_body_1|>\n\n<|body_start_2|>\n self.build_hw_tree(hw_tree)\n for i in reversed(range(self._steps + 1)):\n for j in range(-i, i + 1):\n if i == self._steps:\n index = self._payment_steps.tolist().index(i)\n self._bond_tree[i, j] = 1 + self._coupon_rates[index] / self._frequency\n continue\n else:\n discounted_expected_value = 0\n for k in range(j - 2, j + 3):\n if k <= i + 1 and k >= -i - 1:\n discounted_expected_value += hw_tree.prob(j, k) * self._bond_tree[i + 1, k]\n else:\n continue\n discounted_expected_value *= hw_tree._discount_factor_tree[i, j]\n if i in self._payment_steps.tolist():\n index = self._payment_steps.tolist().index(i)\n self._bond_tree[i, j] = discounted_expected_value + self._coupon_rates[index] / self._frequency\n else:\n self._bond_tree[i, j] = discounted_expected_value\n return self._bond_tree[0, 0]\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000161", "length_bytes": 6090, "license_type": "no_license", "methods": [{"docstring": "Initialize a Bond object Parameters ---------- payment_dates : array_like of shape (M, ) with datetime payment dates payment_steps : array_like of shape (M, ) with integer payment steps that corresponds to the tree coupon_rates : scalar or array_like of shape (M, ) with the coupon rates frequency : integer scalar that specifies the number of payment for each year, default value is 2", "name": "__init__", "signature": "def __init__(self, payment_dates, payment_steps, coupon_rates, frequency=2)"}, {"docstring": "Helper function that builds and calibrate a Hull-White tree Parameters ---------- hw_tree: a HWTree class instance", "name": "build_hw_tree", "signature": "def build_hw_tree(self, hw_tree)"}, {"docstring": "This function computes the bond price Parameters ---------- hw_tree: an HWTree class instance Return ------ out: float scalar that specifies the bond price", "name": "get_price", "signature": "def get_price(self, hw_tree)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_val_000055", "prompt": "Implement the Python class `Bond` described below.\n\nClass description:\nRepresentation of a Bond\n\nMethod signatures and docstrings:\n- def __init__(self, payment_dates, payment_steps, coupon_rates, frequency=2): Initialize a Bond object Parameters ---------- payment_dates : array_like of shape (M, ) with datetime payment dates payment_steps : array_like of shape (M, ) with integer payment steps that corresponds to the tree coupon_rates : scalar or array_like of shape (M, ) with the coupon rates frequency : integer scalar that specifies the number of payment for each year, default value is 2\n- def build_hw_tree(self, hw_tree): Helper function that builds and calibrate a Hull-White tree Parameters ---------- hw_tree: a HWTree class instance\n- def get_price(self, hw_tree): This function computes the bond price Parameters ---------- hw_tree: an HWTree class instance Return ------ out: float scalar that specifies 
the bond price", "prompted_full_text": "Implement the Python class `Bond` described below.\n\nClass description:\nRepresentation of a Bond\n\nMethod signatures and docstrings:\n- def __init__(self, payment_dates, payment_steps, coupon_rates, frequency=2): Initialize a Bond object Parameters ---------- payment_dates : array_like of shape (M, ) with datetime payment dates payment_steps : array_like of shape (M, ) with integer payment steps that corresponds to the tree coupon_rates : scalar or array_like of shape (M, ) with the coupon rates frequency : integer scalar that specifies the number of payment for each year, default value is 2\n- def build_hw_tree(self, hw_tree): Helper function that builds and calibrate a Hull-White tree Parameters ---------- hw_tree: a HWTree class instance\n- def get_price(self, hw_tree): This function computes the bond price Parameters ---------- hw_tree: an HWTree class instance Return ------ out: float scalar that specifies the bond price\n\n<|skeleton|>\nclass Bond:\n \"\"\"Representation of a Bond\"\"\"\n\n def __init__(self, payment_dates, payment_steps, coupon_rates, frequency=2):\n \"\"\"Initialize a Bond object Parameters ---------- payment_dates : array_like of shape (M, ) with datetime payment dates payment_steps : array_like of shape (M, ) with integer payment steps that corresponds to the tree coupon_rates : scalar or array_like of shape (M, ) with the coupon rates frequency : integer scalar that specifies the number of payment for each year, default value is 2\"\"\"\n <|body_0|>\n\n def build_hw_tree(self, hw_tree):\n \"\"\"Helper function that builds and calibrate a Hull-White tree Parameters ---------- hw_tree: a HWTree class instance\"\"\"\n <|body_1|>\n\n def get_price(self, hw_tree):\n \"\"\"This function computes the bond price Parameters ---------- hw_tree: an HWTree class instance Return ------ out: float scalar that specifies the bond price\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._payment_dates = payment_dates\n self._payment_steps = payment_steps\n self._maturity = payment_dates[len(payment_dates) - 1]\n self._steps = payment_steps[len(payment_steps) - 1]\n self._coupon_rates = coupon_rates\n self._frequency = frequency\n self._bond_tree = {}\n<|end_body_0|>\n\n<|body_start_1|>\n if not hw_tree._is_built:\n hw_tree.hw_prob()\n hw_tree.calibrate()\n<|end_body_1|>\n\n<|body_start_2|>\n self.build_hw_tree(hw_tree)\n for i in reversed(range(self._steps + 1)):\n for j in range(-i, i + 1):\n if i == self._steps:\n index = self._payment_steps.tolist().index(i)\n self._bond_tree[i, j] = 1 + self._coupon_rates[index] / self._frequency\n continue\n else:\n discounted_expected_value = 0\n for k in range(j - 2, j + 3):\n if k <= i + 1 and k >= -i - 1:\n discounted_expected_value += hw_tree.prob(j, k) * self._bond_tree[i + 1, k]\n else:\n continue\n discounted_expected_value *= hw_tree._discount_factor_tree[i, j]\n if i in self._payment_steps.tolist():\n index = self._payment_steps.tolist().index(i)\n self._bond_tree[i, j] = discounted_expected_value + self._coupon_rates[index] / self._frequency\n else:\n self._bond_tree[i, j] = discounted_expected_value\n return self._bond_tree[0, 0]\n<|end_body_2|>\n", "revision_id": "9f710a8de56fb9b4456c6f98be91f4b22ef5ede5", "skeleton": "<|skeleton|>\nclass Bond:\n \"\"\"Representation of a Bond\"\"\"\n\n def __init__(self, payment_dates, payment_steps, coupon_rates, frequency=2):\n \"\"\"Initialize a Bond object Parameters ---------- payment_dates : array_like of shape (M, ) with datetime 
payment dates payment_steps : array_like of shape (M, ) with integer payment steps that corresponds to the tree coupon_rates : scalar or array_like of shape (M, ) with the coupon rates frequency : integer scalar that specifies the number of payment for each year, default value is 2\"\"\"\n <|body_0|>\n\n def build_hw_tree(self, hw_tree):\n \"\"\"Helper function that builds and calibrate a Hull-White tree Parameters ---------- hw_tree: a HWTree class instance\"\"\"\n <|body_1|>\n\n def get_price(self, hw_tree):\n \"\"\"This function computes the bond price Parameters ---------- hw_tree: an HWTree class instance Return ------ out: float scalar that specifies the bond price\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Bond:\n \"\"\"Representation of a Bond\"\"\"\n\n def __init__(self, payment_dates, payment_steps, coupon_rates, frequency=2):\n \"\"\"Initialize a Bond object Parameters ---------- payment_dates : array_like of shape (M, ) with datetime payment dates payment_steps : array_like of shape (M, ) with integer payment steps that corresponds to the tree coupon_rates : scalar or array_like of shape (M, ) with the coupon rates frequency : integer scalar that specifies the number of payment for each year, default value is 2\"\"\"\n self._payment_dates = payment_dates\n self._payment_steps = payment_steps\n self._maturity = payment_dates[len(payment_dates) - 1]\n self._steps = payment_steps[len(payment_steps) - 1]\n self._coupon_rates = coupon_rates\n self._frequency = frequency\n self._bond_tree = {}\n\n def build_hw_tree(self, hw_tree):\n \"\"\"Helper function that builds and calibrate a Hull-White tree Parameters ---------- hw_tree: a HWTree class instance\"\"\"\n if not hw_tree._is_built:\n hw_tree.hw_prob()\n hw_tree.calibrate()\n\n def get_price(self, hw_tree):\n \"\"\"This function computes the bond price Parameters ---------- hw_tree: an HWTree class instance Return ------ out: float scalar that specifies the bond price\"\"\"\n self.build_hw_tree(hw_tree)\n for i in reversed(range(self._steps + 1)):\n for j in range(-i, i + 1):\n if i == self._steps:\n index = self._payment_steps.tolist().index(i)\n self._bond_tree[i, j] = 1 + self._coupon_rates[index] / self._frequency\n continue\n else:\n discounted_expected_value = 0\n for k in range(j - 2, j + 3):\n if k <= i + 1 and k >= -i - 1:\n discounted_expected_value += hw_tree.prob(j, k) * self._bond_tree[i + 1, k]\n else:\n continue\n discounted_expected_value *= hw_tree._discount_factor_tree[i, j]\n if i in self._payment_steps.tolist():\n index = self._payment_steps.tolist().index(i)\n self._bond_tree[i, j] = discounted_expected_value + self._coupon_rates[index] / self._frequency\n else:\n self._bond_tree[i, j] = discounted_expected_value\n return self._bond_tree[0, 0]\n", "source": "the_stack_v2_python_sparse", "source_path": "Hull-White Model/simple_bond.py", "source_repo": "jesusmramirez/Term-Structure-Models", "split": "val", "star_events_count": 1} {"blob_id": "3c54d4d5a6dbb932dbd355a2eed5e7d066a8e03b", "bodies": ["self.parsing_error = None\nself.tree = None\ntry:\n self.tree = lxml.etree.fromstring(data)\nexcept lxml.etree.XMLSyntaxError as ex:\n self.parsing_error = 'Parsing error: ' + str(ex)", "if self.parsing_error:\n return self.parsing_error\nfor node in self.tree.iter():\n if isinstance(node, lxml.etree._ProcessingInstruction):\n return 'Processing instruction found.'\n elif isinstance(node, 
lxml.etree._Element):\n if node.tag != '{http://www.w3.org/1999/xhtml}div':\n return 'Element outside the xhtml namespace: ' + node.tag\n for attr in node.attrib.keys():\n if attr == 'xmlns':\n if node.attrib[attr] != 'http://www.w3.org/1999/xhtml':\n return 'Attribute xmlns with invalid value: ' + node.attrib[attr] + '.'\n elif attr != 'class' and (not attr.startswith('data-wed-')):\n return 'Invalid attribute: ' + attr + '.'\nreturn False", "class_sought = 'btw:lemma'\nlemma = self.tree.xpath(\"xhtml:div[contains(@class, '\" + class_sought + \"')]\", namespaces={'xhtml': 'http://www.w3.org/1999/xhtml'})\nif len(lemma):\n classes = lemma[0].get('class').strip().split()\n if not any((x == class_sought for x in classes)):\n lemma = []\nif not len(lemma):\n return None\nlemma = lemma[0].text\nif lemma is None:\n return None\nlemma = lemma.strip()\nif len(lemma) == 0:\n return None\nreturn lemma", "authority = self.tree.get('data-wed-authority')\nif authority is None:\n raise ValueError(\"can't find the authority in the data passed\")\nreturn authority.strip()"], "bodies_text": "<|body_start_0|>\n self.parsing_error = None\n self.tree = None\n try:\n self.tree = lxml.etree.fromstring(data)\n except lxml.etree.XMLSyntaxError as ex:\n self.parsing_error = 'Parsing error: ' + str(ex)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.parsing_error:\n return self.parsing_error\n for node in self.tree.iter():\n if isinstance(node, lxml.etree._ProcessingInstruction):\n return 'Processing instruction found.'\n elif isinstance(node, lxml.etree._Element):\n if node.tag != '{http://www.w3.org/1999/xhtml}div':\n return 'Element outside the xhtml namespace: ' + node.tag\n for attr in node.attrib.keys():\n if attr == 'xmlns':\n if node.attrib[attr] != 'http://www.w3.org/1999/xhtml':\n return 'Attribute xmlns with invalid value: ' + node.attrib[attr] + '.'\n elif attr != 'class' and (not attr.startswith('data-wed-')):\n return 'Invalid attribute: ' + attr + '.'\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n class_sought = 'btw:lemma'\n lemma = self.tree.xpath(\"xhtml:div[contains(@class, '\" + class_sought + \"')]\", namespaces={'xhtml': 'http://www.w3.org/1999/xhtml'})\n if len(lemma):\n classes = lemma[0].get('class').strip().split()\n if not any((x == class_sought for x in classes)):\n lemma = []\n if not len(lemma):\n return None\n lemma = lemma[0].text\n if lemma is None:\n return None\n lemma = lemma.strip()\n if len(lemma) == 0:\n return None\n return lemma\n<|end_body_2|>\n\n<|body_start_3|>\n authority = self.tree.get('data-wed-authority')\n if authority is None:\n raise ValueError(\"can't find the authority in the data passed\")\n return authority.strip()\n<|end_body_3|>\n", "class_docstring": "", "class_name": "XMLTree", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass XMLTree:\n\n def __init__(self, data):\n \"\"\"The XML tree representation of the data. Allows performing operations on this tree or querying it. :param data: The data to parse. :type data: str\"\"\"\n <|body_0|>\n\n def is_data_unclean(self):\n \"\"\"Ensure that the tree parses as XML and that it contains only div elements in the ``http://www.w3.org/1999/xhtml`` namespace, no processing instructions, no attributes in any namespace and no attribute other than ``class`` or ``data-wed-*``. :returns: Evaluates to False if the tree is clean, True if not. When unclean the value returned is a diagnosis message. .. warning:: This method is security-critical. 
In theory it would be possible for one user of the system to include JavaScript in the data they send to BTW. This JavaScript could then be loaded in someone else's browser and executed there.\"\"\"\n <|body_1|>\n\n def extract_headword(self):\n \"\"\"Extracts the headword from the XML tree. This is the contents of the btw:lemma element. :returns: The headword. :rtype: str\"\"\"\n <|body_2|>\n\n def extract_authority(self):\n \"\"\"Extracts the authority from the XML tree. This is the contents of the authority attribute on the top element. :returns: The authority :rtype: str\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.parsing_error = None\n self.tree = None\n try:\n self.tree = lxml.etree.fromstring(data)\n except lxml.etree.XMLSyntaxError as ex:\n self.parsing_error = 'Parsing error: ' + str(ex)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.parsing_error:\n return self.parsing_error\n for node in self.tree.iter():\n if isinstance(node, lxml.etree._ProcessingInstruction):\n return 'Processing instruction found.'\n elif isinstance(node, lxml.etree._Element):\n if node.tag != '{http://www.w3.org/1999/xhtml}div':\n return 'Element outside the xhtml namespace: ' + node.tag\n for attr in node.attrib.keys():\n if attr == 'xmlns':\n if node.attrib[attr] != 'http://www.w3.org/1999/xhtml':\n return 'Attribute xmlns with invalid value: ' + node.attrib[attr] + '.'\n elif attr != 'class' and (not attr.startswith('data-wed-')):\n return 'Invalid attribute: ' + attr + '.'\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n class_sought = 'btw:lemma'\n lemma = self.tree.xpath(\"xhtml:div[contains(@class, '\" + class_sought + \"')]\", namespaces={'xhtml': 'http://www.w3.org/1999/xhtml'})\n if len(lemma):\n classes = lemma[0].get('class').strip().split()\n if not any((x == class_sought for x in classes)):\n lemma = []\n if not len(lemma):\n return None\n lemma = lemma[0].text\n if lemma is None:\n return None\n lemma = lemma.strip()\n if len(lemma) == 0:\n return None\n return lemma\n<|end_body_2|>\n\n<|body_start_3|>\n authority = self.tree.get('data-wed-authority')\n if authority is None:\n raise ValueError(\"can't find the authority in the data passed\")\n return authority.strip()\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000162", "length_bytes": 4781, "license_type": "no_license", "methods": [{"docstring": "The XML tree representation of the data. Allows performing operations on this tree or querying it. :param data: The data to parse. :type data: str", "name": "__init__", "signature": "def __init__(self, data)"}, {"docstring": "Ensure that the tree parses as XML and that it contains only div elements in the ``http://www.w3.org/1999/xhtml`` namespace, no processing instructions, no attributes in any namespace and no attribute other than ``class`` or ``data-wed-*``. :returns: Evaluates to False if the tree is clean, True if not. When unclean the value returned is a diagnosis message. .. warning:: This method is security-critical. In theory it would be possible for one user of the system to include JavaScript in the data they send to BTW. This JavaScript could then be loaded in someone else's browser and executed there.", "name": "is_data_unclean", "signature": "def is_data_unclean(self)"}, {"docstring": "Extracts the headword from the XML tree. This is the contents of the btw:lemma element. :returns: The headword. :rtype: str", "name": "extract_headword", "signature": "def extract_headword(self)"}, {"docstring": "Extracts the authority from the XML tree. 
This is the contents of the authority attribute on the top element. :returns: The authority :rtype: str", "name": "extract_authority", "signature": "def extract_authority(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_000911", "prompt": "Implement the Python class `XMLTree` described below.\n\nClass description:\nImplement the XMLTree class.\n\nMethod signatures and docstrings:\n- def __init__(self, data): The XML tree representation of the data. Allows performing operations on this tree or querying it. :param data: The data to parse. :type data: str\n- def is_data_unclean(self): Ensure that the tree parses as XML and that it contains only div elements in the ``http://www.w3.org/1999/xhtml`` namespace, no processing instructions, no attributes in any namespace and no attribute other than ``class`` or ``data-wed-*``. :returns: Evaluates to False if the tree is clean, True if not. When unclean the value returned is a diagnosis message. .. warning:: This method is security-critical. In theory it would be possible for one user of the system to include JavaScript in the data they send to BTW. This JavaScript could then be loaded in someone else's browser and executed there.\n- def extract_headword(self): Extracts the headword from the XML tree. This is the contents of the btw:lemma element. :returns: The headword. :rtype: str\n- def extract_authority(self): Extracts the authority from the XML tree. This is the contents of the authority attribute on the top element. :returns: The authority :rtype: str", "prompted_full_text": "Implement the Python class `XMLTree` described below.\n\nClass description:\nImplement the XMLTree class.\n\nMethod signatures and docstrings:\n- def __init__(self, data): The XML tree representation of the data. Allows performing operations on this tree or querying it. :param data: The data to parse. :type data: str\n- def is_data_unclean(self): Ensure that the tree parses as XML and that it contains only div elements in the ``http://www.w3.org/1999/xhtml`` namespace, no processing instructions, no attributes in any namespace and no attribute other than ``class`` or ``data-wed-*``. :returns: Evaluates to False if the tree is clean, True if not. When unclean the value returned is a diagnosis message. .. warning:: This method is security-critical. In theory it would be possible for one user of the system to include JavaScript in the data they send to BTW. This JavaScript could then be loaded in someone else's browser and executed there.\n- def extract_headword(self): Extracts the headword from the XML tree. This is the contents of the btw:lemma element. :returns: The headword. :rtype: str\n- def extract_authority(self): Extracts the authority from the XML tree. This is the contents of the authority attribute on the top element. :returns: The authority :rtype: str\n\n<|skeleton|>\nclass XMLTree:\n\n def __init__(self, data):\n \"\"\"The XML tree representation of the data. Allows performing operations on this tree or querying it. :param data: The data to parse. :type data: str\"\"\"\n <|body_0|>\n\n def is_data_unclean(self):\n \"\"\"Ensure that the tree parses as XML and that it contains only div elements in the ``http://www.w3.org/1999/xhtml`` namespace, no processing instructions, no attributes in any namespace and no attribute other than ``class`` or ``data-wed-*``. :returns: Evaluates to False if the tree is clean, True if not. When unclean the value returned is a diagnosis message. .. warning:: This method is security-critical. 
In theory it would be possible for one user of the system to include JavaScript in the data they send to BTW. This JavaScript could then be loaded in someone else's browser and executed there.\"\"\"\n <|body_1|>\n\n def extract_headword(self):\n \"\"\"Extracts the headword from the XML tree. This is the contents of the btw:lemma element. :returns: The headword. :rtype: str\"\"\"\n <|body_2|>\n\n def extract_authority(self):\n \"\"\"Extracts the authority from the XML tree. This is the contents of the authority attribute on the top element. :returns: The authority :rtype: str\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.parsing_error = None\n self.tree = None\n try:\n self.tree = lxml.etree.fromstring(data)\n except lxml.etree.XMLSyntaxError as ex:\n self.parsing_error = 'Parsing error: ' + str(ex)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.parsing_error:\n return self.parsing_error\n for node in self.tree.iter():\n if isinstance(node, lxml.etree._ProcessingInstruction):\n return 'Processing instruction found.'\n elif isinstance(node, lxml.etree._Element):\n if node.tag != '{http://www.w3.org/1999/xhtml}div':\n return 'Element outside the xhtml namespace: ' + node.tag\n for attr in node.attrib.keys():\n if attr == 'xmlns':\n if node.attrib[attr] != 'http://www.w3.org/1999/xhtml':\n return 'Attribute xmlns with invalid value: ' + node.attrib[attr] + '.'\n elif attr != 'class' and (not attr.startswith('data-wed-')):\n return 'Invalid attribute: ' + attr + '.'\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n class_sought = 'btw:lemma'\n lemma = self.tree.xpath(\"xhtml:div[contains(@class, '\" + class_sought + \"')]\", namespaces={'xhtml': 'http://www.w3.org/1999/xhtml'})\n if len(lemma):\n classes = lemma[0].get('class').strip().split()\n if not any((x == class_sought for x in classes)):\n lemma = []\n if not len(lemma):\n return None\n lemma = lemma[0].text\n if lemma is None:\n return None\n lemma = lemma.strip()\n if len(lemma) == 0:\n return None\n return lemma\n<|end_body_2|>\n\n<|body_start_3|>\n authority = self.tree.get('data-wed-authority')\n if authority is None:\n raise ValueError(\"can't find the authority in the data passed\")\n return authority.strip()\n<|end_body_3|>\n", "revision_id": "ea54b58ef15a738500547e73e02935d95775c798", "skeleton": "<|skeleton|>\nclass XMLTree:\n\n def __init__(self, data):\n \"\"\"The XML tree representation of the data. Allows performing operations on this tree or querying it. :param data: The data to parse. :type data: str\"\"\"\n <|body_0|>\n\n def is_data_unclean(self):\n \"\"\"Ensure that the tree parses as XML and that it contains only div elements in the ``http://www.w3.org/1999/xhtml`` namespace, no processing instructions, no attributes in any namespace and no attribute other than ``class`` or ``data-wed-*``. :returns: Evaluates to False if the tree is clean, True if not. When unclean the value returned is a diagnosis message. .. warning:: This method is security-critical. In theory it would be possible for one user of the system to include JavaScript in the data they send to BTW. This JavaScript could then be loaded in someone else's browser and executed there.\"\"\"\n <|body_1|>\n\n def extract_headword(self):\n \"\"\"Extracts the headword from the XML tree. This is the contents of the btw:lemma element. :returns: The headword. :rtype: str\"\"\"\n <|body_2|>\n\n def extract_authority(self):\n \"\"\"Extracts the authority from the XML tree. This is the contents of the authority attribute on the top element. 
:returns: The authority :rtype: str\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class XMLTree:\n def __init__(self, data):\n \"\"\"The XML tree representation of the data. Allows performing operations on this tree or querying it. :param data: The data to parse. :type data: str\"\"\"\n self.parsing_error = None\n self.tree = None\n try:\n self.tree = lxml.etree.fromstring(data)\n except lxml.etree.XMLSyntaxError as ex:\n self.parsing_error = 'Parsing error: ' + str(ex)\n\n def is_data_unclean(self):\n \"\"\"Ensure that the tree parses as XML and that it contains only div elements in the ``http://www.w3.org/1999/xhtml`` namespace, no processing instructions, no attributes in any namespace and no attribute other than ``class`` or ``data-wed-*``. :returns: Evaluates to False if the tree is clean, True if not. When unclean the value returned is a diagnosis message. .. warning:: This method is security-critical. In theory it would be possible for one user of the system to include JavaScript in the data they send to BTW. This JavaScript could then be loaded in someone else's browser and executed there.\"\"\"\n if self.parsing_error:\n return self.parsing_error\n for node in self.tree.iter():\n if isinstance(node, lxml.etree._ProcessingInstruction):\n return 'Processing instruction found.'\n elif isinstance(node, lxml.etree._Element):\n if node.tag != '{http://www.w3.org/1999/xhtml}div':\n return 'Element outside the xhtml namespace: ' + node.tag\n for attr in node.attrib.keys():\n if attr == 'xmlns':\n if node.attrib[attr] != 'http://www.w3.org/1999/xhtml':\n return 'Attribute xmlns with invalid value: ' + node.attrib[attr] + '.'\n elif attr != 'class' and (not attr.startswith('data-wed-')):\n return 'Invalid attribute: ' + attr + '.'\n return False\n\n def extract_headword(self):\n \"\"\"Extracts the headword from the XML tree. This is the contents of the btw:lemma element. :returns: The headword. :rtype: str\"\"\"\n class_sought = 'btw:lemma'\n lemma = self.tree.xpath(\"xhtml:div[contains(@class, '\" + class_sought + \"')]\", namespaces={'xhtml': 'http://www.w3.org/1999/xhtml'})\n if len(lemma):\n classes = lemma[0].get('class').strip().split()\n if not any((x == class_sought for x in classes)):\n lemma = []\n if not len(lemma):\n return None\n lemma = lemma[0].text\n if lemma is None:\n return None\n lemma = lemma.strip()\n if len(lemma) == 0:\n return None\n return lemma\n\n def extract_authority(self):\n \"\"\"Extracts the authority from the XML tree. This is the contents of the authority attribute on the top element. 
:returns: The authority :rtype: str\"\"\"\n authority = self.tree.get('data-wed-authority')\n if authority is None:\n raise ValueError(\"can't find the authority in the data passed\")\n return authority.strip()\n", "source": "the_stack_v2_python_sparse", "source_path": "lexicography/xml.py", "source_repo": "keisetsu/btw", "split": "val", "star_events_count": 0} {"blob_id": "b5345a93d0ebb46fc005f8549fadef242564c0c3", "bodies": ["_, ext = os.path.splitext(post_file.name)\next = ext[1:] if ext.startswith('.') else ext\nhashes = generate_hashes(post_file)\npost_file.seek(0)\nexisting = Object.objects.filter(sha512=hashes.get('sha512'))\nif existing.exists():\n LOGGER.debug('De-duped existing upload %s', existing.first().filename)\n return (existing.first(), False)\nnew_upload = Object(file=save_from_post(post_file.read(), extension=ext))\nnew_upload.save()\nLOGGER.info('Uploaded %s', new_upload.filename)\nreturn (new_upload, True)", "for __, _file in request.FILES.items():\n new_upload, _created = self.handle_post_file(_file)\n new_upload.user = request.user\n new_upload.save()\n ObjectViewFile.count_view(new_upload, request)\n LOGGER.info('Uploaded %s', new_upload.filename)\nreturn HttpResponse(status=204)"], "bodies_text": "<|body_start_0|>\n _, ext = os.path.splitext(post_file.name)\n ext = ext[1:] if ext.startswith('.') else ext\n hashes = generate_hashes(post_file)\n post_file.seek(0)\n existing = Object.objects.filter(sha512=hashes.get('sha512'))\n if existing.exists():\n LOGGER.debug('De-duped existing upload %s', existing.first().filename)\n return (existing.first(), False)\n new_upload = Object(file=save_from_post(post_file.read(), extension=ext))\n new_upload.save()\n LOGGER.info('Uploaded %s', new_upload.filename)\n return (new_upload, True)\n<|end_body_0|>\n\n<|body_start_1|>\n for __, _file in request.FILES.items():\n new_upload, _created = self.handle_post_file(_file)\n new_upload.user = request.user\n new_upload.save()\n ObjectViewFile.count_view(new_upload, request)\n LOGGER.info('Uploaded %s', new_upload.filename)\n return HttpResponse(status=204)\n<|end_body_1|>\n", "class_docstring": "Handle uploads from browser", "class_name": "BrowserObjectView", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BrowserObjectView:\n \"\"\"Handle uploads from browser\"\"\"\n\n def handle_post_file(self, post_file) -> Tuple[Object, bool]:\n \"\"\"Handle upload of a single file, computes hashes and returns existing Upload instance and False as tuple if file was uploaded already. 
Otherwise, new Upload instance is created and returned in a tuple with True.\"\"\"\n <|body_0|>\n\n def post(self, request: HttpRequest) -> HttpResponse:\n \"\"\"Create Upload objects from request\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n _, ext = os.path.splitext(post_file.name)\n ext = ext[1:] if ext.startswith('.') else ext\n hashes = generate_hashes(post_file)\n post_file.seek(0)\n existing = Object.objects.filter(sha512=hashes.get('sha512'))\n if existing.exists():\n LOGGER.debug('De-duped existing upload %s', existing.first().filename)\n return (existing.first(), False)\n new_upload = Object(file=save_from_post(post_file.read(), extension=ext))\n new_upload.save()\n LOGGER.info('Uploaded %s', new_upload.filename)\n return (new_upload, True)\n<|end_body_0|>\n\n<|body_start_1|>\n for __, _file in request.FILES.items():\n new_upload, _created = self.handle_post_file(_file)\n new_upload.user = request.user\n new_upload.save()\n ObjectViewFile.count_view(new_upload, request)\n LOGGER.info('Uploaded %s', new_upload.filename)\n return HttpResponse(status=204)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000163", "length_bytes": 8125, "license_type": "permissive", "methods": [{"docstring": "Handle upload of a single file, computes hashes and returns existing Upload instance and False as tuple if file was uploaded already. Otherwise, new Upload instance is created and returned in a tuple with True.", "name": "handle_post_file", "signature": "def handle_post_file(self, post_file) -> Tuple[Object, bool]"}, {"docstring": "Create Upload objects from request", "name": "post", "signature": "def post(self, request: HttpRequest) -> HttpResponse"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007298", "prompt": "Implement the Python class `BrowserObjectView` described below.\n\nClass description:\nHandle uploads from browser\n\nMethod signatures and docstrings:\n- def handle_post_file(self, post_file) -> Tuple[Object, bool]: Handle upload of a single file, computes hashes and returns existing Upload instance and False as tuple if file was uploaded already. Otherwise, new Upload instance is created and returned in a tuple with True.\n- def post(self, request: HttpRequest) -> HttpResponse: Create Upload objects from request", "prompted_full_text": "Implement the Python class `BrowserObjectView` described below.\n\nClass description:\nHandle uploads from browser\n\nMethod signatures and docstrings:\n- def handle_post_file(self, post_file) -> Tuple[Object, bool]: Handle upload of a single file, computes hashes and returns existing Upload instance and False as tuple if file was uploaded already. Otherwise, new Upload instance is created and returned in a tuple with True.\n- def post(self, request: HttpRequest) -> HttpResponse: Create Upload objects from request\n\n<|skeleton|>\nclass BrowserObjectView:\n \"\"\"Handle uploads from browser\"\"\"\n\n def handle_post_file(self, post_file) -> Tuple[Object, bool]:\n \"\"\"Handle upload of a single file, computes hashes and returns existing Upload instance and False as tuple if file was uploaded already. 
Otherwise, new Upload instance is created and returned in a tuple with True.\"\"\"\n <|body_0|>\n\n def post(self, request: HttpRequest) -> HttpResponse:\n \"\"\"Create Upload objects from request\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n _, ext = os.path.splitext(post_file.name)\n ext = ext[1:] if ext.startswith('.') else ext\n hashes = generate_hashes(post_file)\n post_file.seek(0)\n existing = Object.objects.filter(sha512=hashes.get('sha512'))\n if existing.exists():\n LOGGER.debug('De-duped existing upload %s', existing.first().filename)\n return (existing.first(), False)\n new_upload = Object(file=save_from_post(post_file.read(), extension=ext))\n new_upload.save()\n LOGGER.info('Uploaded %s', new_upload.filename)\n return (new_upload, True)\n<|end_body_0|>\n\n<|body_start_1|>\n for __, _file in request.FILES.items():\n new_upload, _created = self.handle_post_file(_file)\n new_upload.user = request.user\n new_upload.save()\n ObjectViewFile.count_view(new_upload, request)\n LOGGER.info('Uploaded %s', new_upload.filename)\n return HttpResponse(status=204)\n<|end_body_1|>\n", "revision_id": "84bf18262af59e45502a9e862d1a85c5cecd63ac", "skeleton": "<|skeleton|>\nclass BrowserObjectView:\n \"\"\"Handle uploads from browser\"\"\"\n\n def handle_post_file(self, post_file) -> Tuple[Object, bool]:\n \"\"\"Handle upload of a single file, computes hashes and returns existing Upload instance and False as tuple if file was uploaded already. Otherwise, new Upload instance is created and returned in a tuple with True.\"\"\"\n <|body_0|>\n\n def post(self, request: HttpRequest) -> HttpResponse:\n \"\"\"Create Upload objects from request\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BrowserObjectView:\n \"\"\"Handle uploads from browser\"\"\"\n\n def handle_post_file(self, post_file) -> Tuple[Object, bool]:\n \"\"\"Handle upload of a single file, computes hashes and returns existing Upload instance and False as tuple if file was uploaded already. 
Otherwise, new Upload instance is created and returned in a tuple with True.\"\"\"\n _, ext = os.path.splitext(post_file.name)\n ext = ext[1:] if ext.startswith('.') else ext\n hashes = generate_hashes(post_file)\n post_file.seek(0)\n existing = Object.objects.filter(sha512=hashes.get('sha512'))\n if existing.exists():\n LOGGER.debug('De-duped existing upload %s', existing.first().filename)\n return (existing.first(), False)\n new_upload = Object(file=save_from_post(post_file.read(), extension=ext))\n new_upload.save()\n LOGGER.info('Uploaded %s', new_upload.filename)\n return (new_upload, True)\n\n def post(self, request: HttpRequest) -> HttpResponse:\n \"\"\"Create Upload objects from request\"\"\"\n for __, _file in request.FILES.items():\n new_upload, _created = self.handle_post_file(_file)\n new_upload.user = request.user\n new_upload.save()\n ObjectViewFile.count_view(new_upload, request)\n LOGGER.info('Uploaded %s', new_upload.filename)\n return HttpResponse(status=204)\n", "source": "the_stack_v2_python_sparse", "source_path": "pyazo/core/views/upload.py", "source_repo": "BeryJu/pyazo", "split": "val", "star_events_count": 5} {"blob_id": "a5604031fc7b746b59e7a1594377c66c93964b2a", "bodies": ["mojom = 'foo.mojom'\nself.WriteFile(mojom, 'module foo; [Extensible] union U { bool x; };')\nwith self.assertRaisesRegexp(Exception, 'must specify a \\\\[Default\\\\]'):\n self.ParseMojoms([mojom])", "mojom = 'foo.mojom'\nself.WriteFile(mojom, ' module foo;\\n [Extensible] union U {\\n [Default] bool x;\\n [Default] bool y;\\n };\\n ')\nwith self.assertRaisesRegexp(Exception, 'Multiple \\\\[Default\\\\] fields'):\n self.ParseMojoms([mojom])", "mojom = 'foo.mojom'\nself.WriteFile(mojom, ' module foo;\\n [Extensible] union U {\\n [Default] handle p;\\n };\\n ')\nwith self.assertRaisesRegexp(Exception, 'must be nullable or integral'):\n self.ParseMojoms([mojom])"], "bodies_text": "<|body_start_0|>\n mojom = 'foo.mojom'\n self.WriteFile(mojom, 'module foo; [Extensible] union U { bool x; };')\n with self.assertRaisesRegexp(Exception, 'must specify a \\\\[Default\\\\]'):\n self.ParseMojoms([mojom])\n<|end_body_0|>\n\n<|body_start_1|>\n mojom = 'foo.mojom'\n self.WriteFile(mojom, ' module foo;\\n [Extensible] union U {\\n [Default] bool x;\\n [Default] bool y;\\n };\\n ')\n with self.assertRaisesRegexp(Exception, 'Multiple \\\\[Default\\\\] fields'):\n self.ParseMojoms([mojom])\n<|end_body_1|>\n\n<|body_start_2|>\n mojom = 'foo.mojom'\n self.WriteFile(mojom, ' module foo;\\n [Extensible] union U {\\n [Default] handle p;\\n };\\n ')\n with self.assertRaisesRegexp(Exception, 'must be nullable or integral'):\n self.ParseMojoms([mojom])\n<|end_body_2|>\n", "class_docstring": "Tests union parsing behavior.", "class_name": "UnionTest", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UnionTest:\n \"\"\"Tests union parsing behavior.\"\"\"\n\n def testExtensibleMustHaveDefault(self):\n \"\"\"Verifies that extensible unions must have a default field.\"\"\"\n <|body_0|>\n\n def testExtensibleSingleDefault(self):\n \"\"\"Verifies that extensible unions must not have multiple default fields.\"\"\"\n <|body_1|>\n\n def testExtensibleDefaultTypeValid(self):\n \"\"\"Verifies that an extensible union's default field must be nullable or integral type.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mojom = 'foo.mojom'\n self.WriteFile(mojom, 'module foo; [Extensible] union U { bool x; };')\n with 
self.assertRaisesRegexp(Exception, 'must specify a \\\\[Default\\\\]'):\n self.ParseMojoms([mojom])\n<|end_body_0|>\n\n<|body_start_1|>\n mojom = 'foo.mojom'\n self.WriteFile(mojom, ' module foo;\\n [Extensible] union U {\\n [Default] bool x;\\n [Default] bool y;\\n };\\n ')\n with self.assertRaisesRegexp(Exception, 'Multiple \\\\[Default\\\\] fields'):\n self.ParseMojoms([mojom])\n<|end_body_1|>\n\n<|body_start_2|>\n mojom = 'foo.mojom'\n self.WriteFile(mojom, ' module foo;\\n [Extensible] union U {\\n [Default] handle p;\\n };\\n ')\n with self.assertRaisesRegexp(Exception, 'must be nullable or integral'):\n self.ParseMojoms([mojom])\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000164", "length_bytes": 1532, "license_type": "permissive", "methods": [{"docstring": "Verifies that extensible unions must have a default field.", "name": "testExtensibleMustHaveDefault", "signature": "def testExtensibleMustHaveDefault(self)"}, {"docstring": "Verifies that extensible unions must not have multiple default fields.", "name": "testExtensibleSingleDefault", "signature": "def testExtensibleSingleDefault(self)"}, {"docstring": "Verifies that an extensible union's default field must be nullable or integral type.", "name": "testExtensibleDefaultTypeValid", "signature": "def testExtensibleDefaultTypeValid(self)"}], "n_methods": 3, "prompt": "Implement the Python class `UnionTest` described below.\n\nClass description:\nTests union parsing behavior.\n\nMethod signatures and docstrings:\n- def testExtensibleMustHaveDefault(self): Verifies that extensible unions must have a default field.\n- def testExtensibleSingleDefault(self): Verifies that extensible unions must not have multiple default fields.\n- def testExtensibleDefaultTypeValid(self): Verifies that an extensible union's default field must be nullable or integral type.", "prompted_full_text": "Implement the Python class `UnionTest` described below.\n\nClass description:\nTests union parsing behavior.\n\nMethod signatures and docstrings:\n- def testExtensibleMustHaveDefault(self): Verifies that extensible unions must have a default field.\n- def testExtensibleSingleDefault(self): Verifies that extensible unions must not have multiple default fields.\n- def testExtensibleDefaultTypeValid(self): Verifies that an extensible union's default field must be nullable or integral type.\n\n<|skeleton|>\nclass UnionTest:\n \"\"\"Tests union parsing behavior.\"\"\"\n\n def testExtensibleMustHaveDefault(self):\n \"\"\"Verifies that extensible unions must have a default field.\"\"\"\n <|body_0|>\n\n def testExtensibleSingleDefault(self):\n \"\"\"Verifies that extensible unions must not have multiple default fields.\"\"\"\n <|body_1|>\n\n def testExtensibleDefaultTypeValid(self):\n \"\"\"Verifies that an extensible union's default field must be nullable or integral type.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mojom = 'foo.mojom'\n self.WriteFile(mojom, 'module foo; [Extensible] union U { bool x; };')\n with self.assertRaisesRegexp(Exception, 'must specify a \\\\[Default\\\\]'):\n self.ParseMojoms([mojom])\n<|end_body_0|>\n\n<|body_start_1|>\n mojom = 'foo.mojom'\n self.WriteFile(mojom, ' module foo;\\n [Extensible] union U {\\n [Default] bool x;\\n [Default] bool y;\\n };\\n ')\n with self.assertRaisesRegexp(Exception, 'Multiple \\\\[Default\\\\] fields'):\n self.ParseMojoms([mojom])\n<|end_body_1|>\n\n<|body_start_2|>\n mojom = 'foo.mojom'\n self.WriteFile(mojom, ' module foo;\\n [Extensible] union U {\\n [Default] handle p;\\n 
};\\n ')\n with self.assertRaisesRegexp(Exception, 'must be nullable or integral'):\n self.ParseMojoms([mojom])\n<|end_body_2|>\n", "revision_id": "a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c", "skeleton": "<|skeleton|>\nclass UnionTest:\n \"\"\"Tests union parsing behavior.\"\"\"\n\n def testExtensibleMustHaveDefault(self):\n \"\"\"Verifies that extensible unions must have a default field.\"\"\"\n <|body_0|>\n\n def testExtensibleSingleDefault(self):\n \"\"\"Verifies that extensible unions must not have multiple default fields.\"\"\"\n <|body_1|>\n\n def testExtensibleDefaultTypeValid(self):\n \"\"\"Verifies that an extensible union's default field must be nullable or integral type.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UnionTest:\n \"\"\"Tests union parsing behavior.\"\"\"\n\n def testExtensibleMustHaveDefault(self):\n \"\"\"Verifies that extensible unions must have a default field.\"\"\"\n mojom = 'foo.mojom'\n self.WriteFile(mojom, 'module foo; [Extensible] union U { bool x; };')\n with self.assertRaisesRegexp(Exception, 'must specify a \\\\[Default\\\\]'):\n self.ParseMojoms([mojom])\n\n def testExtensibleSingleDefault(self):\n \"\"\"Verifies that extensible unions must not have multiple default fields.\"\"\"\n mojom = 'foo.mojom'\n self.WriteFile(mojom, ' module foo;\\n [Extensible] union U {\\n [Default] bool x;\\n [Default] bool y;\\n };\\n ')\n with self.assertRaisesRegexp(Exception, 'Multiple \\\\[Default\\\\] fields'):\n self.ParseMojoms([mojom])\n\n def testExtensibleDefaultTypeValid(self):\n \"\"\"Verifies that an extensible union's default field must be nullable or integral type.\"\"\"\n mojom = 'foo.mojom'\n self.WriteFile(mojom, ' module foo;\\n [Extensible] union U {\\n [Default] handle p;\\n };\\n ')\n with self.assertRaisesRegexp(Exception, 'must be nullable or integral'):\n self.ParseMojoms([mojom])\n", "source": "the_stack_v2_python_sparse", "source_path": "mojo/public/tools/mojom/union_unittest.py", "source_repo": "chromium/chromium", "split": "val", "star_events_count": 17408} {"blob_id": "58aa43c4b3a65689074bc53d4bafef1370ebf0e8", "bodies": ["def _build(pre_start, pre_end, in_start, in_end):\n if pre_start == pre_end:\n return None\n root_val = preorder[pre_start]\n idx = inorder[in_start:in_end].index(root_val)\n root_node = TreeNode(root_val)\n left_node = _build(pre_start + 1, pre_start + 1 + idx, in_start, in_start + idx)\n right_node = _build(pre_start + idx + 1, pre_end, in_start + idx + 1, in_end)\n root_node.left = left_node\n root_node.right = right_node\n return root_node\nreturn _build(0, len(preorder), 0, len(inorder))", "if not preorder:\n return None\nroot = preorder[0]\nidx = inorder.index(root)\nroot_node = TreeNode(root)\nleft_node = self.buildTree(preorder[1:idx + 1], inorder[:idx])\nright_node = self.buildTree(preorder[idx + 1:], inorder[idx + 1:])\nroot_node.left = left_node\nroot_node.right = right_node\nreturn root_node"], "bodies_text": "<|body_start_0|>\n def _build(pre_start, pre_end, in_start, in_end):\n if pre_start == pre_end:\n return None\n root_val = preorder[pre_start]\n idx = inorder[in_start:in_end].index(root_val)\n root_node = TreeNode(root_val)\n left_node = _build(pre_start + 1, pre_start + 1 + idx, in_start, in_start + idx)\n right_node = _build(pre_start + idx + 1, pre_end, in_start + idx + 1, in_end)\n root_node.left = left_node\n root_node.right = right_node\n return root_node\n return _build(0, 
len(preorder), 0, len(inorder))\n<|end_body_0|>\n\n<|body_start_1|>\n if not preorder:\n return None\n root = preorder[0]\n idx = inorder.index(root)\n root_node = TreeNode(root)\n left_node = self.buildTree(preorder[1:idx + 1], inorder[:idx])\n right_node = self.buildTree(preorder[idx + 1:], inorder[idx + 1:])\n root_node.left = left_node\n root_node.right = right_node\n return root_node\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def buildTree(self, preorder, inorder):\n \"\"\":type preorder: List[int] :type inorder: List[int] :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def buildTruee(self, preorder, inorder):\n \"\"\"memory limit exceeded %>_<%\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def _build(pre_start, pre_end, in_start, in_end):\n if pre_start == pre_end:\n return None\n root_val = preorder[pre_start]\n idx = inorder[in_start:in_end].index(root_val)\n root_node = TreeNode(root_val)\n left_node = _build(pre_start + 1, pre_start + 1 + idx, in_start, in_start + idx)\n right_node = _build(pre_start + idx + 1, pre_end, in_start + idx + 1, in_end)\n root_node.left = left_node\n root_node.right = right_node\n return root_node\n return _build(0, len(preorder), 0, len(inorder))\n<|end_body_0|>\n\n<|body_start_1|>\n if not preorder:\n return None\n root = preorder[0]\n idx = inorder.index(root)\n root_node = TreeNode(root)\n left_node = self.buildTree(preorder[1:idx + 1], inorder[:idx])\n right_node = self.buildTree(preorder[idx + 1:], inorder[idx + 1:])\n root_node.left = left_node\n root_node.right = right_node\n return root_node\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000165", "length_bytes": 1597, "license_type": "no_license", "methods": [{"docstring": ":type preorder: List[int] :type inorder: List[int] :rtype: TreeNode", "name": "buildTree", "signature": "def buildTree(self, preorder, inorder)"}, {"docstring": "memory limit exceeded %>_<%", "name": "buildTruee", "signature": "def buildTruee(self, preorder, inorder)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def buildTree(self, preorder, inorder): :type preorder: List[int] :type inorder: List[int] :rtype: TreeNode\n- def buildTruee(self, preorder, inorder): memory limit exceeded %>_<%", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def buildTree(self, preorder, inorder): :type preorder: List[int] :type inorder: List[int] :rtype: TreeNode\n- def buildTruee(self, preorder, inorder): memory limit exceeded %>_<%\n\n<|skeleton|>\nclass Solution:\n\n def buildTree(self, preorder, inorder):\n \"\"\":type preorder: List[int] :type inorder: List[int] :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def buildTruee(self, preorder, inorder):\n \"\"\"memory limit exceeded %>_<%\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def _build(pre_start, pre_end, in_start, in_end):\n if pre_start == pre_end:\n return None\n root_val = preorder[pre_start]\n idx = inorder[in_start:in_end].index(root_val)\n root_node = TreeNode(root_val)\n left_node = _build(pre_start + 1, pre_start + 1 + idx, in_start, in_start + idx)\n right_node = _build(pre_start + idx + 1, pre_end, in_start + idx + 1, in_end)\n root_node.left = 
left_node\n root_node.right = right_node\n return root_node\n return _build(0, len(preorder), 0, len(inorder))\n<|end_body_0|>\n\n<|body_start_1|>\n if not preorder:\n return None\n root = preorder[0]\n idx = inorder.index(root)\n root_node = TreeNode(root)\n left_node = self.buildTree(preorder[1:idx + 1], inorder[:idx])\n right_node = self.buildTree(preorder[idx + 1:], inorder[idx + 1:])\n root_node.left = left_node\n root_node.right = right_node\n return root_node\n<|end_body_1|>\n", "revision_id": "b7f85afe1c69f34f8c6025881224ae79042850d3", "skeleton": "<|skeleton|>\nclass Solution:\n\n def buildTree(self, preorder, inorder):\n \"\"\":type preorder: List[int] :type inorder: List[int] :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def buildTruee(self, preorder, inorder):\n \"\"\"memory limit exceeded %>_<%\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def buildTree(self, preorder, inorder):\n \"\"\":type preorder: List[int] :type inorder: List[int] :rtype: TreeNode\"\"\"\n def _build(pre_start, pre_end, in_start, in_end):\n if pre_start == pre_end:\n return None\n root_val = preorder[pre_start]\n idx = inorder[in_start:in_end].index(root_val)\n root_node = TreeNode(root_val)\n left_node = _build(pre_start + 1, pre_start + 1 + idx, in_start, in_start + idx)\n right_node = _build(pre_start + idx + 1, pre_end, in_start + idx + 1, in_end)\n root_node.left = left_node\n root_node.right = right_node\n return root_node\n return _build(0, len(preorder), 0, len(inorder))\n\n def buildTruee(self, preorder, inorder):\n \"\"\"memory limit exceeded %>_<%\"\"\"\n if not preorder:\n return None\n root = preorder[0]\n idx = inorder.index(root)\n root_node = TreeNode(root)\n left_node = self.buildTree(preorder[1:idx + 1], inorder[:idx])\n right_node = self.buildTree(preorder[idx + 1:], inorder[idx + 1:])\n root_node.left = left_node\n root_node.right = right_node\n return root_node\n", "source": "the_stack_v2_python_sparse", "source_path": "algorithms/105. 
Construct Binary Tree from Preorder and Inorder Traversal/main.py", "source_repo": "GTxx/leetcode", "split": "val", "star_events_count": 1} {"blob_id": "600b474162e535fa590fe388601d73e476261085", "bodies": ["super().__init__()\nself.add_module('conv1', Conv(in_channels=in_feats, out_channels=in_feats // 2, kernel_size=3, stride=1, padding=1, bias=False))\nself.add_module('norm1', Norm(in_feats // 2))\nself.add_module('relu1', nn.ReLU(inplace=True))\nself.add_module('conv2', Conv(in_channels=in_feats // 2, out_channels=in_feats // 2, kernel_size=3, stride=1, padding=1, bias=False))\nself.add_module('norm2', Norm(in_feats // 2))\nself.add_module('relu2', nn.ReLU(inplace=True))\nself.add_module('deconv', Deconv(in_channels=in_feats // 2, out_channels=in_feats // 4, kernel_size=2, stride=2, padding=0, output_padding=0))", "x = self.conv1(x)\nx = self.norm1(x)\nx = self.relu1(x)\nx = self.conv2(x)\nx = self.norm2(x)\nx = self.relu2(x)\nx = self.deconv(x)\nreturn x"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.add_module('conv1', Conv(in_channels=in_feats, out_channels=in_feats // 2, kernel_size=3, stride=1, padding=1, bias=False))\n self.add_module('norm1', Norm(in_feats // 2))\n self.add_module('relu1', nn.ReLU(inplace=True))\n self.add_module('conv2', Conv(in_channels=in_feats // 2, out_channels=in_feats // 2, kernel_size=3, stride=1, padding=1, bias=False))\n self.add_module('norm2', Norm(in_feats // 2))\n self.add_module('relu2', nn.ReLU(inplace=True))\n self.add_module('deconv', Deconv(in_channels=in_feats // 2, out_channels=in_feats // 4, kernel_size=2, stride=2, padding=0, output_padding=0))\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.conv1(x)\n x = self.norm1(x)\n x = self.relu1(x)\n x = self.conv2(x)\n x = self.norm2(x)\n x = self.relu2(x)\n x = self.deconv(x)\n return x\n<|end_body_1|>\n", "class_docstring": "", "class_name": "_UpsampleBlock", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass _UpsampleBlock:\n\n def __init__(self, in_feats, Norm, Conv, Deconv):\n \"\"\"Args: in_feats (int): Number of input channels. Norm (nn.Module): Normalization layer constructor. Conv (nn.Module): Convolutional layer constructor. Deconv (nn.Module): Transposed convolutional layer constructor.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Performs a forward pass through the upsampling block. Args: x (torch.Tensor): Input tensor of shape (batch_size, in_feats, height, width). 
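The val_000165 record pairs an index-window buildTree against a slicing buildTruee that, per its own docstring, exceeded the memory limit; the index version fixes the copying but still runs list.index over an inorder slice in every recursive call, so the worst case stays quadratic. A common O(n) refinement precomputes a value-to-position map over inorder (sound because the problem guarantees distinct values). The sketch below is illustrative, and its TreeNode is a minimal stand-in for the class the record assumes but never defines.

class TreeNode:
    # Minimal stand-in for the TreeNode the record's code assumes.
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

def build_tree(preorder, inorder):
    # Map each value to its inorder position once, instead of calling
    # list.index inside every recursive step.
    pos = {val: i for i, val in enumerate(inorder)}

    def build(pre_start, pre_end, in_start, in_end):
        if pre_start == pre_end:
            return None
        root = TreeNode(preorder[pre_start])
        idx = pos[root.val]            # O(1) lookup replaces the linear scan
        left_len = idx - in_start      # size of the left subtree
        root.left = build(pre_start + 1, pre_start + 1 + left_len, in_start, idx)
        root.right = build(pre_start + 1 + left_len, pre_end, idx + 1, in_end)
        return root

    return build(0, len(preorder), 0, len(inorder))

# Preorder [3, 9, 20, 15, 7] with inorder [9, 3, 15, 20, 7] rebuilds the
# tree rooted at 3, with 9 on the left and 20 over 15 and 7 on the right.
root = build_tree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
assert root.val == 3 and root.left.val == 9 and root.right.left.val == 15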
Returns: torch.Tensor: Output tensor of shape (batch_size, in_feats//4, 2*height, 2*width).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.add_module('conv1', Conv(in_channels=in_feats, out_channels=in_feats // 2, kernel_size=3, stride=1, padding=1, bias=False))\n self.add_module('norm1', Norm(in_feats // 2))\n self.add_module('relu1', nn.ReLU(inplace=True))\n self.add_module('conv2', Conv(in_channels=in_feats // 2, out_channels=in_feats // 2, kernel_size=3, stride=1, padding=1, bias=False))\n self.add_module('norm2', Norm(in_feats // 2))\n self.add_module('relu2', nn.ReLU(inplace=True))\n self.add_module('deconv', Deconv(in_channels=in_feats // 2, out_channels=in_feats // 4, kernel_size=2, stride=2, padding=0, output_padding=0))\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.conv1(x)\n x = self.norm1(x)\n x = self.relu1(x)\n x = self.conv2(x)\n x = self.norm2(x)\n x = self.relu2(x)\n x = self.deconv(x)\n return x\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000166", "length_bytes": 24719, "license_type": "permissive", "methods": [{"docstring": "Args: in_feats (int): Number of input channels. Norm (nn.Module): Normalization layer constructor. Conv (nn.Module): Convolutional layer constructor. Deconv (nn.Module): Transposed convolutional layer constructor.", "name": "__init__", "signature": "def __init__(self, in_feats, Norm, Conv, Deconv)"}, {"docstring": "Performs a forward pass through the upsampling block. Args: x (torch.Tensor): Input tensor of shape (batch_size, in_feats, height, width). Returns: torch.Tensor: Output tensor of shape (batch_size, in_feats//4, 2*height, 2*width).", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000224", "prompt": "Implement the Python class `_UpsampleBlock` described below.\n\nClass description:\nImplement the _UpsampleBlock class.\n\nMethod signatures and docstrings:\n- def __init__(self, in_feats, Norm, Conv, Deconv): Args: in_feats (int): Number of input channels. Norm (nn.Module): Normalization layer constructor. Conv (nn.Module): Convolutional layer constructor. Deconv (nn.Module): Transposed convolutional layer constructor.\n- def forward(self, x): Performs a forward pass through the upsampling block. Args: x (torch.Tensor): Input tensor of shape (batch_size, in_feats, height, width). Returns: torch.Tensor: Output tensor of shape (batch_size, in_feats//4, 2*height, 2*width).", "prompted_full_text": "Implement the Python class `_UpsampleBlock` described below.\n\nClass description:\nImplement the _UpsampleBlock class.\n\nMethod signatures and docstrings:\n- def __init__(self, in_feats, Norm, Conv, Deconv): Args: in_feats (int): Number of input channels. Norm (nn.Module): Normalization layer constructor. Conv (nn.Module): Convolutional layer constructor. Deconv (nn.Module): Transposed convolutional layer constructor.\n- def forward(self, x): Performs a forward pass through the upsampling block. Args: x (torch.Tensor): Input tensor of shape (batch_size, in_feats, height, width). Returns: torch.Tensor: Output tensor of shape (batch_size, in_feats//4, 2*height, 2*width).\n\n<|skeleton|>\nclass _UpsampleBlock:\n\n def __init__(self, in_feats, Norm, Conv, Deconv):\n \"\"\"Args: in_feats (int): Number of input channels. Norm (nn.Module): Normalization layer constructor. Conv (nn.Module): Convolutional layer constructor. 
Deconv (nn.Module): Transposed convolutional layer constructor.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Performs a forward pass through the upsampling block. Args: x (torch.Tensor): Input tensor of shape (batch_size, in_feats, height, width). Returns: torch.Tensor: Output tensor of shape (batch_size, in_feats//4, 2*height, 2*width).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.add_module('conv1', Conv(in_channels=in_feats, out_channels=in_feats // 2, kernel_size=3, stride=1, padding=1, bias=False))\n self.add_module('norm1', Norm(in_feats // 2))\n self.add_module('relu1', nn.ReLU(inplace=True))\n self.add_module('conv2', Conv(in_channels=in_feats // 2, out_channels=in_feats // 2, kernel_size=3, stride=1, padding=1, bias=False))\n self.add_module('norm2', Norm(in_feats // 2))\n self.add_module('relu2', nn.ReLU(inplace=True))\n self.add_module('deconv', Deconv(in_channels=in_feats // 2, out_channels=in_feats // 4, kernel_size=2, stride=2, padding=0, output_padding=0))\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.conv1(x)\n x = self.norm1(x)\n x = self.relu1(x)\n x = self.conv2(x)\n x = self.norm2(x)\n x = self.relu2(x)\n x = self.deconv(x)\n return x\n<|end_body_1|>\n", "revision_id": "72eb99f68205afd5f8d49a3bb6cfc08cfd467582", "skeleton": "<|skeleton|>\nclass _UpsampleBlock:\n\n def __init__(self, in_feats, Norm, Conv, Deconv):\n \"\"\"Args: in_feats (int): Number of input channels. Norm (nn.Module): Normalization layer constructor. Conv (nn.Module): Convolutional layer constructor. Deconv (nn.Module): Transposed convolutional layer constructor.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Performs a forward pass through the upsampling block. Args: x (torch.Tensor): Input tensor of shape (batch_size, in_feats, height, width). Returns: torch.Tensor: Output tensor of shape (batch_size, in_feats//4, 2*height, 2*width).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class _UpsampleBlock:\n def __init__(self, in_feats, Norm, Conv, Deconv):\n \"\"\"Args: in_feats (int): Number of input channels. Norm (nn.Module): Normalization layer constructor. Conv (nn.Module): Convolutional layer constructor. Deconv (nn.Module): Transposed convolutional layer constructor.\"\"\"\n super().__init__()\n self.add_module('conv1', Conv(in_channels=in_feats, out_channels=in_feats // 2, kernel_size=3, stride=1, padding=1, bias=False))\n self.add_module('norm1', Norm(in_feats // 2))\n self.add_module('relu1', nn.ReLU(inplace=True))\n self.add_module('conv2', Conv(in_channels=in_feats // 2, out_channels=in_feats // 2, kernel_size=3, stride=1, padding=1, bias=False))\n self.add_module('norm2', Norm(in_feats // 2))\n self.add_module('relu2', nn.ReLU(inplace=True))\n self.add_module('deconv', Deconv(in_channels=in_feats // 2, out_channels=in_feats // 4, kernel_size=2, stride=2, padding=0, output_padding=0))\n\n def forward(self, x):\n \"\"\"Performs a forward pass through the upsampling block. Args: x (torch.Tensor): Input tensor of shape (batch_size, in_feats, height, width). 
Returns: torch.Tensor: Output tensor of shape (batch_size, in_feats//4, 2*height, 2*width).\"\"\"\n x = self.conv1(x)\n x = self.norm1(x)\n x = self.relu1(x)\n x = self.conv2(x)\n x = self.norm2(x)\n x = self.relu2(x)\n x = self.deconv(x)\n return x\n", "source": "the_stack_v2_python_sparse", "source_path": "GANDLF/models/unetr.py", "source_repo": "mlcommons/GaNDLF", "split": "val", "star_events_count": 45} {"blob_id": "b2f0faf6733885316a14c0f06744044a03c59255", "bodies": ["task_id = request.GET.get('task_id', '')\nif not task_id:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\nresp = query_dtable_io_status(task_id)\nif resp.status_code == 400:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\nif not resp.ok:\n logger.error(resp.content)\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')\nis_finished = json.loads(resp.content)['is_finished']\nreturn Response({'is_finished': is_finished})", "task_id = request.query_params.get('task_id', '')\nif not task_id:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\ntask_type = request.query_params.get('task_type', '')\nif task_type not in ['export', 'import']:\n error_msg = 'task_type invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\ndtable_uuid = request.query_params.get('dtable_uuid', '')\nif not dtable_uuid:\n error_msg = 'dtable_uuid invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\nresp = cancel_dtable_io_task(task_id, dtable_uuid)\nif resp.status_code == 400:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\nif not resp.ok:\n logger.error(resp.content)\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')\nif task_type == 'import':\n try:\n dtable = DTables.objects.get(uuid=dtable_uuid)\n dtable.delete()\n except Exception:\n pass\ntmp_dir = os.path.join('/tmp/dtable-io', dtable_uuid)\nif os.path.exists(tmp_dir):\n shutil.rmtree(tmp_dir)\nreturn Response({'success': True})"], "bodies_text": "<|body_start_0|>\n task_id = request.GET.get('task_id', '')\n if not task_id:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n resp = query_dtable_io_status(task_id)\n if resp.status_code == 400:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n if not resp.ok:\n logger.error(resp.content)\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')\n is_finished = json.loads(resp.content)['is_finished']\n return Response({'is_finished': is_finished})\n<|end_body_0|>\n\n<|body_start_1|>\n task_id = request.query_params.get('task_id', '')\n if not task_id:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n task_type = request.query_params.get('task_type', '')\n if task_type not in ['export', 'import']:\n error_msg = 'task_type invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n dtable_uuid = request.query_params.get('dtable_uuid', '')\n if not dtable_uuid:\n error_msg = 'dtable_uuid invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n resp = cancel_dtable_io_task(task_id, dtable_uuid)\n if resp.status_code == 400:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n if not resp.ok:\n logger.error(resp.content)\n return 
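The val_000166 record's _UpsampleBlock halves the channel count with the first 3x3 convolution, refines at that width with the second, and then trades another halving of channels for a doubling of spatial size via a stride-2 transposed convolution, which is what yields the documented (batch_size, in_feats//4, 2*height, 2*width) output. The dataset skeleton strips the base class, so the nn.Module parent below is an assumption (add_module only exists on torch modules); this is a behavioral sketch, not GaNDLF's actual implementation.

import torch
from torch import nn

class UpsampleBlockSketch(nn.Module):
    # Same layer recipe as the record, with default constructors filled in.
    def __init__(self, in_feats, Norm=nn.BatchNorm2d, Conv=nn.Conv2d,
                 Deconv=nn.ConvTranspose2d):
        super().__init__()
        self.conv1 = Conv(in_feats, in_feats // 2, kernel_size=3, padding=1, bias=False)
        self.norm1 = Norm(in_feats // 2)
        self.conv2 = Conv(in_feats // 2, in_feats // 2, kernel_size=3, padding=1, bias=False)
        self.norm2 = Norm(in_feats // 2)
        self.relu = nn.ReLU(inplace=True)
        # kernel_size=2 with stride=2 doubles height and width while the
        # channels drop from in_feats // 2 to in_feats // 4.
        self.deconv = Deconv(in_feats // 2, in_feats // 4, kernel_size=2, stride=2)

    def forward(self, x):
        x = self.relu(self.norm1(self.conv1(x)))
        x = self.relu(self.norm2(self.conv2(x)))
        return self.deconv(x)

# (batch, 64, 8, 8) -> (batch, 16, 16, 16), matching the docstring contract.
out = UpsampleBlockSketch(64)(torch.randn(2, 64, 8, 8))
assert out.shape == (2, 16, 16, 16)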
api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')\n if task_type == 'import':\n try:\n dtable = DTables.objects.get(uuid=dtable_uuid)\n dtable.delete()\n except Exception:\n pass\n tmp_dir = os.path.join('/tmp/dtable-io', dtable_uuid)\n if os.path.exists(tmp_dir):\n shutil.rmtree(tmp_dir)\n return Response({'success': True})\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DTableIOStatus", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DTableIOStatus:\n\n def get(self, request):\n \"\"\"Get task status by task id :param request: :return:\"\"\"\n <|body_0|>\n\n def delete(self, request):\n \"\"\"Delete task by task_id :param request: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n task_id = request.GET.get('task_id', '')\n if not task_id:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n resp = query_dtable_io_status(task_id)\n if resp.status_code == 400:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n if not resp.ok:\n logger.error(resp.content)\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')\n is_finished = json.loads(resp.content)['is_finished']\n return Response({'is_finished': is_finished})\n<|end_body_0|>\n\n<|body_start_1|>\n task_id = request.query_params.get('task_id', '')\n if not task_id:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n task_type = request.query_params.get('task_type', '')\n if task_type not in ['export', 'import']:\n error_msg = 'task_type invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n dtable_uuid = request.query_params.get('dtable_uuid', '')\n if not dtable_uuid:\n error_msg = 'dtable_uuid invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n resp = cancel_dtable_io_task(task_id, dtable_uuid)\n if resp.status_code == 400:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n if not resp.ok:\n logger.error(resp.content)\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')\n if task_type == 'import':\n try:\n dtable = DTables.objects.get(uuid=dtable_uuid)\n dtable.delete()\n except Exception:\n pass\n tmp_dir = os.path.join('/tmp/dtable-io', dtable_uuid)\n if os.path.exists(tmp_dir):\n shutil.rmtree(tmp_dir)\n return Response({'success': True})\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000167", "length_bytes": 12942, "license_type": "no_license", "methods": [{"docstring": "Get task status by task id :param request: :return:", "name": "get", "signature": "def get(self, request)"}, {"docstring": "Delete task by task_id :param request: :return:", "name": "delete", "signature": "def delete(self, request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003365", "prompt": "Implement the Python class `DTableIOStatus` described below.\n\nClass description:\nImplement the DTableIOStatus class.\n\nMethod signatures and docstrings:\n- def get(self, request): Get task status by task id :param request: :return:\n- def delete(self, request): Delete task by task_id :param request: :return:", "prompted_full_text": "Implement the Python class `DTableIOStatus` described below.\n\nClass description:\nImplement the DTableIOStatus class.\n\nMethod signatures and docstrings:\n- def get(self, request): Get task status by task id :param request: 
:return:\n- def delete(self, request): Delete task by task_id :param request: :return:\n\n<|skeleton|>\nclass DTableIOStatus:\n\n def get(self, request):\n \"\"\"Get task status by task id :param request: :return:\"\"\"\n <|body_0|>\n\n def delete(self, request):\n \"\"\"Delete task by task_id :param request: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n task_id = request.GET.get('task_id', '')\n if not task_id:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n resp = query_dtable_io_status(task_id)\n if resp.status_code == 400:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n if not resp.ok:\n logger.error(resp.content)\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')\n is_finished = json.loads(resp.content)['is_finished']\n return Response({'is_finished': is_finished})\n<|end_body_0|>\n\n<|body_start_1|>\n task_id = request.query_params.get('task_id', '')\n if not task_id:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n task_type = request.query_params.get('task_type', '')\n if task_type not in ['export', 'import']:\n error_msg = 'task_type invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n dtable_uuid = request.query_params.get('dtable_uuid', '')\n if not dtable_uuid:\n error_msg = 'dtable_uuid invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n resp = cancel_dtable_io_task(task_id, dtable_uuid)\n if resp.status_code == 400:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n if not resp.ok:\n logger.error(resp.content)\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')\n if task_type == 'import':\n try:\n dtable = DTables.objects.get(uuid=dtable_uuid)\n dtable.delete()\n except Exception:\n pass\n tmp_dir = os.path.join('/tmp/dtable-io', dtable_uuid)\n if os.path.exists(tmp_dir):\n shutil.rmtree(tmp_dir)\n return Response({'success': True})\n<|end_body_1|>\n", "revision_id": "3d08b64bf2a3724326eab9dfa771863bc6743bc2", "skeleton": "<|skeleton|>\nclass DTableIOStatus:\n\n def get(self, request):\n \"\"\"Get task status by task id :param request: :return:\"\"\"\n <|body_0|>\n\n def delete(self, request):\n \"\"\"Delete task by task_id :param request: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DTableIOStatus:\n def get(self, request):\n \"\"\"Get task status by task id :param request: :return:\"\"\"\n task_id = request.GET.get('task_id', '')\n if not task_id:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n resp = query_dtable_io_status(task_id)\n if resp.status_code == 400:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n if not resp.ok:\n logger.error(resp.content)\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')\n is_finished = json.loads(resp.content)['is_finished']\n return Response({'is_finished': is_finished})\n\n def delete(self, request):\n \"\"\"Delete task by task_id :param request: :return:\"\"\"\n task_id = request.query_params.get('task_id', '')\n if not task_id:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n task_type = request.query_params.get('task_type', '')\n if task_type 
not in ['export', 'import']:\n error_msg = 'task_type invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n dtable_uuid = request.query_params.get('dtable_uuid', '')\n if not dtable_uuid:\n error_msg = 'dtable_uuid invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n resp = cancel_dtable_io_task(task_id, dtable_uuid)\n if resp.status_code == 400:\n error_msg = 'task_id invalid.'\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n if not resp.ok:\n logger.error(resp.content)\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')\n if task_type == 'import':\n try:\n dtable = DTables.objects.get(uuid=dtable_uuid)\n dtable.delete()\n except Exception:\n pass\n tmp_dir = os.path.join('/tmp/dtable-io', dtable_uuid)\n if os.path.exists(tmp_dir):\n shutil.rmtree(tmp_dir)\n return Response({'success': True})\n", "source": "the_stack_v2_python_sparse", "source_path": "seahub/api2/endpoints/dtable_io.py", "source_repo": "flazx/dtable-web", "split": "val", "star_events_count": 0} {"blob_id": "3ed3271ee557eab71bd20744e2844954fe54f01a", "bodies": ["super().__init__(coordinator)\nself.entity_description = description\nself._attr_unique_id = f'{DOMAIN}_{coordinator.data.agreement.agreement_id}_binary_sensor_{description.key}'", "section = getattr(self.coordinator.data, self.entity_description.section)\nvalue = getattr(section, self.entity_description.measurement)\nif value is None:\n return None\nif self.entity_description.inverted:\n return not value\nreturn value"], "bodies_text": "<|body_start_0|>\n super().__init__(coordinator)\n self.entity_description = description\n self._attr_unique_id = f'{DOMAIN}_{coordinator.data.agreement.agreement_id}_binary_sensor_{description.key}'\n<|end_body_0|>\n\n<|body_start_1|>\n section = getattr(self.coordinator.data, self.entity_description.section)\n value = getattr(section, self.entity_description.measurement)\n if value is None:\n return None\n if self.entity_description.inverted:\n return not value\n return value\n<|end_body_1|>\n", "class_docstring": "Defines an Toon binary sensor.", "class_name": "ToonBinarySensor", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ToonBinarySensor:\n \"\"\"Defines an Toon binary sensor.\"\"\"\n\n def __init__(self, coordinator: ToonDataUpdateCoordinator, description: ToonBinarySensorEntityDescription) -> None:\n \"\"\"Initialize the Toon sensor.\"\"\"\n <|body_0|>\n\n def is_on(self) -> bool | None:\n \"\"\"Return the status of the binary sensor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(coordinator)\n self.entity_description = description\n self._attr_unique_id = f'{DOMAIN}_{coordinator.data.agreement.agreement_id}_binary_sensor_{description.key}'\n<|end_body_0|>\n\n<|body_start_1|>\n section = getattr(self.coordinator.data, self.entity_description.section)\n value = getattr(section, self.entity_description.measurement)\n if value is None:\n return None\n if self.entity_description.inverted:\n return not value\n return value\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000168", "length_bytes": 5719, "license_type": "permissive", "methods": [{"docstring": "Initialize the Toon sensor.", "name": "__init__", "signature": "def __init__(self, coordinator: ToonDataUpdateCoordinator, description: ToonBinarySensorEntityDescription) -> None"}, {"docstring": "Return the status of the binary sensor.", "name": "is_on", "signature": "def 
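The val_000167 record's two handlers reduce the dtable-io backend to a small JSON contract: GET answers {'is_finished': bool}, and DELETE cancels a task, removing the imported DTable row and the /tmp/dtable-io working directory on the way out. That contract makes client-side consumption a plain poll loop; the sketch below assumes a requests-based client and an illustrative endpoint path, neither of which comes from seahub itself.

import time
import requests

def wait_for_task(base_url, task_id, timeout=60.0, interval=2.0):
    # Poll the status endpoint until the task reports completion or the
    # deadline passes. Per the record, the endpoint answers 400 for a bad
    # task_id, 500 for backend failures, and {'is_finished': bool} on 200.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        resp = requests.get(f'{base_url}/dtable-io-status/',
                            params={'task_id': task_id}, timeout=10)
        resp.raise_for_status()
        if resp.json()['is_finished']:
            return True
        time.sleep(interval)
    return False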
is_on(self) -> bool | None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001904", "prompt": "Implement the Python class `ToonBinarySensor` described below.\n\nClass description:\nDefines an Toon binary sensor.\n\nMethod signatures and docstrings:\n- def __init__(self, coordinator: ToonDataUpdateCoordinator, description: ToonBinarySensorEntityDescription) -> None: Initialize the Toon sensor.\n- def is_on(self) -> bool | None: Return the status of the binary sensor.", "prompted_full_text": "Implement the Python class `ToonBinarySensor` described below.\n\nClass description:\nDefines an Toon binary sensor.\n\nMethod signatures and docstrings:\n- def __init__(self, coordinator: ToonDataUpdateCoordinator, description: ToonBinarySensorEntityDescription) -> None: Initialize the Toon sensor.\n- def is_on(self) -> bool | None: Return the status of the binary sensor.\n\n<|skeleton|>\nclass ToonBinarySensor:\n \"\"\"Defines an Toon binary sensor.\"\"\"\n\n def __init__(self, coordinator: ToonDataUpdateCoordinator, description: ToonBinarySensorEntityDescription) -> None:\n \"\"\"Initialize the Toon sensor.\"\"\"\n <|body_0|>\n\n def is_on(self) -> bool | None:\n \"\"\"Return the status of the binary sensor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(coordinator)\n self.entity_description = description\n self._attr_unique_id = f'{DOMAIN}_{coordinator.data.agreement.agreement_id}_binary_sensor_{description.key}'\n<|end_body_0|>\n\n<|body_start_1|>\n section = getattr(self.coordinator.data, self.entity_description.section)\n value = getattr(section, self.entity_description.measurement)\n if value is None:\n return None\n if self.entity_description.inverted:\n return not value\n return value\n<|end_body_1|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass ToonBinarySensor:\n \"\"\"Defines an Toon binary sensor.\"\"\"\n\n def __init__(self, coordinator: ToonDataUpdateCoordinator, description: ToonBinarySensorEntityDescription) -> None:\n \"\"\"Initialize the Toon sensor.\"\"\"\n <|body_0|>\n\n def is_on(self) -> bool | None:\n \"\"\"Return the status of the binary sensor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ToonBinarySensor:\n \"\"\"Defines an Toon binary sensor.\"\"\"\n\n def __init__(self, coordinator: ToonDataUpdateCoordinator, description: ToonBinarySensorEntityDescription) -> None:\n \"\"\"Initialize the Toon sensor.\"\"\"\n super().__init__(coordinator)\n self.entity_description = description\n self._attr_unique_id = f'{DOMAIN}_{coordinator.data.agreement.agreement_id}_binary_sensor_{description.key}'\n\n def is_on(self) -> bool | None:\n \"\"\"Return the status of the binary sensor.\"\"\"\n section = getattr(self.coordinator.data, self.entity_description.section)\n value = getattr(section, self.entity_description.measurement)\n if value is None:\n return None\n if self.entity_description.inverted:\n return not value\n return value\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/toon/binary_sensor.py", "source_repo": "home-assistant/core", "split": "val", "star_events_count": 35501} {"blob_id": "ae3c32b93921c5df08d556c4b36f8d4678ca8993", "bodies": ["self.matrix = dict()\nself.max_num = maxChoosableInteger\nif (maxChoosableInteger + 1) * maxChoosableInteger / 2 <= desiredTotal:\n return False\nif maxChoosableInteger >= 
desiredTotal:\n return True\nalways_win = False\nfor i in range(1, maxChoosableInteger + 1):\n result = self.dfs(1 << i, desiredTotal - i)\n if result != 1:\n always_win = True\nreturn always_win", "return_data = 0\nfor i in range(1, self.max_num + 1)[::-1]:\n if choose ^ 2 ** i < choose:\n continue\n next_choose = choose ^ 2 ** i\n if next_choose in self.matrix:\n return self.matrix[next_choose]\n if i >= target:\n self.matrix[choose] = 1\n return 1\n else:\n next_target = target - i\n result = self.dfs(next_choose, next_target)\n if result != 1:\n return_data = 1\n break\nself.matrix[choose] = return_data\nreturn return_data"], "bodies_text": "<|body_start_0|>\n self.matrix = dict()\n self.max_num = maxChoosableInteger\n if (maxChoosableInteger + 1) * maxChoosableInteger / 2 <= desiredTotal:\n return False\n if maxChoosableInteger >= desiredTotal:\n return True\n always_win = False\n for i in range(1, maxChoosableInteger + 1):\n result = self.dfs(1 << i, desiredTotal - i)\n if result != 1:\n always_win = True\n return always_win\n<|end_body_0|>\n\n<|body_start_1|>\n return_data = 0\n for i in range(1, self.max_num + 1)[::-1]:\n if choose ^ 2 ** i < choose:\n continue\n next_choose = choose ^ 2 ** i\n if next_choose in self.matrix:\n return self.matrix[next_choose]\n if i >= target:\n self.matrix[choose] = 1\n return 1\n else:\n next_target = target - i\n result = self.dfs(next_choose, next_target)\n if result != 1:\n return_data = 1\n break\n self.matrix[choose] = return_data\n return return_data\n<|end_body_1|>\n", "class_docstring": "", "class_name": "SolutionError", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SolutionError:\n\n def canIWin(self, maxChoosableInteger, desiredTotal):\n \"\"\":type maxChoosableInteger: int :type desiredTotal: int :rtype: bool\"\"\"\n <|body_0|>\n\n def dfs(self, choose, target):\n \"\"\":type choose: int :type target: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.matrix = dict()\n self.max_num = maxChoosableInteger\n if (maxChoosableInteger + 1) * maxChoosableInteger / 2 <= desiredTotal:\n return False\n if maxChoosableInteger >= desiredTotal:\n return True\n always_win = False\n for i in range(1, maxChoosableInteger + 1):\n result = self.dfs(1 << i, desiredTotal - i)\n if result != 1:\n always_win = True\n return always_win\n<|end_body_0|>\n\n<|body_start_1|>\n return_data = 0\n for i in range(1, self.max_num + 1)[::-1]:\n if choose ^ 2 ** i < choose:\n continue\n next_choose = choose ^ 2 ** i\n if next_choose in self.matrix:\n return self.matrix[next_choose]\n if i >= target:\n self.matrix[choose] = 1\n return 1\n else:\n next_target = target - i\n result = self.dfs(next_choose, next_target)\n if result != 1:\n return_data = 1\n break\n self.matrix[choose] = return_data\n return return_data\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000169", "length_bytes": 4497, "license_type": "no_license", "methods": [{"docstring": ":type maxChoosableInteger: int :type desiredTotal: int :rtype: bool", "name": "canIWin", "signature": "def canIWin(self, maxChoosableInteger, desiredTotal)"}, {"docstring": ":type choose: int :type target: int :rtype: int", "name": "dfs", "signature": "def dfs(self, choose, target)"}], "n_methods": 2, "prompt": "Implement the Python class `SolutionError` described below.\n\nClass description:\nImplement the SolutionError class.\n\nMethod signatures and docstrings:\n- def canIWin(self, maxChoosableInteger, 
desiredTotal): :type maxChoosableInteger: int :type desiredTotal: int :rtype: bool\n- def dfs(self, choose, target): :type choose: int :type target: int :rtype: int", "prompted_full_text": "Implement the Python class `SolutionError` described below.\n\nClass description:\nImplement the SolutionError class.\n\nMethod signatures and docstrings:\n- def canIWin(self, maxChoosableInteger, desiredTotal): :type maxChoosableInteger: int :type desiredTotal: int :rtype: bool\n- def dfs(self, choose, target): :type choose: int :type target: int :rtype: int\n\n<|skeleton|>\nclass SolutionError:\n\n def canIWin(self, maxChoosableInteger, desiredTotal):\n \"\"\":type maxChoosableInteger: int :type desiredTotal: int :rtype: bool\"\"\"\n <|body_0|>\n\n def dfs(self, choose, target):\n \"\"\":type choose: int :type target: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.matrix = dict()\n self.max_num = maxChoosableInteger\n if (maxChoosableInteger + 1) * maxChoosableInteger / 2 <= desiredTotal:\n return False\n if maxChoosableInteger >= desiredTotal:\n return True\n always_win = False\n for i in range(1, maxChoosableInteger + 1):\n result = self.dfs(1 << i, desiredTotal - i)\n if result != 1:\n always_win = True\n return always_win\n<|end_body_0|>\n\n<|body_start_1|>\n return_data = 0\n for i in range(1, self.max_num + 1)[::-1]:\n if choose ^ 2 ** i < choose:\n continue\n next_choose = choose ^ 2 ** i\n if next_choose in self.matrix:\n return self.matrix[next_choose]\n if i >= target:\n self.matrix[choose] = 1\n return 1\n else:\n next_target = target - i\n result = self.dfs(next_choose, next_target)\n if result != 1:\n return_data = 1\n break\n self.matrix[choose] = return_data\n return return_data\n<|end_body_1|>\n", "revision_id": "f832227c4d0e0b1c0cc326561187004ef24e2a68", "skeleton": "<|skeleton|>\nclass SolutionError:\n\n def canIWin(self, maxChoosableInteger, desiredTotal):\n \"\"\":type maxChoosableInteger: int :type desiredTotal: int :rtype: bool\"\"\"\n <|body_0|>\n\n def dfs(self, choose, target):\n \"\"\":type choose: int :type target: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SolutionError:\n def canIWin(self, maxChoosableInteger, desiredTotal):\n \"\"\":type maxChoosableInteger: int :type desiredTotal: int :rtype: bool\"\"\"\n self.matrix = dict()\n self.max_num = maxChoosableInteger\n if (maxChoosableInteger + 1) * maxChoosableInteger / 2 <= desiredTotal:\n return False\n if maxChoosableInteger >= desiredTotal:\n return True\n always_win = False\n for i in range(1, maxChoosableInteger + 1):\n result = self.dfs(1 << i, desiredTotal - i)\n if result != 1:\n always_win = True\n return always_win\n\n def dfs(self, choose, target):\n \"\"\":type choose: int :type target: int :rtype: int\"\"\"\n return_data = 0\n for i in range(1, self.max_num + 1)[::-1]:\n if choose ^ 2 ** i < choose:\n continue\n next_choose = choose ^ 2 ** i\n if next_choose in self.matrix:\n return self.matrix[next_choose]\n if i >= target:\n self.matrix[choose] = 1\n return 1\n else:\n next_target = target - i\n result = self.dfs(next_choose, next_target)\n if result != 1:\n return_data = 1\n break\n self.matrix[choose] = return_data\n return return_data\n", "source": "the_stack_v2_python_sparse", "source_path": "464.py", "source_repo": "Gackle/leetcode_practice", "split": "val", "star_events_count": 0} {"blob_id": 
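The val_000169 class earns its SolutionError name in two places: the dfs returns a memoized child value to the parent from the middle of its loop, abandoning the untried numbers, and the <= reachability guard wrongly reports a loss when the numbers sum exactly to the target (maxChoosable=3, total=6 is a forced first-player win). The textbook bitmask formulation below — state is the set of used numbers, value is "can the player to move force a win" — is the standard approach, not a patch of the record's exact code.

from functools import lru_cache

def can_i_win(max_choosable, desired_total):
    # If even taking every number cannot reach the total, nobody ever wins.
    if max_choosable * (max_choosable + 1) // 2 < desired_total:
        return False
    if desired_total <= 0:
        return True

    @lru_cache(maxsize=None)
    def wins(used, remaining):
        # used: bitmask where bit i-1 marks that i was already taken;
        # remaining is implied by used, so caching both is still sound.
        for i in range(1, max_choosable + 1):
            bit = 1 << (i - 1)
            if used & bit:
                continue
            # Either i reaches the total now, or it leaves the opponent
            # in a position from which they cannot force a win.
            if i >= remaining or not wins(used | bit, remaining - i):
                return True
        return False

    return wins(0, desired_total)

assert can_i_win(10, 11) is False  # LeetCode 464's sample: first player loses
assert can_i_win(3, 6) is True     # the boundary case the record rejects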
"93516bf860a87190dee719d7d8fc5341eb3ab589", "bodies": ["if cls.USE_PLUGIN_MANAGER:\n return set(cls.get_available_plugins().values())\nelse:\n return set()", "hash_obj = sha1()\nsorted_transformers = sorted(cls.get_registered_transformers(), key=lambda t: t.name())\nfor transformer in sorted_transformers:\n hash_obj.update(transformer.name().encode())\n hash_obj.update(str(transformer.WRITE_VERSION).encode())\nreturn b64encode(hash_obj.digest()).decode('utf-8')", "registered_transformer_names = {reg_trans.name() for reg_trans in cls.get_registered_transformers()}\nrequested_transformer_names = {transformer.name() for transformer in transformers}\nreturn requested_transformer_names - registered_transformer_names"], "bodies_text": "<|body_start_0|>\n if cls.USE_PLUGIN_MANAGER:\n return set(cls.get_available_plugins().values())\n else:\n return set()\n<|end_body_0|>\n\n<|body_start_1|>\n hash_obj = sha1()\n sorted_transformers = sorted(cls.get_registered_transformers(), key=lambda t: t.name())\n for transformer in sorted_transformers:\n hash_obj.update(transformer.name().encode())\n hash_obj.update(str(transformer.WRITE_VERSION).encode())\n return b64encode(hash_obj.digest()).decode('utf-8')\n<|end_body_1|>\n\n<|body_start_2|>\n registered_transformer_names = {reg_trans.name() for reg_trans in cls.get_registered_transformers()}\n requested_transformer_names = {transformer.name() for transformer in transformers}\n return requested_transformer_names - registered_transformer_names\n<|end_body_2|>\n", "class_docstring": "Registry for all of the block structure transformers that have been made available. All block structure transformers should implement `BlockStructureTransformer`.", "class_name": "TransformerRegistry", "detected_licenses": ["AGPL-3.0-only", "AGPL-3.0-or-later", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TransformerRegistry:\n \"\"\"Registry for all of the block structure transformers that have been made available. All block structure transformers should implement `BlockStructureTransformer`.\"\"\"\n\n def get_registered_transformers(cls):\n \"\"\"Returns a set of all registered transformers. Returns: {BlockStructureTransformer} - All transformers that are registered with the platform's PluginManager.\"\"\"\n <|body_0|>\n\n def get_write_version_hash(cls):\n \"\"\"Returns a deterministic hash value of the WRITE_VERSION of all registered transformers.\"\"\"\n <|body_1|>\n\n def find_unregistered(cls, transformers):\n \"\"\"Find and returns the names of all the transformers from the given list that aren't registered with the platform's PluginManager. Arguments: transformers ([BlockStructureTransformer] - List of transformers to check in the registry. 
Returns: set([string]) - Set of names of a subset of the given transformers that weren't found in the registry.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if cls.USE_PLUGIN_MANAGER:\n return set(cls.get_available_plugins().values())\n else:\n return set()\n<|end_body_0|>\n\n<|body_start_1|>\n hash_obj = sha1()\n sorted_transformers = sorted(cls.get_registered_transformers(), key=lambda t: t.name())\n for transformer in sorted_transformers:\n hash_obj.update(transformer.name().encode())\n hash_obj.update(str(transformer.WRITE_VERSION).encode())\n return b64encode(hash_obj.digest()).decode('utf-8')\n<|end_body_1|>\n\n<|body_start_2|>\n registered_transformer_names = {reg_trans.name() for reg_trans in cls.get_registered_transformers()}\n requested_transformer_names = {transformer.name() for transformer in transformers}\n return requested_transformer_names - registered_transformer_names\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000170", "length_bytes": 2371, "license_type": "permissive", "methods": [{"docstring": "Returns a set of all registered transformers. Returns: {BlockStructureTransformer} - All transformers that are registered with the platform's PluginManager.", "name": "get_registered_transformers", "signature": "def get_registered_transformers(cls)"}, {"docstring": "Returns a deterministic hash value of the WRITE_VERSION of all registered transformers.", "name": "get_write_version_hash", "signature": "def get_write_version_hash(cls)"}, {"docstring": "Find and returns the names of all the transformers from the given list that aren't registered with the platform's PluginManager. Arguments: transformers ([BlockStructureTransformer] - List of transformers to check in the registry. Returns: set([string]) - Set of names of a subset of the given transformers that weren't found in the registry.", "name": "find_unregistered", "signature": "def find_unregistered(cls, transformers)"}], "n_methods": 3, "prompt": "Implement the Python class `TransformerRegistry` described below.\n\nClass description:\nRegistry for all of the block structure transformers that have been made available. All block structure transformers should implement `BlockStructureTransformer`.\n\nMethod signatures and docstrings:\n- def get_registered_transformers(cls): Returns a set of all registered transformers. Returns: {BlockStructureTransformer} - All transformers that are registered with the platform's PluginManager.\n- def get_write_version_hash(cls): Returns a deterministic hash value of the WRITE_VERSION of all registered transformers.\n- def find_unregistered(cls, transformers): Find and returns the names of all the transformers from the given list that aren't registered with the platform's PluginManager. Arguments: transformers ([BlockStructureTransformer] - List of transformers to check in the registry. Returns: set([string]) - Set of names of a subset of the given transformers that weren't found in the registry.", "prompted_full_text": "Implement the Python class `TransformerRegistry` described below.\n\nClass description:\nRegistry for all of the block structure transformers that have been made available. All block structure transformers should implement `BlockStructureTransformer`.\n\nMethod signatures and docstrings:\n- def get_registered_transformers(cls): Returns a set of all registered transformers. 
Returns: {BlockStructureTransformer} - All transformers that are registered with the platform's PluginManager.\n- def get_write_version_hash(cls): Returns a deterministic hash value of the WRITE_VERSION of all registered transformers.\n- def find_unregistered(cls, transformers): Find and returns the names of all the transformers from the given list that aren't registered with the platform's PluginManager. Arguments: transformers ([BlockStructureTransformer] - List of transformers to check in the registry. Returns: set([string]) - Set of names of a subset of the given transformers that weren't found in the registry.\n\n<|skeleton|>\nclass TransformerRegistry:\n \"\"\"Registry for all of the block structure transformers that have been made available. All block structure transformers should implement `BlockStructureTransformer`.\"\"\"\n\n def get_registered_transformers(cls):\n \"\"\"Returns a set of all registered transformers. Returns: {BlockStructureTransformer} - All transformers that are registered with the platform's PluginManager.\"\"\"\n <|body_0|>\n\n def get_write_version_hash(cls):\n \"\"\"Returns a deterministic hash value of the WRITE_VERSION of all registered transformers.\"\"\"\n <|body_1|>\n\n def find_unregistered(cls, transformers):\n \"\"\"Find and returns the names of all the transformers from the given list that aren't registered with the platform's PluginManager. Arguments: transformers ([BlockStructureTransformer] - List of transformers to check in the registry. Returns: set([string]) - Set of names of a subset of the given transformers that weren't found in the registry.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if cls.USE_PLUGIN_MANAGER:\n return set(cls.get_available_plugins().values())\n else:\n return set()\n<|end_body_0|>\n\n<|body_start_1|>\n hash_obj = sha1()\n sorted_transformers = sorted(cls.get_registered_transformers(), key=lambda t: t.name())\n for transformer in sorted_transformers:\n hash_obj.update(transformer.name().encode())\n hash_obj.update(str(transformer.WRITE_VERSION).encode())\n return b64encode(hash_obj.digest()).decode('utf-8')\n<|end_body_1|>\n\n<|body_start_2|>\n registered_transformer_names = {reg_trans.name() for reg_trans in cls.get_registered_transformers()}\n requested_transformer_names = {transformer.name() for transformer in transformers}\n return requested_transformer_names - registered_transformer_names\n<|end_body_2|>\n", "revision_id": "5809eaca7079a15ee56b0b7fcfea425337046c97", "skeleton": "<|skeleton|>\nclass TransformerRegistry:\n \"\"\"Registry for all of the block structure transformers that have been made available. All block structure transformers should implement `BlockStructureTransformer`.\"\"\"\n\n def get_registered_transformers(cls):\n \"\"\"Returns a set of all registered transformers. Returns: {BlockStructureTransformer} - All transformers that are registered with the platform's PluginManager.\"\"\"\n <|body_0|>\n\n def get_write_version_hash(cls):\n \"\"\"Returns a deterministic hash value of the WRITE_VERSION of all registered transformers.\"\"\"\n <|body_1|>\n\n def find_unregistered(cls, transformers):\n \"\"\"Find and returns the names of all the transformers from the given list that aren't registered with the platform's PluginManager. Arguments: transformers ([BlockStructureTransformer] - List of transformers to check in the registry. 
Returns: set([string]) - Set of names of a subset of the given transformers that weren't found in the registry.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TransformerRegistry:\n \"\"\"Registry for all of the block structure transformers that have been made available. All block structure transformers should implement `BlockStructureTransformer`.\"\"\"\n\n def get_registered_transformers(cls):\n \"\"\"Returns a set of all registered transformers. Returns: {BlockStructureTransformer} - All transformers that are registered with the platform's PluginManager.\"\"\"\n if cls.USE_PLUGIN_MANAGER:\n return set(cls.get_available_plugins().values())\n else:\n return set()\n\n def get_write_version_hash(cls):\n \"\"\"Returns a deterministic hash value of the WRITE_VERSION of all registered transformers.\"\"\"\n hash_obj = sha1()\n sorted_transformers = sorted(cls.get_registered_transformers(), key=lambda t: t.name())\n for transformer in sorted_transformers:\n hash_obj.update(transformer.name().encode())\n hash_obj.update(str(transformer.WRITE_VERSION).encode())\n return b64encode(hash_obj.digest()).decode('utf-8')\n\n def find_unregistered(cls, transformers):\n \"\"\"Find and returns the names of all the transformers from the given list that aren't registered with the platform's PluginManager. Arguments: transformers ([BlockStructureTransformer] - List of transformers to check in the registry. Returns: set([string]) - Set of names of a subset of the given transformers that weren't found in the registry.\"\"\"\n registered_transformer_names = {reg_trans.name() for reg_trans in cls.get_registered_transformers()}\n requested_transformer_names = {transformer.name() for transformer in transformers}\n return requested_transformer_names - registered_transformer_names\n", "source": "the_stack_v2_python_sparse", "source_path": "Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/content/block_structure/transformer_registry.py", "source_repo": "luque/better-ways-of-thinking-about-software", "split": "val", "star_events_count": 3} {"blob_id": "5236529a03a6554a9bcbc94899980dec3f92ea1e", "bodies": ["if transmission_type is not None:\n record['transmission_type'] = transmission_type\nalternative = record['alternative']\nattributes = record\nif attr_filter:\n attributes = {k: v for k, v in record.items() if k not in attr_filter}\nif 'summary_variant_index' in record:\n summary_index = record['summary_variant_index']\nelse:\n summary_index = record.get('summary_index')\nallele_index = record['allele_index']\nchrom = record['chrom']\nposition = record['position']\nend_position = record.get('end_position')\nreference = record['reference']\nalternative = record.get('alternative')\nallele_type = record.get('variant_type', None)\ntransmission_type = TransmissionType(record.get('transmission_type', TransmissionType.transmitted))\nif position is not None and end_position is not None and (reference is None) and (alternative is None) and (allele_type is None):\n allele_type = SummaryAllele.Type.position\nif allele_type is not None:\n allele_type = core.Allele.Type(allele_type)\nreturn SummaryAllele(chrom, position, reference, alternative=alternative, summary_index=summary_index, end_position=record.get('end_position', None), allele_type=allele_type, allele_index=allele_index, transmission_type=transmission_type, attributes=attributes)", "assert len(records) 
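The val_000170 record's get_write_version_hash sorts transformers by name before folding each (name, WRITE_VERSION) pair into one sha1, so the digest depends only on what is registered, never on registration order — exactly the property a cache-invalidation key needs. That invariant is easy to exercise in isolation; the toy transformer below is illustrative and has nothing to do with edx-platform's real plugin manager.

from base64 import b64encode
from hashlib import sha1

class ToyTransformer:
    # Stand-in exposing only the two things the hash depends on.
    def __init__(self, name, write_version):
        self._name = name
        self.WRITE_VERSION = write_version

    def name(self):
        return self._name

def write_version_hash(transformers):
    # Same recipe as the record: sort by name, then fold each name and
    # WRITE_VERSION into a single sha1, base64-encoded for compactness.
    h = sha1()
    for t in sorted(transformers, key=lambda t: t.name()):
        h.update(t.name().encode())
        h.update(str(t.WRITE_VERSION).encode())
    return b64encode(h.digest()).decode('utf-8')

a, b = ToyTransformer('grades', 2), ToyTransformer('visibility', 1)
# Registration order must not matter; bumping a WRITE_VERSION must.
assert write_version_hash([a, b]) == write_version_hash([b, a])
assert write_version_hash([a, b]) != write_version_hash([ToyTransformer('grades', 3), b])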
> 0\nalleles = []\nfor record in records:\n allele = SummaryVariantFactory.summary_allele_from_record(record, transmission_type=transmission_type, attr_filter=attr_filter)\n alleles.append(allele)\nif not alleles[0].is_reference_allele:\n ref_allele = SummaryAllele.create_reference_allele(alleles[0])\n alleles.insert(0, ref_allele)\nallele_count = {'allele_count': len(alleles)}\nfor allele in alleles:\n allele.update_attributes(allele_count)\nreturn SummaryVariant(alleles)", "records = []\nalts = vcf_variant.alts if vcf_variant.alts is not None else ['.']\nallele_count = len(alts) + 1\nrecords.append({'chrom': vcf_variant.chrom, 'position': vcf_variant.pos, 'reference': vcf_variant.ref, 'alternative': None, 'summary_variant_index': summary_variant_index, 'allele_index': 0, 'allele_count': allele_count})\nfor allele_index, alt in enumerate(alts):\n records.append({'chrom': vcf_variant.chrom, 'position': vcf_variant.pos, 'reference': vcf_variant.ref, 'alternative': alt, 'summary_variant_index': summary_variant_index, 'allele_index': allele_index + 1, 'allele_count': allele_count})\nreturn SummaryVariantFactory.summary_variant_from_records(records, transmission_type=transmission_type)"], "bodies_text": "<|body_start_0|>\n if transmission_type is not None:\n record['transmission_type'] = transmission_type\n alternative = record['alternative']\n attributes = record\n if attr_filter:\n attributes = {k: v for k, v in record.items() if k not in attr_filter}\n if 'summary_variant_index' in record:\n summary_index = record['summary_variant_index']\n else:\n summary_index = record.get('summary_index')\n allele_index = record['allele_index']\n chrom = record['chrom']\n position = record['position']\n end_position = record.get('end_position')\n reference = record['reference']\n alternative = record.get('alternative')\n allele_type = record.get('variant_type', None)\n transmission_type = TransmissionType(record.get('transmission_type', TransmissionType.transmitted))\n if position is not None and end_position is not None and (reference is None) and (alternative is None) and (allele_type is None):\n allele_type = SummaryAllele.Type.position\n if allele_type is not None:\n allele_type = core.Allele.Type(allele_type)\n return SummaryAllele(chrom, position, reference, alternative=alternative, summary_index=summary_index, end_position=record.get('end_position', None), allele_type=allele_type, allele_index=allele_index, transmission_type=transmission_type, attributes=attributes)\n<|end_body_0|>\n\n<|body_start_1|>\n assert len(records) > 0\n alleles = []\n for record in records:\n allele = SummaryVariantFactory.summary_allele_from_record(record, transmission_type=transmission_type, attr_filter=attr_filter)\n alleles.append(allele)\n if not alleles[0].is_reference_allele:\n ref_allele = SummaryAllele.create_reference_allele(alleles[0])\n alleles.insert(0, ref_allele)\n allele_count = {'allele_count': len(alleles)}\n for allele in alleles:\n allele.update_attributes(allele_count)\n return SummaryVariant(alleles)\n<|end_body_1|>\n\n<|body_start_2|>\n records = []\n alts = vcf_variant.alts if vcf_variant.alts is not None else ['.']\n allele_count = len(alts) + 1\n records.append({'chrom': vcf_variant.chrom, 'position': vcf_variant.pos, 'reference': vcf_variant.ref, 'alternative': None, 'summary_variant_index': summary_variant_index, 'allele_index': 0, 'allele_count': allele_count})\n for allele_index, alt in enumerate(alts):\n records.append({'chrom': vcf_variant.chrom, 'position': vcf_variant.pos, 'reference': 
vcf_variant.ref, 'alternative': alt, 'summary_variant_index': summary_variant_index, 'allele_index': allele_index + 1, 'allele_count': allele_count})\n return SummaryVariantFactory.summary_variant_from_records(records, transmission_type=transmission_type)\n<|end_body_2|>\n", "class_docstring": "Factory for summary variants.", "class_name": "SummaryVariantFactory", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SummaryVariantFactory:\n \"\"\"Factory for summary variants.\"\"\"\n\n def summary_allele_from_record(record: dict[str, Any], transmission_type: Optional[TransmissionType]=None, attr_filter: Optional[set[str]]=None) -> SummaryAllele:\n \"\"\"Build a summary allele from a dictionary (record).\"\"\"\n <|body_0|>\n\n def summary_variant_from_records(records: list[dict[str, Any]], transmission_type: Optional[TransmissionType]=None, attr_filter: Optional[set[str]]=None) -> SummaryVariant:\n \"\"\"Build summary variant from a list of dictionaries (records).\"\"\"\n <|body_1|>\n\n def summary_variant_from_vcf(vcf_variant: pysam.VariantRecord, summary_variant_index: int, transmission_type: TransmissionType) -> SummaryVariant:\n \"\"\"Build sumamry variant from a pysam VCF record.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if transmission_type is not None:\n record['transmission_type'] = transmission_type\n alternative = record['alternative']\n attributes = record\n if attr_filter:\n attributes = {k: v for k, v in record.items() if k not in attr_filter}\n if 'summary_variant_index' in record:\n summary_index = record['summary_variant_index']\n else:\n summary_index = record.get('summary_index')\n allele_index = record['allele_index']\n chrom = record['chrom']\n position = record['position']\n end_position = record.get('end_position')\n reference = record['reference']\n alternative = record.get('alternative')\n allele_type = record.get('variant_type', None)\n transmission_type = TransmissionType(record.get('transmission_type', TransmissionType.transmitted))\n if position is not None and end_position is not None and (reference is None) and (alternative is None) and (allele_type is None):\n allele_type = SummaryAllele.Type.position\n if allele_type is not None:\n allele_type = core.Allele.Type(allele_type)\n return SummaryAllele(chrom, position, reference, alternative=alternative, summary_index=summary_index, end_position=record.get('end_position', None), allele_type=allele_type, allele_index=allele_index, transmission_type=transmission_type, attributes=attributes)\n<|end_body_0|>\n\n<|body_start_1|>\n assert len(records) > 0\n alleles = []\n for record in records:\n allele = SummaryVariantFactory.summary_allele_from_record(record, transmission_type=transmission_type, attr_filter=attr_filter)\n alleles.append(allele)\n if not alleles[0].is_reference_allele:\n ref_allele = SummaryAllele.create_reference_allele(alleles[0])\n alleles.insert(0, ref_allele)\n allele_count = {'allele_count': len(alleles)}\n for allele in alleles:\n allele.update_attributes(allele_count)\n return SummaryVariant(alleles)\n<|end_body_1|>\n\n<|body_start_2|>\n records = []\n alts = vcf_variant.alts if vcf_variant.alts is not None else ['.']\n allele_count = len(alts) + 1\n records.append({'chrom': vcf_variant.chrom, 'position': vcf_variant.pos, 'reference': vcf_variant.ref, 'alternative': None, 'summary_variant_index': summary_variant_index, 'allele_index': 0, 'allele_count': allele_count})\n for allele_index, alt in 
enumerate(alts):\n records.append({'chrom': vcf_variant.chrom, 'position': vcf_variant.pos, 'reference': vcf_variant.ref, 'alternative': alt, 'summary_variant_index': summary_variant_index, 'allele_index': allele_index + 1, 'allele_count': allele_count})\n return SummaryVariantFactory.summary_variant_from_records(records, transmission_type=transmission_type)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000171", "length_bytes": 32757, "license_type": "permissive", "methods": [{"docstring": "Build a summary allele from a dictionary (record).", "name": "summary_allele_from_record", "signature": "def summary_allele_from_record(record: dict[str, Any], transmission_type: Optional[TransmissionType]=None, attr_filter: Optional[set[str]]=None) -> SummaryAllele"}, {"docstring": "Build summary variant from a list of dictionaries (records).", "name": "summary_variant_from_records", "signature": "def summary_variant_from_records(records: list[dict[str, Any]], transmission_type: Optional[TransmissionType]=None, attr_filter: Optional[set[str]]=None) -> SummaryVariant"}, {"docstring": "Build sumamry variant from a pysam VCF record.", "name": "summary_variant_from_vcf", "signature": "def summary_variant_from_vcf(vcf_variant: pysam.VariantRecord, summary_variant_index: int, transmission_type: TransmissionType) -> SummaryVariant"}], "n_methods": 3, "prompt": "Implement the Python class `SummaryVariantFactory` described below.\n\nClass description:\nFactory for summary variants.\n\nMethod signatures and docstrings:\n- def summary_allele_from_record(record: dict[str, Any], transmission_type: Optional[TransmissionType]=None, attr_filter: Optional[set[str]]=None) -> SummaryAllele: Build a summary allele from a dictionary (record).\n- def summary_variant_from_records(records: list[dict[str, Any]], transmission_type: Optional[TransmissionType]=None, attr_filter: Optional[set[str]]=None) -> SummaryVariant: Build summary variant from a list of dictionaries (records).\n- def summary_variant_from_vcf(vcf_variant: pysam.VariantRecord, summary_variant_index: int, transmission_type: TransmissionType) -> SummaryVariant: Build sumamry variant from a pysam VCF record.", "prompted_full_text": "Implement the Python class `SummaryVariantFactory` described below.\n\nClass description:\nFactory for summary variants.\n\nMethod signatures and docstrings:\n- def summary_allele_from_record(record: dict[str, Any], transmission_type: Optional[TransmissionType]=None, attr_filter: Optional[set[str]]=None) -> SummaryAllele: Build a summary allele from a dictionary (record).\n- def summary_variant_from_records(records: list[dict[str, Any]], transmission_type: Optional[TransmissionType]=None, attr_filter: Optional[set[str]]=None) -> SummaryVariant: Build summary variant from a list of dictionaries (records).\n- def summary_variant_from_vcf(vcf_variant: pysam.VariantRecord, summary_variant_index: int, transmission_type: TransmissionType) -> SummaryVariant: Build sumamry variant from a pysam VCF record.\n\n<|skeleton|>\nclass SummaryVariantFactory:\n \"\"\"Factory for summary variants.\"\"\"\n\n def summary_allele_from_record(record: dict[str, Any], transmission_type: Optional[TransmissionType]=None, attr_filter: Optional[set[str]]=None) -> SummaryAllele:\n \"\"\"Build a summary allele from a dictionary (record).\"\"\"\n <|body_0|>\n\n def summary_variant_from_records(records: list[dict[str, Any]], transmission_type: Optional[TransmissionType]=None, attr_filter: Optional[set[str]]=None) -> SummaryVariant:\n \"\"\"Build summary 
variant from a list of dictionaries (records).\"\"\"\n <|body_1|>\n\n def summary_variant_from_vcf(vcf_variant: pysam.VariantRecord, summary_variant_index: int, transmission_type: TransmissionType) -> SummaryVariant:\n \"\"\"Build summary variant from a pysam VCF record.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if transmission_type is not None:\n record['transmission_type'] = transmission_type\n alternative = record['alternative']\n attributes = record\n if attr_filter:\n attributes = {k: v for k, v in record.items() if k not in attr_filter}\n if 'summary_variant_index' in record:\n summary_index = record['summary_variant_index']\n else:\n summary_index = record.get('summary_index')\n allele_index = record['allele_index']\n chrom = record['chrom']\n position = record['position']\n end_position = record.get('end_position')\n reference = record['reference']\n alternative = record.get('alternative')\n allele_type = record.get('variant_type', None)\n transmission_type = TransmissionType(record.get('transmission_type', TransmissionType.transmitted))\n if position is not None and end_position is not None and (reference is None) and (alternative is None) and (allele_type is None):\n allele_type = SummaryAllele.Type.position\n if allele_type is not None:\n allele_type = core.Allele.Type(allele_type)\n return SummaryAllele(chrom, position, reference, alternative=alternative, summary_index=summary_index, end_position=record.get('end_position', None), allele_type=allele_type, allele_index=allele_index, transmission_type=transmission_type, attributes=attributes)\n<|end_body_0|>\n\n<|body_start_1|>\n assert len(records) > 0\n alleles = []\n for record in records:\n allele = SummaryVariantFactory.summary_allele_from_record(record, transmission_type=transmission_type, attr_filter=attr_filter)\n alleles.append(allele)\n if not alleles[0].is_reference_allele:\n ref_allele = SummaryAllele.create_reference_allele(alleles[0])\n alleles.insert(0, ref_allele)\n allele_count = {'allele_count': len(alleles)}\n for allele in alleles:\n allele.update_attributes(allele_count)\n return SummaryVariant(alleles)\n<|end_body_1|>\n\n<|body_start_2|>\n records = []\n alts = vcf_variant.alts if vcf_variant.alts is not None else ['.']\n allele_count = len(alts) + 1\n records.append({'chrom': vcf_variant.chrom, 'position': vcf_variant.pos, 'reference': vcf_variant.ref, 'alternative': None, 'summary_variant_index': summary_variant_index, 'allele_index': 0, 'allele_count': allele_count})\n for allele_index, alt in enumerate(alts):\n records.append({'chrom': vcf_variant.chrom, 'position': vcf_variant.pos, 'reference': vcf_variant.ref, 'alternative': alt, 'summary_variant_index': summary_variant_index, 'allele_index': allele_index + 1, 'allele_count': allele_count})\n return SummaryVariantFactory.summary_variant_from_records(records, transmission_type=transmission_type)\n<|end_body_2|>\n", "revision_id": "21c8d4d32f632431704556f8bcb158f9bb686239", "skeleton": "<|skeleton|>\nclass SummaryVariantFactory:\n \"\"\"Factory for summary variants.\"\"\"\n\n def summary_allele_from_record(record: dict[str, Any], transmission_type: Optional[TransmissionType]=None, attr_filter: Optional[set[str]]=None) -> SummaryAllele:\n \"\"\"Build a summary allele from a dictionary (record).\"\"\"\n <|body_0|>\n\n def summary_variant_from_records(records: list[dict[str, Any]], transmission_type: Optional[TransmissionType]=None, attr_filter: Optional[set[str]]=None) -> SummaryVariant:\n \"\"\"Build summary variant from a list of 
dictionaries (records).\"\"\"\n <|body_1|>\n\n def summary_variant_from_vcf(vcf_variant: pysam.VariantRecord, summary_variant_index: int, transmission_type: TransmissionType) -> SummaryVariant:\n \"\"\"Build summary variant from a pysam VCF record.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SummaryVariantFactory:\n \"\"\"Factory for summary variants.\"\"\"\n\n def summary_allele_from_record(record: dict[str, Any], transmission_type: Optional[TransmissionType]=None, attr_filter: Optional[set[str]]=None) -> SummaryAllele:\n \"\"\"Build a summary allele from a dictionary (record).\"\"\"\n if transmission_type is not None:\n record['transmission_type'] = transmission_type\n alternative = record['alternative']\n attributes = record\n if attr_filter:\n attributes = {k: v for k, v in record.items() if k not in attr_filter}\n if 'summary_variant_index' in record:\n summary_index = record['summary_variant_index']\n else:\n summary_index = record.get('summary_index')\n allele_index = record['allele_index']\n chrom = record['chrom']\n position = record['position']\n end_position = record.get('end_position')\n reference = record['reference']\n alternative = record.get('alternative')\n allele_type = record.get('variant_type', None)\n transmission_type = TransmissionType(record.get('transmission_type', TransmissionType.transmitted))\n if position is not None and end_position is not None and (reference is None) and (alternative is None) and (allele_type is None):\n allele_type = SummaryAllele.Type.position\n if allele_type is not None:\n allele_type = core.Allele.Type(allele_type)\n return SummaryAllele(chrom, position, reference, alternative=alternative, summary_index=summary_index, end_position=record.get('end_position', None), allele_type=allele_type, allele_index=allele_index, transmission_type=transmission_type, attributes=attributes)\n\n def summary_variant_from_records(records: list[dict[str, Any]], transmission_type: Optional[TransmissionType]=None, attr_filter: Optional[set[str]]=None) -> SummaryVariant:\n \"\"\"Build summary variant from a list of dictionaries (records).\"\"\"\n assert len(records) > 0\n alleles = []\n for record in records:\n allele = SummaryVariantFactory.summary_allele_from_record(record, transmission_type=transmission_type, attr_filter=attr_filter)\n alleles.append(allele)\n if not alleles[0].is_reference_allele:\n ref_allele = SummaryAllele.create_reference_allele(alleles[0])\n alleles.insert(0, ref_allele)\n allele_count = {'allele_count': len(alleles)}\n for allele in alleles:\n allele.update_attributes(allele_count)\n return SummaryVariant(alleles)\n\n def summary_variant_from_vcf(vcf_variant: pysam.VariantRecord, summary_variant_index: int, transmission_type: TransmissionType) -> SummaryVariant:\n \"\"\"Build summary variant from a pysam VCF record.\"\"\"\n records = []\n alts = vcf_variant.alts if vcf_variant.alts is not None else ['.']\n allele_count = len(alts) + 1\n records.append({'chrom': vcf_variant.chrom, 'position': vcf_variant.pos, 'reference': vcf_variant.ref, 'alternative': None, 'summary_variant_index': summary_variant_index, 'allele_index': 0, 'allele_count': allele_count})\n for allele_index, alt in enumerate(alts):\n records.append({'chrom': vcf_variant.chrom, 'position': vcf_variant.pos, 'reference': vcf_variant.ref, 'alternative': alt, 'summary_variant_index': summary_variant_index, 'allele_index': allele_index + 1, 
'allele_count': allele_count})\n return SummaryVariantFactory.summary_variant_from_records(records, transmission_type=transmission_type)\n", "source": "the_stack_v2_python_sparse", "source_path": "dae/dae/variants/variant.py", "source_repo": "iossifovlab/gpf", "split": "val", "star_events_count": 5} {"blob_id": "8df6a2c611ec0b231297b531ecb457fded5a8281", "bodies": ["for start in range(0, 101, 20):\n page_url = self.url.format(start)\n yield scrapy.Request(url=page_url, callback=self.parse)", "html = json.loads(response.text)\nfor one_dict in html:\n item = DoubanItem()\n item['rank'] = one_dict['rank']\n item['title'] = one_dict['title']\n item['score'] = one_dict['score']\n yield item"], "bodies_text": "<|body_start_0|>\n for start in range(0, 101, 20):\n page_url = self.url.format(start)\n yield scrapy.Request(url=page_url, callback=self.parse)\n<|end_body_0|>\n\n<|body_start_1|>\n html = json.loads(response.text)\n for one_dict in html:\n item = DoubanItem()\n item['rank'] = one_dict['rank']\n item['title'] = one_dict['title']\n item['score'] = one_dict['score']\n yield item\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DoubanSpider", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DoubanSpider:\n\n def start_requests(self):\n \"\"\"生成所有要抓取的URL地址,交给调度器入队列\"\"\"\n <|body_0|>\n\n def parse(self, response):\n \"\"\"解析提取数据\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for start in range(0, 101, 20):\n page_url = self.url.format(start)\n yield scrapy.Request(url=page_url, callback=self.parse)\n<|end_body_0|>\n\n<|body_start_1|>\n html = json.loads(response.text)\n for one_dict in html:\n item = DoubanItem()\n item['rank'] = one_dict['rank']\n item['title'] = one_dict['title']\n item['score'] = one_dict['score']\n yield item\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000172", "length_bytes": 996, "license_type": "permissive", "methods": [{"docstring": "生成所有要抓取的URL地址,交给调度器入队列", "name": "start_requests", "signature": "def start_requests(self)"}, {"docstring": "解析提取数据", "name": "parse", "signature": "def parse(self, response)"}], "n_methods": 2, "prompt": "Implement the Python class `DoubanSpider` described below.\n\nClass description:\nImplement the DoubanSpider class.\n\nMethod signatures and docstrings:\n- def start_requests(self): 生成所有要抓取的URL地址,交给调度器入队列\n- def parse(self, response): 解析提取数据", "prompted_full_text": "Implement the Python class `DoubanSpider` described below.\n\nClass description:\nImplement the DoubanSpider class.\n\nMethod signatures and docstrings:\n- def start_requests(self): 生成所有要抓取的URL地址,交给调度器入队列\n- def parse(self, response): 解析提取数据\n\n<|skeleton|>\nclass DoubanSpider:\n\n def start_requests(self):\n \"\"\"生成所有要抓取的URL地址,交给调度器入队列\"\"\"\n <|body_0|>\n\n def parse(self, response):\n \"\"\"解析提取数据\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for start in range(0, 101, 20):\n page_url = self.url.format(start)\n yield scrapy.Request(url=page_url, callback=self.parse)\n<|end_body_0|>\n\n<|body_start_1|>\n html = json.loads(response.text)\n for one_dict in html:\n item = DoubanItem()\n item['rank'] = one_dict['rank']\n item['title'] = one_dict['title']\n item['score'] = one_dict['score']\n yield item\n<|end_body_1|>\n", "revision_id": "abe983ddc52690f4726cf42cc6390cba815026d8", "skeleton": "<|skeleton|>\nclass DoubanSpider:\n\n def start_requests(self):\n \"\"\"生成所有要抓取的URL地址,交给调度器入队列\"\"\"\n <|body_0|>\n\n def parse(self, response):\n 
\"\"\"解析提取数据\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DoubanSpider:\n def start_requests(self):\n \"\"\"生成所有要抓取的URL地址,交给调度器入队列\"\"\"\n for start in range(0, 101, 20):\n page_url = self.url.format(start)\n yield scrapy.Request(url=page_url, callback=self.parse)\n\n def parse(self, response):\n \"\"\"解析提取数据\"\"\"\n html = json.loads(response.text)\n for one_dict in html:\n item = DoubanItem()\n item['rank'] = one_dict['rank']\n item['title'] = one_dict['title']\n item['score'] = one_dict['score']\n yield item\n", "source": "the_stack_v2_python_sparse", "source_path": "month05/spider/day07_course/day07_code/Douban/Douban/spiders/douban.py", "source_repo": "chaofan-zheng/tedu-python-demo", "split": "val", "star_events_count": 4} {"blob_id": "d3780d70e5a147f2bb59781c3b19ccfac1c3c115", "bodies": ["self.base_name = name\nself.cmd = cmd\nself.params = list(param_generator)\nself.env_vars = env_vars", "num_experiments = 1 if len(self.params) == 0 else len(self.params)\nfor experiment_idx in range(num_experiments):\n cmd_tokens = [self.cmd]\n experiment_name_tokens = [self.base_name]\n param_shorthands = []\n if len(self.params) > 0:\n params = self.params[experiment_idx]\n for param, value in params.items():\n param_str = f'{param_prefix}{param}={value}'\n cmd_tokens.append(param_str)\n param_tokens = re.split('[._-]', param)\n shorthand_tokens = [t[0] for t in param_tokens[:-1]]\n last_token_l = min(3, len(param_tokens[-1]))\n shorthand = '.'.join(shorthand_tokens + [param_tokens[-1][:last_token_l]])\n while last_token_l <= len(param_tokens[-1]) and shorthand in param_shorthands:\n last_token_l += 1\n shorthand = '.'.join(shorthand_tokens + [param_tokens[-1][:last_token_l]])\n param_shorthands.append(shorthand)\n experiment_name_token = f'{shorthand}_{value}'\n experiment_name_tokens.append(experiment_name_token)\n if customize_experiment_name:\n experiment_name = f'{experiment_idx:02d}_' + '_'.join(experiment_name_tokens)\n if len(experiment_name) > 100:\n log.warning('Experiment name is extra long! 
(%d characters)', len(experiment_name))\n else:\n experiment_name = f'{experiment_idx:02d}_{self.base_name}'\n cmd_tokens.append(f'{experiment_arg_name}={experiment_name}')\n param_str = ' '.join(cmd_tokens)\n yield (param_str, experiment_name)"], "bodies_text": "<|body_start_0|>\n self.base_name = name\n self.cmd = cmd\n self.params = list(param_generator)\n self.env_vars = env_vars\n<|end_body_0|>\n\n<|body_start_1|>\n num_experiments = 1 if len(self.params) == 0 else len(self.params)\n for experiment_idx in range(num_experiments):\n cmd_tokens = [self.cmd]\n experiment_name_tokens = [self.base_name]\n param_shorthands = []\n if len(self.params) > 0:\n params = self.params[experiment_idx]\n for param, value in params.items():\n param_str = f'{param_prefix}{param}={value}'\n cmd_tokens.append(param_str)\n param_tokens = re.split('[._-]', param)\n shorthand_tokens = [t[0] for t in param_tokens[:-1]]\n last_token_l = min(3, len(param_tokens[-1]))\n shorthand = '.'.join(shorthand_tokens + [param_tokens[-1][:last_token_l]])\n while last_token_l <= len(param_tokens[-1]) and shorthand in param_shorthands:\n last_token_l += 1\n shorthand = '.'.join(shorthand_tokens + [param_tokens[-1][:last_token_l]])\n param_shorthands.append(shorthand)\n experiment_name_token = f'{shorthand}_{value}'\n experiment_name_tokens.append(experiment_name_token)\n if customize_experiment_name:\n experiment_name = f'{experiment_idx:02d}_' + '_'.join(experiment_name_tokens)\n if len(experiment_name) > 100:\n log.warning('Experiment name is extra long! (%d characters)', len(experiment_name))\n else:\n experiment_name = f'{experiment_idx:02d}_{self.base_name}'\n cmd_tokens.append(f'{experiment_arg_name}={experiment_name}')\n param_str = ' '.join(cmd_tokens)\n yield (param_str, experiment_name)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Experiment", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Experiment:\n\n def __init__(self, name, cmd, param_generator=(), env_vars=None):\n \"\"\":param cmd: base command to append the parameters to :param param_generator: iterable of parameter dicts\"\"\"\n <|body_0|>\n\n def generate_experiments(self, experiment_arg_name, customize_experiment_name, param_prefix):\n \"\"\"Yields tuples of (cmd, experiment_name)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.base_name = name\n self.cmd = cmd\n self.params = list(param_generator)\n self.env_vars = env_vars\n<|end_body_0|>\n\n<|body_start_1|>\n num_experiments = 1 if len(self.params) == 0 else len(self.params)\n for experiment_idx in range(num_experiments):\n cmd_tokens = [self.cmd]\n experiment_name_tokens = [self.base_name]\n param_shorthands = []\n if len(self.params) > 0:\n params = self.params[experiment_idx]\n for param, value in params.items():\n param_str = f'{param_prefix}{param}={value}'\n cmd_tokens.append(param_str)\n param_tokens = re.split('[._-]', param)\n shorthand_tokens = [t[0] for t in param_tokens[:-1]]\n last_token_l = min(3, len(param_tokens[-1]))\n shorthand = '.'.join(shorthand_tokens + [param_tokens[-1][:last_token_l]])\n while last_token_l <= len(param_tokens[-1]) and shorthand in param_shorthands:\n last_token_l += 1\n shorthand = '.'.join(shorthand_tokens + [param_tokens[-1][:last_token_l]])\n param_shorthands.append(shorthand)\n experiment_name_token = f'{shorthand}_{value}'\n experiment_name_tokens.append(experiment_name_token)\n if customize_experiment_name:\n experiment_name = f'{experiment_idx:02d}_' + 
'_'.join(experiment_name_tokens)\n if len(experiment_name) > 100:\n log.warning('Experiment name is extra long! (%d characters)', len(experiment_name))\n else:\n experiment_name = f'{experiment_idx:02d}_{self.base_name}'\n cmd_tokens.append(f'{experiment_arg_name}={experiment_name}')\n param_str = ' '.join(cmd_tokens)\n yield (param_str, experiment_name)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000173", "length_bytes": 7490, "license_type": "permissive", "methods": [{"docstring": ":param cmd: base command to append the parameters to :param param_generator: iterable of parameter dicts", "name": "__init__", "signature": "def __init__(self, name, cmd, param_generator=(), env_vars=None)"}, {"docstring": "Yields tuples of (cmd, experiment_name)", "name": "generate_experiments", "signature": "def generate_experiments(self, experiment_arg_name, customize_experiment_name, param_prefix)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002455", "prompt": "Implement the Python class `Experiment` described below.\n\nClass description:\nImplement the Experiment class.\n\nMethod signatures and docstrings:\n- def __init__(self, name, cmd, param_generator=(), env_vars=None): :param cmd: base command to append the parameters to :param param_generator: iterable of parameter dicts\n- def generate_experiments(self, experiment_arg_name, customize_experiment_name, param_prefix): Yields tuples of (cmd, experiment_name)", "prompted_full_text": "Implement the Python class `Experiment` described below.\n\nClass description:\nImplement the Experiment class.\n\nMethod signatures and docstrings:\n- def __init__(self, name, cmd, param_generator=(), env_vars=None): :param cmd: base command to append the parameters to :param param_generator: iterable of parameter dicts\n- def generate_experiments(self, experiment_arg_name, customize_experiment_name, param_prefix): Yields tuples of (cmd, experiment_name)\n\n<|skeleton|>\nclass Experiment:\n\n def __init__(self, name, cmd, param_generator=(), env_vars=None):\n \"\"\":param cmd: base command to append the parameters to :param param_generator: iterable of parameter dicts\"\"\"\n <|body_0|>\n\n def generate_experiments(self, experiment_arg_name, customize_experiment_name, param_prefix):\n \"\"\"Yields tuples of (cmd, experiment_name)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.base_name = name\n self.cmd = cmd\n self.params = list(param_generator)\n self.env_vars = env_vars\n<|end_body_0|>\n\n<|body_start_1|>\n num_experiments = 1 if len(self.params) == 0 else len(self.params)\n for experiment_idx in range(num_experiments):\n cmd_tokens = [self.cmd]\n experiment_name_tokens = [self.base_name]\n param_shorthands = []\n if len(self.params) > 0:\n params = self.params[experiment_idx]\n for param, value in params.items():\n param_str = f'{param_prefix}{param}={value}'\n cmd_tokens.append(param_str)\n param_tokens = re.split('[._-]', param)\n shorthand_tokens = [t[0] for t in param_tokens[:-1]]\n last_token_l = min(3, len(param_tokens[-1]))\n shorthand = '.'.join(shorthand_tokens + [param_tokens[-1][:last_token_l]])\n while last_token_l <= len(param_tokens[-1]) and shorthand in param_shorthands:\n last_token_l += 1\n shorthand = '.'.join(shorthand_tokens + [param_tokens[-1][:last_token_l]])\n param_shorthands.append(shorthand)\n experiment_name_token = f'{shorthand}_{value}'\n experiment_name_tokens.append(experiment_name_token)\n if customize_experiment_name:\n experiment_name = f'{experiment_idx:02d}_' + 
'_'.join(experiment_name_tokens)\n if len(experiment_name) > 100:\n log.warning('Experiment name is extra long! (%d characters)', len(experiment_name))\n else:\n experiment_name = f'{experiment_idx:02d}_{self.base_name}'\n cmd_tokens.append(f'{experiment_arg_name}={experiment_name}')\n param_str = ' '.join(cmd_tokens)\n yield (param_str, experiment_name)\n<|end_body_1|>\n", "revision_id": "7e1e69550f4de4cdc003d8db5bb39e186803aee9", "skeleton": "<|skeleton|>\nclass Experiment:\n\n def __init__(self, name, cmd, param_generator=(), env_vars=None):\n \"\"\":param cmd: base command to append the parameters to :param param_generator: iterable of parameter dicts\"\"\"\n <|body_0|>\n\n def generate_experiments(self, experiment_arg_name, customize_experiment_name, param_prefix):\n \"\"\"Yields tuples of (cmd, experiment_name)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Experiment:\n def __init__(self, name, cmd, param_generator=(), env_vars=None):\n \"\"\":param cmd: base command to append the parameters to :param param_generator: iterable of parameter dicts\"\"\"\n self.base_name = name\n self.cmd = cmd\n self.params = list(param_generator)\n self.env_vars = env_vars\n\n def generate_experiments(self, experiment_arg_name, customize_experiment_name, param_prefix):\n \"\"\"Yields tuples of (cmd, experiment_name)\"\"\"\n num_experiments = 1 if len(self.params) == 0 else len(self.params)\n for experiment_idx in range(num_experiments):\n cmd_tokens = [self.cmd]\n experiment_name_tokens = [self.base_name]\n param_shorthands = []\n if len(self.params) > 0:\n params = self.params[experiment_idx]\n for param, value in params.items():\n param_str = f'{param_prefix}{param}={value}'\n cmd_tokens.append(param_str)\n param_tokens = re.split('[._-]', param)\n shorthand_tokens = [t[0] for t in param_tokens[:-1]]\n last_token_l = min(3, len(param_tokens[-1]))\n shorthand = '.'.join(shorthand_tokens + [param_tokens[-1][:last_token_l]])\n while last_token_l <= len(param_tokens[-1]) and shorthand in param_shorthands:\n last_token_l += 1\n shorthand = '.'.join(shorthand_tokens + [param_tokens[-1][:last_token_l]])\n param_shorthands.append(shorthand)\n experiment_name_token = f'{shorthand}_{value}'\n experiment_name_tokens.append(experiment_name_token)\n if customize_experiment_name:\n experiment_name = f'{experiment_idx:02d}_' + '_'.join(experiment_name_tokens)\n if len(experiment_name) > 100:\n log.warning('Experiment name is extra long! 
(%d characters)', len(experiment_name))\n else:\n experiment_name = f'{experiment_idx:02d}_{self.base_name}'\n cmd_tokens.append(f'{experiment_arg_name}={experiment_name}')\n param_str = ' '.join(cmd_tokens)\n yield (param_str, experiment_name)\n", "source": "the_stack_v2_python_sparse", "source_path": "sample_factory/launcher/run_description.py", "source_repo": "alex-petrenko/sample-factory", "split": "val", "star_events_count": 644} {"blob_id": "bac021f94f2e11735c86c690c60800a8c16a2fd2", "bodies": ["if pull_target and app_engine_http_target:\n raise CreatingPullAndAppEngineQueueError('Attempting to send PullTarget and AppEngineHttpTarget simultaneously')\nqueue = self.messages.Queue(name=queue_ref.RelativeName(), retryConfig=retry_config, rateLimits=rate_limits, pullTarget=pull_target, appEngineHttpTarget=app_engine_http_target)\nrequest = self.messages.CloudtasksProjectsLocationsQueuesCreateRequest(parent=parent_ref.RelativeName(), queue=queue)\nreturn self.queues_service.Create(request)", "if not any([retry_config, rate_limits, app_engine_routing_override]):\n raise NoFieldsSpecifiedError('Must specify at least one field to update.')\nqueue = self.messages.Queue(name=queue_ref.RelativeName())\nif retry_config is not None:\n queue.retryConfig = retry_config\nif rate_limits is not None:\n queue.rateLimits = rate_limits\nif app_engine_routing_override is not None:\n if _IsEmptyConfig(app_engine_routing_override):\n queue.appEngineHttpTarget = self.messages.AppEngineHttpTarget()\n else:\n queue.appEngineHttpTarget = self.messages.AppEngineHttpTarget(appEngineRoutingOverride=app_engine_routing_override)\nupdate_mask = ','.join(updated_fields)\nrequest = self.messages.CloudtasksProjectsLocationsQueuesPatchRequest(name=queue_ref.RelativeName(), queue=queue, updateMask=update_mask)\nreturn self.queues_service.Patch(request)"], "bodies_text": "<|body_start_0|>\n if pull_target and app_engine_http_target:\n raise CreatingPullAndAppEngineQueueError('Attempting to send PullTarget and AppEngineHttpTarget simultaneously')\n queue = self.messages.Queue(name=queue_ref.RelativeName(), retryConfig=retry_config, rateLimits=rate_limits, pullTarget=pull_target, appEngineHttpTarget=app_engine_http_target)\n request = self.messages.CloudtasksProjectsLocationsQueuesCreateRequest(parent=parent_ref.RelativeName(), queue=queue)\n return self.queues_service.Create(request)\n<|end_body_0|>\n\n<|body_start_1|>\n if not any([retry_config, rate_limits, app_engine_routing_override]):\n raise NoFieldsSpecifiedError('Must specify at least one field to update.')\n queue = self.messages.Queue(name=queue_ref.RelativeName())\n if retry_config is not None:\n queue.retryConfig = retry_config\n if rate_limits is not None:\n queue.rateLimits = rate_limits\n if app_engine_routing_override is not None:\n if _IsEmptyConfig(app_engine_routing_override):\n queue.appEngineHttpTarget = self.messages.AppEngineHttpTarget()\n else:\n queue.appEngineHttpTarget = self.messages.AppEngineHttpTarget(appEngineRoutingOverride=app_engine_routing_override)\n update_mask = ','.join(updated_fields)\n request = self.messages.CloudtasksProjectsLocationsQueuesPatchRequest(name=queue_ref.RelativeName(), queue=queue, updateMask=update_mask)\n return self.queues_service.Patch(request)\n<|end_body_1|>\n", "class_docstring": "Client for queues service in the Cloud Tasks API.", "class_name": "AlphaQueues", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", 
"full_text": "<|skeleton|>\nclass AlphaQueues:\n \"\"\"Client for queues service in the Cloud Tasks API.\"\"\"\n\n def Create(self, parent_ref, queue_ref, retry_config=None, rate_limits=None, pull_target=None, app_engine_http_target=None):\n \"\"\"Prepares and sends a Create request for creating a queue.\"\"\"\n <|body_0|>\n\n def Patch(self, queue_ref, updated_fields, retry_config=None, rate_limits=None, app_engine_routing_override=None):\n \"\"\"Prepares and sends a Patch request for modifying a queue.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if pull_target and app_engine_http_target:\n raise CreatingPullAndAppEngineQueueError('Attempting to send PullTarget and AppEngineHttpTarget simultaneously')\n queue = self.messages.Queue(name=queue_ref.RelativeName(), retryConfig=retry_config, rateLimits=rate_limits, pullTarget=pull_target, appEngineHttpTarget=app_engine_http_target)\n request = self.messages.CloudtasksProjectsLocationsQueuesCreateRequest(parent=parent_ref.RelativeName(), queue=queue)\n return self.queues_service.Create(request)\n<|end_body_0|>\n\n<|body_start_1|>\n if not any([retry_config, rate_limits, app_engine_routing_override]):\n raise NoFieldsSpecifiedError('Must specify at least one field to update.')\n queue = self.messages.Queue(name=queue_ref.RelativeName())\n if retry_config is not None:\n queue.retryConfig = retry_config\n if rate_limits is not None:\n queue.rateLimits = rate_limits\n if app_engine_routing_override is not None:\n if _IsEmptyConfig(app_engine_routing_override):\n queue.appEngineHttpTarget = self.messages.AppEngineHttpTarget()\n else:\n queue.appEngineHttpTarget = self.messages.AppEngineHttpTarget(appEngineRoutingOverride=app_engine_routing_override)\n update_mask = ','.join(updated_fields)\n request = self.messages.CloudtasksProjectsLocationsQueuesPatchRequest(name=queue_ref.RelativeName(), queue=queue, updateMask=update_mask)\n return self.queues_service.Patch(request)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000174", "length_bytes": 9305, "license_type": "permissive", "methods": [{"docstring": "Prepares and sends a Create request for creating a queue.", "name": "Create", "signature": "def Create(self, parent_ref, queue_ref, retry_config=None, rate_limits=None, pull_target=None, app_engine_http_target=None)"}, {"docstring": "Prepares and sends a Patch request for modifying a queue.", "name": "Patch", "signature": "def Patch(self, queue_ref, updated_fields, retry_config=None, rate_limits=None, app_engine_routing_override=None)"}], "n_methods": 2, "prompt": "Implement the Python class `AlphaQueues` described below.\n\nClass description:\nClient for queues service in the Cloud Tasks API.\n\nMethod signatures and docstrings:\n- def Create(self, parent_ref, queue_ref, retry_config=None, rate_limits=None, pull_target=None, app_engine_http_target=None): Prepares and sends a Create request for creating a queue.\n- def Patch(self, queue_ref, updated_fields, retry_config=None, rate_limits=None, app_engine_routing_override=None): Prepares and sends a Patch request for modifying a queue.", "prompted_full_text": "Implement the Python class `AlphaQueues` described below.\n\nClass description:\nClient for queues service in the Cloud Tasks API.\n\nMethod signatures and docstrings:\n- def Create(self, parent_ref, queue_ref, retry_config=None, rate_limits=None, pull_target=None, app_engine_http_target=None): Prepares and sends a Create request for creating a queue.\n- def Patch(self, queue_ref, updated_fields, retry_config=None, 
rate_limits=None, app_engine_routing_override=None): Prepares and sends a Patch request for modifying a queue.\n\n<|skeleton|>\nclass AlphaQueues:\n \"\"\"Client for queues service in the Cloud Tasks API.\"\"\"\n\n def Create(self, parent_ref, queue_ref, retry_config=None, rate_limits=None, pull_target=None, app_engine_http_target=None):\n \"\"\"Prepares and sends a Create request for creating a queue.\"\"\"\n <|body_0|>\n\n def Patch(self, queue_ref, updated_fields, retry_config=None, rate_limits=None, app_engine_routing_override=None):\n \"\"\"Prepares and sends a Patch request for modifying a queue.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if pull_target and app_engine_http_target:\n raise CreatingPullAndAppEngineQueueError('Attempting to send PullTarget and AppEngineHttpTarget simultaneously')\n queue = self.messages.Queue(name=queue_ref.RelativeName(), retryConfig=retry_config, rateLimits=rate_limits, pullTarget=pull_target, appEngineHttpTarget=app_engine_http_target)\n request = self.messages.CloudtasksProjectsLocationsQueuesCreateRequest(parent=parent_ref.RelativeName(), queue=queue)\n return self.queues_service.Create(request)\n<|end_body_0|>\n\n<|body_start_1|>\n if not any([retry_config, rate_limits, app_engine_routing_override]):\n raise NoFieldsSpecifiedError('Must specify at least one field to update.')\n queue = self.messages.Queue(name=queue_ref.RelativeName())\n if retry_config is not None:\n queue.retryConfig = retry_config\n if rate_limits is not None:\n queue.rateLimits = rate_limits\n if app_engine_routing_override is not None:\n if _IsEmptyConfig(app_engine_routing_override):\n queue.appEngineHttpTarget = self.messages.AppEngineHttpTarget()\n else:\n queue.appEngineHttpTarget = self.messages.AppEngineHttpTarget(appEngineRoutingOverride=app_engine_routing_override)\n update_mask = ','.join(updated_fields)\n request = self.messages.CloudtasksProjectsLocationsQueuesPatchRequest(name=queue_ref.RelativeName(), queue=queue, updateMask=update_mask)\n return self.queues_service.Patch(request)\n<|end_body_1|>\n", "revision_id": "85bb264e273568b5a0408f733b403c56373e2508", "skeleton": "<|skeleton|>\nclass AlphaQueues:\n \"\"\"Client for queues service in the Cloud Tasks API.\"\"\"\n\n def Create(self, parent_ref, queue_ref, retry_config=None, rate_limits=None, pull_target=None, app_engine_http_target=None):\n \"\"\"Prepares and sends a Create request for creating a queue.\"\"\"\n <|body_0|>\n\n def Patch(self, queue_ref, updated_fields, retry_config=None, rate_limits=None, app_engine_routing_override=None):\n \"\"\"Prepares and sends a Patch request for modifying a queue.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AlphaQueues:\n \"\"\"Client for queues service in the Cloud Tasks API.\"\"\"\n\n def Create(self, parent_ref, queue_ref, retry_config=None, rate_limits=None, pull_target=None, app_engine_http_target=None):\n \"\"\"Prepares and sends a Create request for creating a queue.\"\"\"\n if pull_target and app_engine_http_target:\n raise CreatingPullAndAppEngineQueueError('Attempting to send PullTarget and AppEngineHttpTarget simultaneously')\n queue = self.messages.Queue(name=queue_ref.RelativeName(), retryConfig=retry_config, rateLimits=rate_limits, pullTarget=pull_target, appEngineHttpTarget=app_engine_http_target)\n request = self.messages.CloudtasksProjectsLocationsQueuesCreateRequest(parent=parent_ref.RelativeName(), queue=queue)\n 
return self.queues_service.Create(request)\n\n def Patch(self, queue_ref, updated_fields, retry_config=None, rate_limits=None, app_engine_routing_override=None):\n \"\"\"Prepares and sends a Patch request for modifying a queue.\"\"\"\n if not any([retry_config, rate_limits, app_engine_routing_override]):\n raise NoFieldsSpecifiedError('Must specify at least one field to update.')\n queue = self.messages.Queue(name=queue_ref.RelativeName())\n if retry_config is not None:\n queue.retryConfig = retry_config\n if rate_limits is not None:\n queue.rateLimits = rate_limits\n if app_engine_routing_override is not None:\n if _IsEmptyConfig(app_engine_routing_override):\n queue.appEngineHttpTarget = self.messages.AppEngineHttpTarget()\n else:\n queue.appEngineHttpTarget = self.messages.AppEngineHttpTarget(appEngineRoutingOverride=app_engine_routing_override)\n update_mask = ','.join(updated_fields)\n request = self.messages.CloudtasksProjectsLocationsQueuesPatchRequest(name=queue_ref.RelativeName(), queue=queue, updateMask=update_mask)\n return self.queues_service.Patch(request)\n", "source": "the_stack_v2_python_sparse", "source_path": "google-cloud-sdk/lib/googlecloudsdk/api_lib/tasks/queues.py", "source_repo": "bopopescu/socialliteapp", "split": "val", "star_events_count": 0} {"blob_id": "ae4b5272470f5cf0f5f669f76622d6c241d3a7c6", "bodies": ["super(MaskedMSELoss, self).__init__()\nself.reduction = reduction\nself.criterion = nn.MSELoss(reduction='none')", "loss = self.criterion(input * mask, target * mask)\nif self.reduction == 'mean':\n loss = torch.sum(loss) / torch.sum(mask)\nreturn loss"], "bodies_text": "<|body_start_0|>\n super(MaskedMSELoss, self).__init__()\n self.reduction = reduction\n self.criterion = nn.MSELoss(reduction='none')\n<|end_body_0|>\n\n<|body_start_1|>\n loss = self.criterion(input * mask, target * mask)\n if self.reduction == 'mean':\n loss = torch.sum(loss) / torch.sum(mask)\n return loss\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MaskedMSELoss", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MaskedMSELoss:\n\n def __init__(self, reduction='mean'):\n \"\"\"Masked MSE implementation :param reduction: the same, as in nn.MSELoss\"\"\"\n <|body_0|>\n\n def forward(self, input, target, mask):\n \"\"\"calculates masked loss :param input: input image as array :param target: reconstructed image as array :param mask: mask of image as array :return: masked loss\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MaskedMSELoss, self).__init__()\n self.reduction = reduction\n self.criterion = nn.MSELoss(reduction='none')\n<|end_body_0|>\n\n<|body_start_1|>\n loss = self.criterion(input * mask, target * mask)\n if self.reduction == 'mean':\n loss = torch.sum(loss) / torch.sum(mask)\n return loss\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000175", "length_bytes": 20751, "license_type": "permissive", "methods": [{"docstring": "Masked MSE implementation :param reduction: the same, as in nn.MSELoss", "name": "__init__", "signature": "def __init__(self, reduction='mean')"}, {"docstring": "calculates masked loss :param input: input image as array :param target: reconstructed image as array :param mask: mask of image as array :return: masked loss", "name": "forward", "signature": "def forward(self, input, target, mask)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003312", "prompt": "Implement the Python class `MaskedMSELoss` described below.\n\nClass 
description:\nImplement the MaskedMSELoss class.\n\nMethod signatures and docstrings:\n- def __init__(self, reduction='mean'): Masked MSE implementation :param reduction: the same, as in nn.MSELoss\n- def forward(self, input, target, mask): calculates masked loss :param input: input image as array :param target: reconstructed image as array :param mask: mask of image as array :return: masked loss", "prompted_full_text": "Implement the Python class `MaskedMSELoss` described below.\n\nClass description:\nImplement the MaskedMSELoss class.\n\nMethod signatures and docstrings:\n- def __init__(self, reduction='mean'): Masked MSE implementation :param reduction: the same, as in nn.MSELoss\n- def forward(self, input, target, mask): calculates masked loss :param input: input image as array :param target: reconstructed image as array :param mask: mask of image as array :return: masked loss\n\n<|skeleton|>\nclass MaskedMSELoss:\n\n def __init__(self, reduction='mean'):\n \"\"\"Masked MSE implementation :param reduction: the same, as in nn.MSELoss\"\"\"\n <|body_0|>\n\n def forward(self, input, target, mask):\n \"\"\"calculates masked loss :param input: input image as array :param target: reconstructed image as array :param mask: mask of image as array :return: masked loss\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MaskedMSELoss, self).__init__()\n self.reduction = reduction\n self.criterion = nn.MSELoss(reduction='none')\n<|end_body_0|>\n\n<|body_start_1|>\n loss = self.criterion(input * mask, target * mask)\n if self.reduction == 'mean':\n loss = torch.sum(loss) / torch.sum(mask)\n return loss\n<|end_body_1|>\n", "revision_id": "c80145929007876a6c459851bfe6d420195c340d", "skeleton": "<|skeleton|>\nclass MaskedMSELoss:\n\n def __init__(self, reduction='mean'):\n \"\"\"Masked MSE implementation :param reduction: the same, as in nn.MSELoss\"\"\"\n <|body_0|>\n\n def forward(self, input, target, mask):\n \"\"\"calculates masked loss :param input: input image as array :param target: reconstructed image as array :param mask: mask of image as array :return: masked loss\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MaskedMSELoss:\n def __init__(self, reduction='mean'):\n \"\"\"Masked MSE implementation :param reduction: the same, as in nn.MSELoss\"\"\"\n super(MaskedMSELoss, self).__init__()\n self.reduction = reduction\n self.criterion = nn.MSELoss(reduction='none')\n\n def forward(self, input, target, mask):\n \"\"\"calculates masked loss :param input: input image as array :param target: reconstructed image as array :param mask: mask of image as array :return: masked loss\"\"\"\n loss = self.criterion(input * mask, target * mask)\n if self.reduction == 'mean':\n loss = torch.sum(loss) / torch.sum(mask)\n return loss\n", "source": "the_stack_v2_python_sparse", "source_path": "src/models/autoencoders.py", "source_repo": "nomiscientist/xray", "split": "val", "star_events_count": 0} {"blob_id": "8ce8a2ca00e9a3a64f0357fea66d4451de94b78a", "bodies": ["total, dir_infos = self.job_manager.get_job_list(offset=offset, limit=limit)\njob_infos = [self._dir_2_info(dir_info) for dir_info in dir_infos]\nreturn (total, job_infos)", "job = self.job_manager.get_job(train_id)\nif job is None:\n raise TrainJobNotExistError(train_id)\nreturn self._job_2_meta(job)", "info = dict()\ninfo['train_id'] = dir_info['relative_path']\ninfo['create_time'] = 
dir_info['create_time'].strftime(cls.DATETIME_FORMAT)\ninfo['update_time'] = dir_info['update_time'].strftime(cls.DATETIME_FORMAT)\ninfo['saliency_map'] = dir_info['saliency_map']\ninfo['hierarchical_occlusion'] = dir_info['hierarchical_occlusion']\nreturn info", "info = dict()\ninfo['train_id'] = job.train_id\ninfo['create_time'] = datetime.fromtimestamp(job.create_time).strftime(cls.DATETIME_FORMAT)\ninfo['update_time'] = datetime.fromtimestamp(job.update_time).strftime(cls.DATETIME_FORMAT)\nreturn info", "info = cls._job_2_info(job)\ninfo['sample_count'] = job.sample_count\ninfo['classes'] = job.all_classes\nsaliency_info = dict()\nif job.min_confidence is None:\n saliency_info['min_confidence'] = cls.DEFAULT_MIN_CONFIDENCE\nelse:\n saliency_info['min_confidence'] = job.min_confidence\nsaliency_info['explainers'] = list(job.explainers)\nsaliency_info['metrics'] = list(job.metrics)\ninfo['saliency'] = saliency_info\ninfo['uncertainty'] = {'enabled': job.uncertainty_enabled}\ninfo['status'] = job.status\nreturn info"], "bodies_text": "<|body_start_0|>\n total, dir_infos = self.job_manager.get_job_list(offset=offset, limit=limit)\n job_infos = [self._dir_2_info(dir_info) for dir_info in dir_infos]\n return (total, job_infos)\n<|end_body_0|>\n\n<|body_start_1|>\n job = self.job_manager.get_job(train_id)\n if job is None:\n raise TrainJobNotExistError(train_id)\n return self._job_2_meta(job)\n<|end_body_1|>\n\n<|body_start_2|>\n info = dict()\n info['train_id'] = dir_info['relative_path']\n info['create_time'] = dir_info['create_time'].strftime(cls.DATETIME_FORMAT)\n info['update_time'] = dir_info['update_time'].strftime(cls.DATETIME_FORMAT)\n info['saliency_map'] = dir_info['saliency_map']\n info['hierarchical_occlusion'] = dir_info['hierarchical_occlusion']\n return info\n<|end_body_2|>\n\n<|body_start_3|>\n info = dict()\n info['train_id'] = job.train_id\n info['create_time'] = datetime.fromtimestamp(job.create_time).strftime(cls.DATETIME_FORMAT)\n info['update_time'] = datetime.fromtimestamp(job.update_time).strftime(cls.DATETIME_FORMAT)\n return info\n<|end_body_3|>\n\n<|body_start_4|>\n info = cls._job_2_info(job)\n info['sample_count'] = job.sample_count\n info['classes'] = job.all_classes\n saliency_info = dict()\n if job.min_confidence is None:\n saliency_info['min_confidence'] = cls.DEFAULT_MIN_CONFIDENCE\n else:\n saliency_info['min_confidence'] = job.min_confidence\n saliency_info['explainers'] = list(job.explainers)\n saliency_info['metrics'] = list(job.metrics)\n info['saliency'] = saliency_info\n info['uncertainty'] = {'enabled': job.uncertainty_enabled}\n info['status'] = job.status\n return info\n<|end_body_4|>\n", "class_docstring": "Explain job list encapsulator.", "class_name": "ExplainJobEncap", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "MIT", "BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExplainJobEncap:\n \"\"\"Explain job list encapsulator.\"\"\"\n\n def query_explain_jobs(self, offset, limit):\n \"\"\"Query explain job list. Args: offset (int): Page offset. limit (int): Maximum number of items to be returned. Returns: tuple[int, list[Dict]], total number of jobs and job list.\"\"\"\n <|body_0|>\n\n def query_meta(self, train_id):\n \"\"\"Query explain job meta-data. Args: train_id (str): Job ID. 
Returns: dict, the metadata.\"\"\"\n <|body_1|>\n\n def _dir_2_info(cls, dir_info):\n \"\"\"Convert ExplainJob object to jsonable info object.\"\"\"\n <|body_2|>\n\n def _job_2_info(cls, job):\n \"\"\"Convert ExplainJob object to jsonable info object.\"\"\"\n <|body_3|>\n\n def _job_2_meta(cls, job):\n \"\"\"Convert ExplainJob's meta-data to jsonable info object.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n total, dir_infos = self.job_manager.get_job_list(offset=offset, limit=limit)\n job_infos = [self._dir_2_info(dir_info) for dir_info in dir_infos]\n return (total, job_infos)\n<|end_body_0|>\n\n<|body_start_1|>\n job = self.job_manager.get_job(train_id)\n if job is None:\n raise TrainJobNotExistError(train_id)\n return self._job_2_meta(job)\n<|end_body_1|>\n\n<|body_start_2|>\n info = dict()\n info['train_id'] = dir_info['relative_path']\n info['create_time'] = dir_info['create_time'].strftime(cls.DATETIME_FORMAT)\n info['update_time'] = dir_info['update_time'].strftime(cls.DATETIME_FORMAT)\n info['saliency_map'] = dir_info['saliency_map']\n info['hierarchical_occlusion'] = dir_info['hierarchical_occlusion']\n return info\n<|end_body_2|>\n\n<|body_start_3|>\n info = dict()\n info['train_id'] = job.train_id\n info['create_time'] = datetime.fromtimestamp(job.create_time).strftime(cls.DATETIME_FORMAT)\n info['update_time'] = datetime.fromtimestamp(job.update_time).strftime(cls.DATETIME_FORMAT)\n return info\n<|end_body_3|>\n\n<|body_start_4|>\n info = cls._job_2_info(job)\n info['sample_count'] = job.sample_count\n info['classes'] = job.all_classes\n saliency_info = dict()\n if job.min_confidence is None:\n saliency_info['min_confidence'] = cls.DEFAULT_MIN_CONFIDENCE\n else:\n saliency_info['min_confidence'] = job.min_confidence\n saliency_info['explainers'] = list(job.explainers)\n saliency_info['metrics'] = list(job.metrics)\n info['saliency'] = saliency_info\n info['uncertainty'] = {'enabled': job.uncertainty_enabled}\n info['status'] = job.status\n return info\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000176", "length_bytes": 3607, "license_type": "permissive", "methods": [{"docstring": "Query explain job list. Args: offset (int): Page offset. limit (int): Maximum number of items to be returned. Returns: tuple[int, list[Dict]], total number of jobs and job list.", "name": "query_explain_jobs", "signature": "def query_explain_jobs(self, offset, limit)"}, {"docstring": "Query explain job meta-data. Args: train_id (str): Job ID. Returns: dict, the metadata.", "name": "query_meta", "signature": "def query_meta(self, train_id)"}, {"docstring": "Convert ExplainJob object to jsonable info object.", "name": "_dir_2_info", "signature": "def _dir_2_info(cls, dir_info)"}, {"docstring": "Convert ExplainJob object to jsonable info object.", "name": "_job_2_info", "signature": "def _job_2_info(cls, job)"}, {"docstring": "Convert ExplainJob's meta-data to jsonable info object.", "name": "_job_2_meta", "signature": "def _job_2_meta(cls, job)"}], "n_methods": 5, "prompt": "Implement the Python class `ExplainJobEncap` described below.\n\nClass description:\nExplain job list encapsulator.\n\nMethod signatures and docstrings:\n- def query_explain_jobs(self, offset, limit): Query explain job list. Args: offset (int): Page offset. limit (int): Maximum number of items to be returned. Returns: tuple[int, list[Dict]], total number of jobs and job list.\n- def query_meta(self, train_id): Query explain job meta-data. Args: train_id (str): Job ID. 
Returns: dict, the metadata.\n- def _dir_2_info(cls, dir_info): Convert ExplainJob object to jsonable info object.\n- def _job_2_info(cls, job): Convert ExplainJob object to jsonable info object.\n- def _job_2_meta(cls, job): Convert ExplainJob's meta-data to jsonable info object.", "prompted_full_text": "Implement the Python class `ExplainJobEncap` described below.\n\nClass description:\nExplain job list encapsulator.\n\nMethod signatures and docstrings:\n- def query_explain_jobs(self, offset, limit): Query explain job list. Args: offset (int): Page offset. limit (int): Maximum number of items to be returned. Returns: tuple[int, list[Dict]], total number of jobs and job list.\n- def query_meta(self, train_id): Query explain job meta-data. Args: train_id (str): Job ID. Returns: dict, the metadata.\n- def _dir_2_info(cls, dir_info): Convert ExplainJob object to jsonable info object.\n- def _job_2_info(cls, job): Convert ExplainJob object to jsonable info object.\n- def _job_2_meta(cls, job): Convert ExplainJob's meta-data to jsonable info object.\n\n<|skeleton|>\nclass ExplainJobEncap:\n \"\"\"Explain job list encapsulator.\"\"\"\n\n def query_explain_jobs(self, offset, limit):\n \"\"\"Query explain job list. Args: offset (int): Page offset. limit (int): Maximum number of items to be returned. Returns: tuple[int, list[Dict]], total number of jobs and job list.\"\"\"\n <|body_0|>\n\n def query_meta(self, train_id):\n \"\"\"Query explain job meta-data. Args: train_id (str): Job ID. Returns: dict, the metadata.\"\"\"\n <|body_1|>\n\n def _dir_2_info(cls, dir_info):\n \"\"\"Convert ExplainJob object to jsonable info object.\"\"\"\n <|body_2|>\n\n def _job_2_info(cls, job):\n \"\"\"Convert ExplainJob object to jsonable info object.\"\"\"\n <|body_3|>\n\n def _job_2_meta(cls, job):\n \"\"\"Convert ExplainJob's meta-data to jsonable info object.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n total, dir_infos = self.job_manager.get_job_list(offset=offset, limit=limit)\n job_infos = [self._dir_2_info(dir_info) for dir_info in dir_infos]\n return (total, job_infos)\n<|end_body_0|>\n\n<|body_start_1|>\n job = self.job_manager.get_job(train_id)\n if job is None:\n raise TrainJobNotExistError(train_id)\n return self._job_2_meta(job)\n<|end_body_1|>\n\n<|body_start_2|>\n info = dict()\n info['train_id'] = dir_info['relative_path']\n info['create_time'] = dir_info['create_time'].strftime(cls.DATETIME_FORMAT)\n info['update_time'] = dir_info['update_time'].strftime(cls.DATETIME_FORMAT)\n info['saliency_map'] = dir_info['saliency_map']\n info['hierarchical_occlusion'] = dir_info['hierarchical_occlusion']\n return info\n<|end_body_2|>\n\n<|body_start_3|>\n info = dict()\n info['train_id'] = job.train_id\n info['create_time'] = datetime.fromtimestamp(job.create_time).strftime(cls.DATETIME_FORMAT)\n info['update_time'] = datetime.fromtimestamp(job.update_time).strftime(cls.DATETIME_FORMAT)\n return info\n<|end_body_3|>\n\n<|body_start_4|>\n info = cls._job_2_info(job)\n info['sample_count'] = job.sample_count\n info['classes'] = job.all_classes\n saliency_info = dict()\n if job.min_confidence is None:\n saliency_info['min_confidence'] = cls.DEFAULT_MIN_CONFIDENCE\n else:\n saliency_info['min_confidence'] = job.min_confidence\n saliency_info['explainers'] = list(job.explainers)\n saliency_info['metrics'] = list(job.metrics)\n info['saliency'] = saliency_info\n info['uncertainty'] = {'enabled': job.uncertainty_enabled}\n info['status'] = job.status\n return info\n<|end_body_4|>\n", "revision_id": 
"a774d893fb2f21dbc3edb5cd89f9e6eec274ebf1", "skeleton": "<|skeleton|>\nclass ExplainJobEncap:\n \"\"\"Explain job list encapsulator.\"\"\"\n\n def query_explain_jobs(self, offset, limit):\n \"\"\"Query explain job list. Args: offset (int): Page offset. limit (int): Maximum number of items to be returned. Returns: tuple[int, list[Dict]], total number of jobs and job list.\"\"\"\n <|body_0|>\n\n def query_meta(self, train_id):\n \"\"\"Query explain job meta-data. Args: train_id (str): Job ID. Returns: dict, the metadata.\"\"\"\n <|body_1|>\n\n def _dir_2_info(cls, dir_info):\n \"\"\"Convert ExplainJob object to jsonable info object.\"\"\"\n <|body_2|>\n\n def _job_2_info(cls, job):\n \"\"\"Convert ExplainJob object to jsonable info object.\"\"\"\n <|body_3|>\n\n def _job_2_meta(cls, job):\n \"\"\"Convert ExplainJob's meta-data to jsonable info object.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ExplainJobEncap:\n \"\"\"Explain job list encapsulator.\"\"\"\n\n def query_explain_jobs(self, offset, limit):\n \"\"\"Query explain job list. Args: offset (int): Page offset. limit (int): Maximum number of items to be returned. Returns: tuple[int, list[Dict]], total number of jobs and job list.\"\"\"\n total, dir_infos = self.job_manager.get_job_list(offset=offset, limit=limit)\n job_infos = [self._dir_2_info(dir_info) for dir_info in dir_infos]\n return (total, job_infos)\n\n def query_meta(self, train_id):\n \"\"\"Query explain job meta-data. Args: train_id (str): Job ID. Returns: dict, the metadata.\"\"\"\n job = self.job_manager.get_job(train_id)\n if job is None:\n raise TrainJobNotExistError(train_id)\n return self._job_2_meta(job)\n\n def _dir_2_info(cls, dir_info):\n \"\"\"Convert ExplainJob object to jsonable info object.\"\"\"\n info = dict()\n info['train_id'] = dir_info['relative_path']\n info['create_time'] = dir_info['create_time'].strftime(cls.DATETIME_FORMAT)\n info['update_time'] = dir_info['update_time'].strftime(cls.DATETIME_FORMAT)\n info['saliency_map'] = dir_info['saliency_map']\n info['hierarchical_occlusion'] = dir_info['hierarchical_occlusion']\n return info\n\n def _job_2_info(cls, job):\n \"\"\"Convert ExplainJob object to jsonable info object.\"\"\"\n info = dict()\n info['train_id'] = job.train_id\n info['create_time'] = datetime.fromtimestamp(job.create_time).strftime(cls.DATETIME_FORMAT)\n info['update_time'] = datetime.fromtimestamp(job.update_time).strftime(cls.DATETIME_FORMAT)\n return info\n\n def _job_2_meta(cls, job):\n \"\"\"Convert ExplainJob's meta-data to jsonable info object.\"\"\"\n info = cls._job_2_info(job)\n info['sample_count'] = job.sample_count\n info['classes'] = job.all_classes\n saliency_info = dict()\n if job.min_confidence is None:\n saliency_info['min_confidence'] = cls.DEFAULT_MIN_CONFIDENCE\n else:\n saliency_info['min_confidence'] = job.min_confidence\n saliency_info['explainers'] = list(job.explainers)\n saliency_info['metrics'] = list(job.metrics)\n info['saliency'] = saliency_info\n info['uncertainty'] = {'enabled': job.uncertainty_enabled}\n info['status'] = job.status\n return info\n", "source": "the_stack_v2_python_sparse", "source_path": "mindinsight/explainer/encapsulator/explain_job_encap.py", "source_repo": "mindspore-ai/mindinsight", "split": "val", "star_events_count": 224} {"blob_id": "a3473ba61beccbfb158e8b2d1a69589222fdcb20", "bodies": ["res = set()\nB = set()\nfor a in A:\n B = {b | a for b in B} | {a}\n 
res |= B\nreturn len(res)", "max_a = max(A)\nmask = 1\nwhile mask <= max_a:\n mask <<= 1\nmask -= 1\nres = set()\nfor i, a in enumerate(A):\n res.add(a)\n j = i - 1\n cur = a\n while j >= 0 and cur < mask:\n cur |= A[j]\n res.add(cur)\n j -= 1\nreturn len(res)"], "bodies_text": "<|body_start_0|>\n res = set()\n B = set()\n for a in A:\n B = {b | a for b in B} | {a}\n res |= B\n return len(res)\n<|end_body_0|>\n\n<|body_start_1|>\n max_a = max(A)\n mask = 1\n while mask <= max_a:\n mask <<= 1\n mask -= 1\n res = set()\n for i, a in enumerate(A):\n res.add(a)\n j = i - 1\n cur = a\n while j >= 0 and cur < mask:\n cur |= A[j]\n res.add(cur)\n j -= 1\n return len(res)\n<|end_body_1|>\n", "class_docstring": "[898. 子数组按位或操作](https://leetcode-cn.com/problems/bitwise-ors-of-subarrays/)", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n \"\"\"[898. 子数组按位或操作](https://leetcode-cn.com/problems/bitwise-ors-of-subarrays/)\"\"\"\n\n def subarrayBitwiseORs(self, A: List[int]) -> int:\n \"\"\"思路: 2个set存\"\"\"\n <|body_0|>\n\n def subarrayBitwiseORs2(self, A: List[int]) -> int:\n \"\"\"思路: 位运算剪枝来代替两个set轮流io\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = set()\n B = set()\n for a in A:\n B = {b | a for b in B} | {a}\n res |= B\n return len(res)\n<|end_body_0|>\n\n<|body_start_1|>\n max_a = max(A)\n mask = 1\n while mask <= max_a:\n mask <<= 1\n mask -= 1\n res = set()\n for i, a in enumerate(A):\n res.add(a)\n j = i - 1\n cur = a\n while j >= 0 and cur < mask:\n cur |= A[j]\n res.add(cur)\n j -= 1\n return len(res)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000177", "length_bytes": 1297, "license_type": "no_license", "methods": [{"docstring": "思路: 2个set存", "name": "subarrayBitwiseORs", "signature": "def subarrayBitwiseORs(self, A: List[int]) -> int"}, {"docstring": "思路: 位运算剪枝来代替两个set轮流io", "name": "subarrayBitwiseORs2", "signature": "def subarrayBitwiseORs2(self, A: List[int]) -> int"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\n[898. 子数组按位或操作](https://leetcode-cn.com/problems/bitwise-ors-of-subarrays/)\n\nMethod signatures and docstrings:\n- def subarrayBitwiseORs(self, A: List[int]) -> int: 思路: 2个set存\n- def subarrayBitwiseORs2(self, A: List[int]) -> int: 思路: 位运算剪枝来代替两个set轮流io", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\n[898. 子数组按位或操作](https://leetcode-cn.com/problems/bitwise-ors-of-subarrays/)\n\nMethod signatures and docstrings:\n- def subarrayBitwiseORs(self, A: List[int]) -> int: 思路: 2个set存\n- def subarrayBitwiseORs2(self, A: List[int]) -> int: 思路: 位运算剪枝来代替两个set轮流io\n\n<|skeleton|>\nclass Solution:\n \"\"\"[898. 
子数组按位或操作](https://leetcode-cn.com/problems/bitwise-ors-of-subarrays/)\"\"\"\n\n def subarrayBitwiseORs(self, A: List[int]) -> int:\n \"\"\"思路: 2个set存\"\"\"\n <|body_0|>\n\n def subarrayBitwiseORs2(self, A: List[int]) -> int:\n \"\"\"思路: 位运算剪枝来代替两个set轮流io\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = set()\n B = set()\n for a in A:\n B = {b | a for b in B} | {a}\n res |= B\n return len(res)\n<|end_body_0|>\n\n<|body_start_1|>\n max_a = max(A)\n mask = 1\n while mask <= max_a:\n mask <<= 1\n mask -= 1\n res = set()\n for i, a in enumerate(A):\n res.add(a)\n j = i - 1\n cur = a\n while j >= 0 and cur < mask:\n cur |= A[j]\n res.add(cur)\n j -= 1\n return len(res)\n<|end_body_1|>\n", "revision_id": "dbe8eb449e5b112a71bc1cd4eabfd138304de4a3", "skeleton": "<|skeleton|>\nclass Solution:\n \"\"\"[898. 子数组按位或操作](https://leetcode-cn.com/problems/bitwise-ors-of-subarrays/)\"\"\"\n\n def subarrayBitwiseORs(self, A: List[int]) -> int:\n \"\"\"思路: 2个set存\"\"\"\n <|body_0|>\n\n def subarrayBitwiseORs2(self, A: List[int]) -> int:\n \"\"\"思路: 位运算剪枝来代替两个set轮流io\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n \"\"\"[898. 子数组按位或操作](https://leetcode-cn.com/problems/bitwise-ors-of-subarrays/)\"\"\"\n\n def subarrayBitwiseORs(self, A: List[int]) -> int:\n \"\"\"思路: 2个set存\"\"\"\n res = set()\n B = set()\n for a in A:\n B = {b | a for b in B} | {a}\n res |= B\n return len(res)\n\n def subarrayBitwiseORs2(self, A: List[int]) -> int:\n \"\"\"思路: 位运算剪枝来代替两个set轮流io\"\"\"\n max_a = max(A)\n mask = 1\n while mask <= max_a:\n mask <<= 1\n mask -= 1\n res = set()\n for i, a in enumerate(A):\n res.add(a)\n j = i - 1\n cur = a\n while j >= 0 and cur < mask:\n cur |= A[j]\n res.add(cur)\n j -= 1\n return len(res)\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/601-900/898.py", "source_repo": "Rivarrl/leetcode_python", "split": "val", "star_events_count": 3} {"blob_id": "2a1ed59759475048d6ebc5a084f95ee574414f71", "bodies": ["super().__init__(master=root, borderwidth=3, relief=SUNKEN)\nroot.title('Enamer v{major}.{minor}')\nroot.title('Hello, world!')\nself.grid(row=0, column=0)\nself.rowconfigure(0, weight=1)\nself.columnconfigure(0, weight=1)\nself._create_widgets()", "input_group = ttk.LabelFrame(self, text='Input filename:')\ninput_fname = ttk.Entry(input_group)\ninput_browse = ttk.Button(input_group, text='...')\nnaming_group = ttk.LabelFrame(self, text='Naming options:')\nnaming_entry = ttk.Entry(naming_group)\nnaming_options = ttk.Frame(naming_group)\nnaming_options.grid(row=0, column=1)\nnaming_author = ttk.Frame(naming_options)\nttk.Button(naming_author, text='>').grid(row=0, column=0)\nttk.Entry(naming_author).grid(row=0, column=1)\nnaming_author.grid(row=0, column=0)\nnaming_title = ttk.Frame(naming_options)\nttk.Button(naming_title, text='>').grid(row=0, column=0)\nttk.Entry(naming_title).grid(row=0, column=1)\nnaming_title.grid(row=1, column=0)\noutput_group = ttk.LabelFrame(self)\noutput_fname = ttk.Entry(self)\noutput_browse = ttk.Button(self, text='...')\nrename = ttk.Button(self, text='Rename')\ncancel = ttk.Button(self, text='Cancel')\ninput_group.grid(row=0, column=0, sticky=(W, E))\nnaming_group.grid(row=1, column=0, sticky=(W, E))\noutput_group.grid(row=2, column=0, sticky=(W, E))\ninput_fname.grid(row=0, column=1)\ninput_browse.grid(row=0, column=2)\nnaming_entry.grid(row=0, column=0)"], "bodies_text": "<|body_start_0|>\n 
super().__init__(master=root, borderwidth=3, relief=SUNKEN)\n root.title('Enamer v{major}.{minor}')\n root.title('Hello, world!')\n self.grid(row=0, column=0)\n self.rowconfigure(0, weight=1)\n self.columnconfigure(0, weight=1)\n self._create_widgets()\n<|end_body_0|>\n\n<|body_start_1|>\n input_group = ttk.LabelFrame(self, text='Input filename:')\n input_fname = ttk.Entry(input_group)\n input_browse = ttk.Button(input_group, text='...')\n naming_group = ttk.LabelFrame(self, text='Naming options:')\n naming_entry = ttk.Entry(naming_group)\n naming_options = ttk.Frame(naming_group)\n naming_options.grid(row=0, column=1)\n naming_author = ttk.Frame(naming_options)\n ttk.Button(naming_author, text='>').grid(row=0, column=0)\n ttk.Entry(naming_author).grid(row=0, column=1)\n naming_author.grid(row=0, column=0)\n naming_title = ttk.Frame(naming_options)\n ttk.Button(naming_title, text='>').grid(row=0, column=0)\n ttk.Entry(naming_title).grid(row=0, column=1)\n naming_title.grid(row=1, column=0)\n output_group = ttk.LabelFrame(self)\n output_fname = ttk.Entry(self)\n output_browse = ttk.Button(self, text='...')\n rename = ttk.Button(self, text='Rename')\n cancel = ttk.Button(self, text='Cancel')\n input_group.grid(row=0, column=0, sticky=(W, E))\n naming_group.grid(row=1, column=0, sticky=(W, E))\n output_group.grid(row=2, column=0, sticky=(W, E))\n input_fname.grid(row=0, column=1)\n input_browse.grid(row=0, column=2)\n naming_entry.grid(row=0, column=0)\n<|end_body_1|>\n", "class_docstring": "This is the program's main window.", "class_name": "EnameMainWindow", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EnameMainWindow:\n \"\"\"This is the program's main window.\"\"\"\n\n def __init__(self, root=None):\n \"\"\"Initializes given frame instance.\"\"\"\n <|body_0|>\n\n def _create_widgets(self):\n \"\"\"Blah, blah, blah, ...\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(master=root, borderwidth=3, relief=SUNKEN)\n root.title('Enamer v{major}.{minor}')\n root.title('Hello, world!')\n self.grid(row=0, column=0)\n self.rowconfigure(0, weight=1)\n self.columnconfigure(0, weight=1)\n self._create_widgets()\n<|end_body_0|>\n\n<|body_start_1|>\n input_group = ttk.LabelFrame(self, text='Input filename:')\n input_fname = ttk.Entry(input_group)\n input_browse = ttk.Button(input_group, text='...')\n naming_group = ttk.LabelFrame(self, text='Naming options:')\n naming_entry = ttk.Entry(naming_group)\n naming_options = ttk.Frame(naming_group)\n naming_options.grid(row=0, column=1)\n naming_author = ttk.Frame(naming_options)\n ttk.Button(naming_author, text='>').grid(row=0, column=0)\n ttk.Entry(naming_author).grid(row=0, column=1)\n naming_author.grid(row=0, column=0)\n naming_title = ttk.Frame(naming_options)\n ttk.Button(naming_title, text='>').grid(row=0, column=0)\n ttk.Entry(naming_title).grid(row=0, column=1)\n naming_title.grid(row=1, column=0)\n output_group = ttk.LabelFrame(self)\n output_fname = ttk.Entry(self)\n output_browse = ttk.Button(self, text='...')\n rename = ttk.Button(self, text='Rename')\n cancel = ttk.Button(self, text='Cancel')\n input_group.grid(row=0, column=0, sticky=(W, E))\n naming_group.grid(row=1, column=0, sticky=(W, E))\n output_group.grid(row=2, column=0, sticky=(W, E))\n input_fname.grid(row=0, column=1)\n input_browse.grid(row=0, column=2)\n naming_entry.grid(row=0, column=0)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000178", "length_bytes": 2243, 
"license_type": "no_license", "methods": [{"docstring": "Initializes given frame instance.", "name": "__init__", "signature": "def __init__(self, root=None)"}, {"docstring": "Blah, blah, blah, ...", "name": "_create_widgets", "signature": "def _create_widgets(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001011", "prompt": "Implement the Python class `EnameMainWindow` described below.\n\nClass description:\nThis is the program's main window.\n\nMethod signatures and docstrings:\n- def __init__(self, root=None): Initializes given frame instance.\n- def _create_widgets(self): Blah, blah, blah, ...", "prompted_full_text": "Implement the Python class `EnameMainWindow` described below.\n\nClass description:\nThis is the program's main window.\n\nMethod signatures and docstrings:\n- def __init__(self, root=None): Initializes given frame instance.\n- def _create_widgets(self): Blah, blah, blah, ...\n\n<|skeleton|>\nclass EnameMainWindow:\n \"\"\"This is the program's main window.\"\"\"\n\n def __init__(self, root=None):\n \"\"\"Initializes given frame instance.\"\"\"\n <|body_0|>\n\n def _create_widgets(self):\n \"\"\"Blah, blah, blah, ...\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(master=root, borderwidth=3, relief=SUNKEN)\n root.title('Enamer v{major}.{minor}')\n root.title('Hello, world!')\n self.grid(row=0, column=0)\n self.rowconfigure(0, weight=1)\n self.columnconfigure(0, weight=1)\n self._create_widgets()\n<|end_body_0|>\n\n<|body_start_1|>\n input_group = ttk.LabelFrame(self, text='Input filename:')\n input_fname = ttk.Entry(input_group)\n input_browse = ttk.Button(input_group, text='...')\n naming_group = ttk.LabelFrame(self, text='Naming options:')\n naming_entry = ttk.Entry(naming_group)\n naming_options = ttk.Frame(naming_group)\n naming_options.grid(row=0, column=1)\n naming_author = ttk.Frame(naming_options)\n ttk.Button(naming_author, text='>').grid(row=0, column=0)\n ttk.Entry(naming_author).grid(row=0, column=1)\n naming_author.grid(row=0, column=0)\n naming_title = ttk.Frame(naming_options)\n ttk.Button(naming_title, text='>').grid(row=0, column=0)\n ttk.Entry(naming_title).grid(row=0, column=1)\n naming_title.grid(row=1, column=0)\n output_group = ttk.LabelFrame(self)\n output_fname = ttk.Entry(self)\n output_browse = ttk.Button(self, text='...')\n rename = ttk.Button(self, text='Rename')\n cancel = ttk.Button(self, text='Cancel')\n input_group.grid(row=0, column=0, sticky=(W, E))\n naming_group.grid(row=1, column=0, sticky=(W, E))\n output_group.grid(row=2, column=0, sticky=(W, E))\n input_fname.grid(row=0, column=1)\n input_browse.grid(row=0, column=2)\n naming_entry.grid(row=0, column=0)\n<|end_body_1|>\n", "revision_id": "79ac4d935fba252ff18274fc1085a585f530e641", "skeleton": "<|skeleton|>\nclass EnameMainWindow:\n \"\"\"This is the program's main window.\"\"\"\n\n def __init__(self, root=None):\n \"\"\"Initializes given frame instance.\"\"\"\n <|body_0|>\n\n def _create_widgets(self):\n \"\"\"Blah, blah, blah, ...\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class EnameMainWindow:\n \"\"\"This is the program's main window.\"\"\"\n\n def __init__(self, root=None):\n \"\"\"Initializes given frame instance.\"\"\"\n super().__init__(master=root, borderwidth=3, relief=SUNKEN)\n root.title('Enamer v{major}.{minor}')\n root.title('Hello, world!')\n self.grid(row=0, column=0)\n self.rowconfigure(0, 
weight=1)\n self.columnconfigure(0, weight=1)\n self._create_widgets()\n\n def _create_widgets(self):\n \"\"\"Blah, blah, blah, ...\"\"\"\n input_group = ttk.LabelFrame(self, text='Input filename:')\n input_fname = ttk.Entry(input_group)\n input_browse = ttk.Button(input_group, text='...')\n naming_group = ttk.LabelFrame(self, text='Naming options:')\n naming_entry = ttk.Entry(naming_group)\n naming_options = ttk.Frame(naming_group)\n naming_options.grid(row=0, column=1)\n naming_author = ttk.Frame(naming_options)\n ttk.Button(naming_author, text='>').grid(row=0, column=0)\n ttk.Entry(naming_author).grid(row=0, column=1)\n naming_author.grid(row=0, column=0)\n naming_title = ttk.Frame(naming_options)\n ttk.Button(naming_title, text='>').grid(row=0, column=0)\n ttk.Entry(naming_title).grid(row=0, column=1)\n naming_title.grid(row=1, column=0)\n output_group = ttk.LabelFrame(self)\n output_fname = ttk.Entry(self)\n output_browse = ttk.Button(self, text='...')\n rename = ttk.Button(self, text='Rename')\n cancel = ttk.Button(self, text='Cancel')\n input_group.grid(row=0, column=0, sticky=(W, E))\n naming_group.grid(row=1, column=0, sticky=(W, E))\n output_group.grid(row=2, column=0, sticky=(W, E))\n input_fname.grid(row=0, column=1)\n input_browse.grid(row=0, column=2)\n naming_entry.grid(row=0, column=0)\n", "source": "the_stack_v2_python_sparse", "source_path": "Python/Utils/Ename/ename.py", "source_repo": "tnotstar/tnotbox", "split": "val", "star_events_count": 0} {"blob_id": "6ac7ddbe33c3dc6c46306d2dae1c664e644ad1ac", "bodies": ["super(Agent_DQN, self).__init__(env)\nself.hparams = tf.contrib.training.HParams(n_actions=self.env.action_space.n, total_episode=100000, init_step=10000, exploration_step=1000000, epsilon_init=1.0, epsilon_min=0.025, n_obs=list(self.env.env.observation_space.shape), gamma=0.99, learning_rate=0.0001, use_dueling=False, use_ddqn=False, checkpoint_path='./checkpoints/agent_dqn', history_rewards_file='./history_rewards.npy', save_interval=100000, target_update_interval=1000, train_interval=4, replay_size=10000)\nself.epsilon = self.hparams.epsilon_init\nself.epsilon_decay = (self.hparams.epsilon_init - self.hparams.epsilon_min) / self.hparams.exploration_step\nself.dqn = DQN(self.hparams)\nself.session = tf.Session()\nif args.test_dqn:\n tf.logging.info('loading trained model')\nself.dqn.load(self.session, self.hparams.checkpoint_path)", "self.i_step = -1\nself.history_rewards = []\nself.replay_buf = deque(maxlen=self.hparams.replay_size)", "tf.logging.info('...... 
start training ......')\nself.init_game_setting()\nrunning_reward = None\nfor i_episode in range(self.hparams.total_episode):\n state = self.env.reset()\n done = False\n cumulate_reward = 0\n episode_steps = 0\n while not done:\n self.i_step += 1\n episode_steps += 1\n action = self.make_action(state, test=False)\n next_state, reward, done, info = self.env.step(action)\n self.replay_buf.append([state, action, reward, next_state, int(done)])\n state = next_state\n cumulate_reward += reward\n if self.i_step >= self.hparams.init_step and self.i_step % self.hparams.train_interval == 0:\n self.dqn.train(self.session, self.replay_buf)\n if self.i_step >= self.hparams.init_step and self.i_step % self.hparams.target_update_interval == 0:\n self.dqn.cp2targetnet(self.session)\n if self.i_step % self.hparams.save_interval == 0:\n self.dqn.save(self.session, self.hparams.checkpoint_path)\n running_reward = cumulate_reward if running_reward is None else running_reward * 0.99 + cumulate_reward * 0.01\n self.history_rewards.append(running_reward)\n tf.logging.info('I_EPISODE: {:06d} | EPISODE_STEPS: {:03d} | I_STEP: {:09d} | EPSILON: {:.5f} | CUR_REWARD: {:2.3f} | AVG_REWARD: {:2.3f}'.format(i_episode, episode_steps, self.i_step, self.epsilon, cumulate_reward, running_reward))\nnp.save(self.hparams.history_rewards_file, self.history_rewards)\nself.dqn.save(self.session, self.hparams.checkpoint_path)", "if test:\n if random.random() < self.hparams.epsilon_min:\n return random.randrange(self.hparams.n_actions)\n output = self.dqn.predict(self.session, [observation])\n return np.argmax(output[0])\nelse:\n if self.epsilon > self.hparams.epsilon_min and self.i_step > self.hparams.init_step:\n self.epsilon = self.epsilon - self.epsilon_decay\n if random.random() < self.epsilon:\n return random.randrange(self.hparams.n_actions)\n output = self.dqn.predict(self.session, [observation])\n return np.argmax(output[0])"], "bodies_text": "<|body_start_0|>\n super(Agent_DQN, self).__init__(env)\n self.hparams = tf.contrib.training.HParams(n_actions=self.env.action_space.n, total_episode=100000, init_step=10000, exploration_step=1000000, epsilon_init=1.0, epsilon_min=0.025, n_obs=list(self.env.env.observation_space.shape), gamma=0.99, learning_rate=0.0001, use_dueling=False, use_ddqn=False, checkpoint_path='./checkpoints/agent_dqn', history_rewards_file='./history_rewards.npy', save_interval=100000, target_update_interval=1000, train_interval=4, replay_size=10000)\n self.epsilon = self.hparams.epsilon_init\n self.epsilon_decay = (self.hparams.epsilon_init - self.hparams.epsilon_min) / self.hparams.exploration_step\n self.dqn = DQN(self.hparams)\n self.session = tf.Session()\n if args.test_dqn:\n tf.logging.info('loading trained model')\n self.dqn.load(self.session, self.hparams.checkpoint_path)\n<|end_body_0|>\n\n<|body_start_1|>\n self.i_step = -1\n self.history_rewards = []\n self.replay_buf = deque(maxlen=self.hparams.replay_size)\n<|end_body_1|>\n\n<|body_start_2|>\n tf.logging.info('...... 
start training ......')\n self.init_game_setting()\n running_reward = None\n for i_episode in range(self.hparams.total_episode):\n state = self.env.reset()\n done = False\n cumulate_reward = 0\n episode_steps = 0\n while not done:\n self.i_step += 1\n episode_steps += 1\n action = self.make_action(state, test=False)\n next_state, reward, done, info = self.env.step(action)\n self.replay_buf.append([state, action, reward, next_state, int(done)])\n state = next_state\n cumulate_reward += reward\n if self.i_step >= self.hparams.init_step and self.i_step % self.hparams.train_interval == 0:\n self.dqn.train(self.session, self.replay_buf)\n if self.i_step >= self.hparams.init_step and self.i_step % self.hparams.target_update_interval == 0:\n self.dqn.cp2targetnet(self.session)\n if self.i_step % self.hparams.save_interval == 0:\n self.dqn.save(self.session, self.hparams.checkpoint_path)\n running_reward = cumulate_reward if running_reward is None else running_reward * 0.99 + cumulate_reward * 0.01\n self.history_rewards.append(running_reward)\n tf.logging.info('I_EPISODE: {:06d} | EPISODE_STEPS: {:03d} | I_STEP: {:09d} | EPSILON: {:.5f} | CUR_REWARD: {:2.3f} | AVG_REWARD: {:2.3f}'.format(i_episode, episode_steps, self.i_step, self.epsilon, cumulate_reward, running_reward))\n np.save(self.hparams.history_rewards_file, self.history_rewards)\n self.dqn.save(self.session, self.hparams.checkpoint_path)\n<|end_body_2|>\n\n<|body_start_3|>\n if test:\n if random.random() < self.hparams.epsilon_min:\n return random.randrange(self.hparams.n_actions)\n output = self.dqn.predict(self.session, [observation])\n return np.argmax(output[0])\n else:\n if self.epsilon > self.hparams.epsilon_min and self.i_step > self.hparams.init_step:\n self.epsilon = self.epsilon - self.epsilon_decay\n if random.random() < self.epsilon:\n return random.randrange(self.hparams.n_actions)\n output = self.dqn.predict(self.session, [observation])\n return np.argmax(output[0])\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Agent_DQN", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Agent_DQN:\n\n def __init__(self, env, args):\n \"\"\"Initialize everything you need here. 
For example: building your model\"\"\"\n <|body_0|>\n\n def init_game_setting(self):\n \"\"\"Testing function will call this function at the beginning of new game Put anything you want to initialize if necessary\"\"\"\n <|body_1|>\n\n def train(self):\n \"\"\"Implement your training algorithm here\"\"\"\n <|body_2|>\n\n def make_action(self, observation, test=True):\n \"\"\"Return predicted action of your agent Input: observation: np.array stack 4 last preprocessed frames, shape: (84, 84, 4) Return: action: int the predicted action from trained model\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Agent_DQN, self).__init__(env)\n self.hparams = tf.contrib.training.HParams(n_actions=self.env.action_space.n, total_episode=100000, init_step=10000, exploration_step=1000000, epsilon_init=1.0, epsilon_min=0.025, n_obs=list(self.env.env.observation_space.shape), gamma=0.99, learning_rate=0.0001, use_dueling=False, use_ddqn=False, checkpoint_path='./checkpoints/agent_dqn', history_rewards_file='./history_rewards.npy', save_interval=100000, target_update_interval=1000, train_interval=4, replay_size=10000)\n self.epsilon = self.hparams.epsilon_init\n self.epsilon_decay = (self.hparams.epsilon_init - self.hparams.epsilon_min) / self.hparams.exploration_step\n self.dqn = DQN(self.hparams)\n self.session = tf.Session()\n if args.test_dqn:\n tf.logging.info('loading trained model')\n self.dqn.load(self.session, self.hparams.checkpoint_path)\n<|end_body_0|>\n\n<|body_start_1|>\n self.i_step = -1\n self.history_rewards = []\n self.replay_buf = deque(maxlen=self.hparams.replay_size)\n<|end_body_1|>\n\n<|body_start_2|>\n tf.logging.info('...... start training ......')\n self.init_game_setting()\n running_reward = None\n for i_episode in range(self.hparams.total_episode):\n state = self.env.reset()\n done = False\n cumulate_reward = 0\n episode_steps = 0\n while not done:\n self.i_step += 1\n episode_steps += 1\n action = self.make_action(state, test=False)\n next_state, reward, done, info = self.env.step(action)\n self.replay_buf.append([state, action, reward, next_state, int(done)])\n state = next_state\n cumulate_reward += reward\n if self.i_step >= self.hparams.init_step and self.i_step % self.hparams.train_interval == 0:\n self.dqn.train(self.session, self.replay_buf)\n if self.i_step >= self.hparams.init_step and self.i_step % self.hparams.target_update_interval == 0:\n self.dqn.cp2targetnet(self.session)\n if self.i_step % self.hparams.save_interval == 0:\n self.dqn.save(self.session, self.hparams.checkpoint_path)\n running_reward = cumulate_reward if running_reward is None else running_reward * 0.99 + cumulate_reward * 0.01\n self.history_rewards.append(running_reward)\n tf.logging.info('I_EPISODE: {:06d} | EPISODE_STEPS: {:03d} | I_STEP: {:09d} | EPSILON: {:.5f} | CUR_REWARD: {:2.3f} | AVG_REWARD: {:2.3f}'.format(i_episode, episode_steps, self.i_step, self.epsilon, cumulate_reward, running_reward))\n np.save(self.hparams.history_rewards_file, self.history_rewards)\n self.dqn.save(self.session, self.hparams.checkpoint_path)\n<|end_body_2|>\n\n<|body_start_3|>\n if test:\n if random.random() < self.hparams.epsilon_min:\n return random.randrange(self.hparams.n_actions)\n output = self.dqn.predict(self.session, [observation])\n return np.argmax(output[0])\n else:\n if self.epsilon > self.hparams.epsilon_min and self.i_step > self.hparams.init_step:\n self.epsilon = self.epsilon - self.epsilon_decay\n if random.random() < self.epsilon:\n return random.randrange(self.hparams.n_actions)\n 
output = self.dqn.predict(self.session, [observation])\n return np.argmax(output[0])\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000179", "length_bytes": 13991, "license_type": "no_license", "methods": [{"docstring": "Initialize everything you need here. For example: building your model", "name": "__init__", "signature": "def __init__(self, env, args)"}, {"docstring": "Testing function will call this function at the beginning of new game Put anything you want to initialize if necessary", "name": "init_game_setting", "signature": "def init_game_setting(self)"}, {"docstring": "Implement your training algorithm here", "name": "train", "signature": "def train(self)"}, {"docstring": "Return predicted action of your agent Input: observation: np.array stack 4 last preprocessed frames, shape: (84, 84, 4) Return: action: int the predicted action from trained model", "name": "make_action", "signature": "def make_action(self, observation, test=True)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_001838", "prompt": "Implement the Python class `Agent_DQN` described below.\n\nClass description:\nImplement the Agent_DQN class.\n\nMethod signatures and docstrings:\n- def __init__(self, env, args): Initialize everything you need here. For example: building your model\n- def init_game_setting(self): Testing function will call this function at the beginning of new game Put anything you want to initialize if necessary\n- def train(self): Implement your training algorithm here\n- def make_action(self, observation, test=True): Return predicted action of your agent Input: observation: np.array stack 4 last preprocessed frames, shape: (84, 84, 4) Return: action: int the predicted action from trained model", "prompted_full_text": "Implement the Python class `Agent_DQN` described below.\n\nClass description:\nImplement the Agent_DQN class.\n\nMethod signatures and docstrings:\n- def __init__(self, env, args): Initialize everything you need here. For example: building your model\n- def init_game_setting(self): Testing function will call this function at the beginning of new game Put anything you want to initialize if necessary\n- def train(self): Implement your training algorithm here\n- def make_action(self, observation, test=True): Return predicted action of your agent Input: observation: np.array stack 4 last preprocessed frames, shape: (84, 84, 4) Return: action: int the predicted action from trained model\n\n<|skeleton|>\nclass Agent_DQN:\n\n def __init__(self, env, args):\n \"\"\"Initialize everything you need here. 
For example: building your model\"\"\"\n <|body_0|>\n\n def init_game_setting(self):\n \"\"\"Testing function will call this function at the beginning of new game Put anything you want to initialize if necessary\"\"\"\n <|body_1|>\n\n def train(self):\n \"\"\"Implement your training algorithm here\"\"\"\n <|body_2|>\n\n def make_action(self, observation, test=True):\n \"\"\"Return predicted action of your agent Input: observation: np.array stack 4 last preprocessed frames, shape: (84, 84, 4) Return: action: int the predicted action from trained model\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Agent_DQN, self).__init__(env)\n self.hparams = tf.contrib.training.HParams(n_actions=self.env.action_space.n, total_episode=100000, init_step=10000, exploration_step=1000000, epsilon_init=1.0, epsilon_min=0.025, n_obs=list(self.env.env.observation_space.shape), gamma=0.99, learning_rate=0.0001, use_dueling=False, use_ddqn=False, checkpoint_path='./checkpoints/agent_dqn', history_rewards_file='./history_rewards.npy', save_interval=100000, target_update_interval=1000, train_interval=4, replay_size=10000)\n self.epsilon = self.hparams.epsilon_init\n self.epsilon_decay = (self.hparams.epsilon_init - self.hparams.epsilon_min) / self.hparams.exploration_step\n self.dqn = DQN(self.hparams)\n self.session = tf.Session()\n if args.test_dqn:\n tf.logging.info('loading trained model')\n self.dqn.load(self.session, self.hparams.checkpoint_path)\n<|end_body_0|>\n\n<|body_start_1|>\n self.i_step = -1\n self.history_rewards = []\n self.replay_buf = deque(maxlen=self.hparams.replay_size)\n<|end_body_1|>\n\n<|body_start_2|>\n tf.logging.info('...... start training ......')\n self.init_game_setting()\n running_reward = None\n for i_episode in range(self.hparams.total_episode):\n state = self.env.reset()\n done = False\n cumulate_reward = 0\n episode_steps = 0\n while not done:\n self.i_step += 1\n episode_steps += 1\n action = self.make_action(state, test=False)\n next_state, reward, done, info = self.env.step(action)\n self.replay_buf.append([state, action, reward, next_state, int(done)])\n state = next_state\n cumulate_reward += reward\n if self.i_step >= self.hparams.init_step and self.i_step % self.hparams.train_interval == 0:\n self.dqn.train(self.session, self.replay_buf)\n if self.i_step >= self.hparams.init_step and self.i_step % self.hparams.target_update_interval == 0:\n self.dqn.cp2targetnet(self.session)\n if self.i_step % self.hparams.save_interval == 0:\n self.dqn.save(self.session, self.hparams.checkpoint_path)\n running_reward = cumulate_reward if running_reward is None else running_reward * 0.99 + cumulate_reward * 0.01\n self.history_rewards.append(running_reward)\n tf.logging.info('I_EPISODE: {:06d} | EPISODE_STEPS: {:03d} | I_STEP: {:09d} | EPSILON: {:.5f} | CUR_REWARD: {:2.3f} | AVG_REWARD: {:2.3f}'.format(i_episode, episode_steps, self.i_step, self.epsilon, cumulate_reward, running_reward))\n np.save(self.hparams.history_rewards_file, self.history_rewards)\n self.dqn.save(self.session, self.hparams.checkpoint_path)\n<|end_body_2|>\n\n<|body_start_3|>\n if test:\n if random.random() < self.hparams.epsilon_min:\n return random.randrange(self.hparams.n_actions)\n output = self.dqn.predict(self.session, [observation])\n return np.argmax(output[0])\n else:\n if self.epsilon > self.hparams.epsilon_min and self.i_step > self.hparams.init_step:\n self.epsilon = self.epsilon - self.epsilon_decay\n if random.random() < self.epsilon:\n return random.randrange(self.hparams.n_actions)\n 
output = self.dqn.predict(self.session, [observation])\n return np.argmax(output[0])\n<|end_body_3|>\n", "revision_id": "5a3069ab61b6f35b1e890a38b91b99691a023b44", "skeleton": "<|skeleton|>\nclass Agent_DQN:\n\n def __init__(self, env, args):\n \"\"\"Initialize everything you need here. For example: building your model\"\"\"\n <|body_0|>\n\n def init_game_setting(self):\n \"\"\"Testing function will call this function at the beginning of new game Put anything you want to initialize if necessary\"\"\"\n <|body_1|>\n\n def train(self):\n \"\"\"Implement your training algorithm here\"\"\"\n <|body_2|>\n\n def make_action(self, observation, test=True):\n \"\"\"Return predicted action of your agent Input: observation: np.array stack 4 last preprocessed frames, shape: (84, 84, 4) Return: action: int the predicted action from trained model\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Agent_DQN:\n def __init__(self, env, args):\n \"\"\"Initialize everything you need here. For example: building your model\"\"\"\n super(Agent_DQN, self).__init__(env)\n self.hparams = tf.contrib.training.HParams(n_actions=self.env.action_space.n, total_episode=100000, init_step=10000, exploration_step=1000000, epsilon_init=1.0, epsilon_min=0.025, n_obs=list(self.env.env.observation_space.shape), gamma=0.99, learning_rate=0.0001, use_dueling=False, use_ddqn=False, checkpoint_path='./checkpoints/agent_dqn', history_rewards_file='./history_rewards.npy', save_interval=100000, target_update_interval=1000, train_interval=4, replay_size=10000)\n self.epsilon = self.hparams.epsilon_init\n self.epsilon_decay = (self.hparams.epsilon_init - self.hparams.epsilon_min) / self.hparams.exploration_step\n self.dqn = DQN(self.hparams)\n self.session = tf.Session()\n if args.test_dqn:\n tf.logging.info('loading trained model')\n self.dqn.load(self.session, self.hparams.checkpoint_path)\n\n def init_game_setting(self):\n \"\"\"Testing function will call this function at the beginning of new game Put anything you want to initialize if necessary\"\"\"\n self.i_step = -1\n self.history_rewards = []\n self.replay_buf = deque(maxlen=self.hparams.replay_size)\n\n def train(self):\n \"\"\"Implement your training algorithm here\"\"\"\n tf.logging.info('...... 
start training ......')\n self.init_game_setting()\n running_reward = None\n for i_episode in range(self.hparams.total_episode):\n state = self.env.reset()\n done = False\n cumulate_reward = 0\n episode_steps = 0\n while not done:\n self.i_step += 1\n episode_steps += 1\n action = self.make_action(state, test=False)\n next_state, reward, done, info = self.env.step(action)\n self.replay_buf.append([state, action, reward, next_state, int(done)])\n state = next_state\n cumulate_reward += reward\n if self.i_step >= self.hparams.init_step and self.i_step % self.hparams.train_interval == 0:\n self.dqn.train(self.session, self.replay_buf)\n if self.i_step >= self.hparams.init_step and self.i_step % self.hparams.target_update_interval == 0:\n self.dqn.cp2targetnet(self.session)\n if self.i_step % self.hparams.save_interval == 0:\n self.dqn.save(self.session, self.hparams.checkpoint_path)\n running_reward = cumulate_reward if running_reward is None else running_reward * 0.99 + cumulate_reward * 0.01\n self.history_rewards.append(running_reward)\n tf.logging.info('I_EPISODE: {:06d} | EPISODE_STEPS: {:03d} | I_STEP: {:09d} | EPSILON: {:.5f} | CUR_REWARD: {:2.3f} | AVG_REWARD: {:2.3f}'.format(i_episode, episode_steps, self.i_step, self.epsilon, cumulate_reward, running_reward))\n np.save(self.hparams.history_rewards_file, self.history_rewards)\n self.dqn.save(self.session, self.hparams.checkpoint_path)\n\n def make_action(self, observation, test=True):\n \"\"\"Return predicted action of your agent Input: observation: np.array stack 4 last preprocessed frames, shape: (84, 84, 4) Return: action: int the predicted action from trained model\"\"\"\n if test:\n if random.random() < self.hparams.epsilon_min:\n return random.randrange(self.hparams.n_actions)\n output = self.dqn.predict(self.session, [observation])\n return np.argmax(output[0])\n else:\n if self.epsilon > self.hparams.epsilon_min and self.i_step > self.hparams.init_step:\n self.epsilon = self.epsilon - self.epsilon_decay\n if random.random() < self.epsilon:\n return random.randrange(self.hparams.n_actions)\n output = self.dqn.predict(self.session, [observation])\n return np.argmax(output[0])\n", "source": "the_stack_v2_python_sparse", "source_path": "rl/AtariGames/agent_dir/agent_dqn.py", "source_repo": "TVect/VectorLab", "split": "val", "star_events_count": 0} {"blob_id": "5e44e4bb4f408e9ac67762d7583adbb6c023bf31", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn DomainDnsSrvRecord()", "from .domain_dns_record import DomainDnsRecord\nfrom .domain_dns_record import DomainDnsRecord\nfields: Dict[str, Callable[[Any], None]] = {'nameTarget': lambda n: setattr(self, 'name_target', n.get_str_value()), 'port': lambda n: setattr(self, 'port', n.get_int_value()), 'priority': lambda n: setattr(self, 'priority', n.get_int_value()), 'protocol': lambda n: setattr(self, 'protocol', n.get_str_value()), 'service': lambda n: setattr(self, 'service', n.get_str_value()), 'weight': lambda n: setattr(self, 'weight', n.get_int_value())}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_str_value('nameTarget', self.name_target)\nwriter.write_int_value('port', self.port)\nwriter.write_int_value('priority', self.priority)\nwriter.write_str_value('protocol', self.protocol)\nwriter.write_str_value('service', self.service)\nwriter.write_int_value('weight', self.weight)"], "bodies_text": 
"<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return DomainDnsSrvRecord()\n<|end_body_0|>\n\n<|body_start_1|>\n from .domain_dns_record import DomainDnsRecord\n from .domain_dns_record import DomainDnsRecord\n fields: Dict[str, Callable[[Any], None]] = {'nameTarget': lambda n: setattr(self, 'name_target', n.get_str_value()), 'port': lambda n: setattr(self, 'port', n.get_int_value()), 'priority': lambda n: setattr(self, 'priority', n.get_int_value()), 'protocol': lambda n: setattr(self, 'protocol', n.get_str_value()), 'service': lambda n: setattr(self, 'service', n.get_str_value()), 'weight': lambda n: setattr(self, 'weight', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('nameTarget', self.name_target)\n writer.write_int_value('port', self.port)\n writer.write_int_value('priority', self.priority)\n writer.write_str_value('protocol', self.protocol)\n writer.write_str_value('service', self.service)\n writer.write_int_value('weight', self.weight)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "DomainDnsSrvRecord", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DomainDnsSrvRecord:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DomainDnsSrvRecord:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DomainDnsSrvRecord\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return DomainDnsSrvRecord()\n<|end_body_0|>\n\n<|body_start_1|>\n from .domain_dns_record import DomainDnsRecord\n from .domain_dns_record import DomainDnsRecord\n fields: Dict[str, Callable[[Any], None]] = {'nameTarget': lambda n: setattr(self, 'name_target', n.get_str_value()), 'port': lambda n: setattr(self, 'port', n.get_int_value()), 'priority': lambda n: setattr(self, 'priority', n.get_int_value()), 'protocol': lambda n: setattr(self, 'protocol', n.get_str_value()), 'service': lambda n: setattr(self, 'service', n.get_str_value()), 'weight': lambda n: setattr(self, 'weight', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('nameTarget', self.name_target)\n writer.write_int_value('port', self.port)\n writer.write_int_value('priority', self.priority)\n writer.write_str_value('protocol', self.protocol)\n writer.write_str_value('service', self.service)\n writer.write_int_value('weight', self.weight)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000180", "length_bytes": 3417, "license_type": "permissive", "methods": 
[{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DomainDnsSrvRecord", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DomainDnsSrvRecord"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001279", "prompt": "Implement the Python class `DomainDnsSrvRecord` described below.\n\nClass description:\nImplement the DomainDnsSrvRecord class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DomainDnsSrvRecord: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DomainDnsSrvRecord\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `DomainDnsSrvRecord` described below.\n\nClass description:\nImplement the DomainDnsSrvRecord class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DomainDnsSrvRecord: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DomainDnsSrvRecord\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass DomainDnsSrvRecord:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DomainDnsSrvRecord:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DomainDnsSrvRecord\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return DomainDnsSrvRecord()\n<|end_body_0|>\n\n<|body_start_1|>\n from 
.domain_dns_record import DomainDnsRecord\n from .domain_dns_record import DomainDnsRecord\n fields: Dict[str, Callable[[Any], None]] = {'nameTarget': lambda n: setattr(self, 'name_target', n.get_str_value()), 'port': lambda n: setattr(self, 'port', n.get_int_value()), 'priority': lambda n: setattr(self, 'priority', n.get_int_value()), 'protocol': lambda n: setattr(self, 'protocol', n.get_str_value()), 'service': lambda n: setattr(self, 'service', n.get_str_value()), 'weight': lambda n: setattr(self, 'weight', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('nameTarget', self.name_target)\n writer.write_int_value('port', self.port)\n writer.write_int_value('priority', self.priority)\n writer.write_str_value('protocol', self.protocol)\n writer.write_str_value('service', self.service)\n writer.write_int_value('weight', self.weight)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass DomainDnsSrvRecord:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DomainDnsSrvRecord:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DomainDnsSrvRecord\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DomainDnsSrvRecord:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DomainDnsSrvRecord:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DomainDnsSrvRecord\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return DomainDnsSrvRecord()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .domain_dns_record import DomainDnsRecord\n from .domain_dns_record import DomainDnsRecord\n fields: Dict[str, Callable[[Any], None]] = {'nameTarget': lambda n: setattr(self, 'name_target', n.get_str_value()), 'port': lambda n: setattr(self, 'port', n.get_int_value()), 'priority': lambda n: setattr(self, 'priority', n.get_int_value()), 'protocol': lambda n: setattr(self, 'protocol', n.get_str_value()), 'service': lambda n: setattr(self, 'service', n.get_str_value()), 'weight': lambda n: setattr(self, 'weight', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise 
TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('nameTarget', self.name_target)\n writer.write_int_value('port', self.port)\n writer.write_int_value('priority', self.priority)\n writer.write_str_value('protocol', self.protocol)\n writer.write_str_value('service', self.service)\n writer.write_int_value('weight', self.weight)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/domain_dns_srv_record.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "val", "star_events_count": 135} {"blob_id": "e57bcd00437420e1587ad9161c5f0376240d42d8", "bodies": ["ans = [[]]\nfor n in nums:\n new_ans = []\n for l in ans:\n for i in range(len(l) + 1):\n new_ans.append(l[:i] + [n] + l[i:])\n print(i, l, new_ans)\n if i < len(l) and l[i] == n:\n print('skip')\n break\n ans = new_ans\nreturn ans", "def dfs(partial, memo):\n print(partial, memo)\n if len(partial) == len(nums):\n res.append(partial)\n return\n cache = set()\n for i in range(len(nums)):\n if i in memo or nums[i] in cache:\n continue\n memo.add(i)\n cache.add(nums[i])\n dfs(partial + [nums[i]], memo)\n memo.remove(i)\nres = []\ndfs([], set())\nreturn res"], "bodies_text": "<|body_start_0|>\n ans = [[]]\n for n in nums:\n new_ans = []\n for l in ans:\n for i in range(len(l) + 1):\n new_ans.append(l[:i] + [n] + l[i:])\n print(i, l, new_ans)\n if i < len(l) and l[i] == n:\n print('skip')\n break\n ans = new_ans\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n def dfs(partial, memo):\n print(partial, memo)\n if len(partial) == len(nums):\n res.append(partial)\n return\n cache = set()\n for i in range(len(nums)):\n if i in memo or nums[i] in cache:\n continue\n memo.add(i)\n cache.add(nums[i])\n dfs(partial + [nums[i]], memo)\n memo.remove(i)\n res = []\n dfs([], set())\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def permuteUnique(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def permuteUnique(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ans = [[]]\n for n in nums:\n new_ans = []\n for l in ans:\n for i in range(len(l) + 1):\n new_ans.append(l[:i] + [n] + l[i:])\n print(i, l, new_ans)\n if i < len(l) and l[i] == n:\n print('skip')\n break\n ans = new_ans\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n def dfs(partial, memo):\n print(partial, memo)\n if len(partial) == len(nums):\n res.append(partial)\n return\n cache = set()\n for i in range(len(nums)):\n if i in memo or nums[i] in cache:\n continue\n memo.add(i)\n cache.add(nums[i])\n dfs(partial + [nums[i]], memo)\n memo.remove(i)\n res = []\n dfs([], set())\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000181", "length_bytes": 2807, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :rtype: List[List[int]]", "name": "permuteUnique", "signature": "def permuteUnique(self, nums)"}, {"docstring": ":type nums: List[int] :rtype: List[List[int]]", "name": "permuteUnique", "signature": "def permuteUnique(self, nums)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def permuteUnique(self, nums): :type nums: List[int] :rtype: List[List[int]]\n- def 
permuteUnique(self, nums): :type nums: List[int] :rtype: List[List[int]]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def permuteUnique(self, nums): :type nums: List[int] :rtype: List[List[int]]\n- def permuteUnique(self, nums): :type nums: List[int] :rtype: List[List[int]]\n\n<|skeleton|>\nclass Solution:\n\n def permuteUnique(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def permuteUnique(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ans = [[]]\n for n in nums:\n new_ans = []\n for l in ans:\n for i in range(len(l) + 1):\n new_ans.append(l[:i] + [n] + l[i:])\n print(i, l, new_ans)\n if i < len(l) and l[i] == n:\n print('skip')\n break\n ans = new_ans\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n def dfs(partial, memo):\n print(partial, memo)\n if len(partial) == len(nums):\n res.append(partial)\n return\n cache = set()\n for i in range(len(nums)):\n if i in memo or nums[i] in cache:\n continue\n memo.add(i)\n cache.add(nums[i])\n dfs(partial + [nums[i]], memo)\n memo.remove(i)\n res = []\n dfs([], set())\n return res\n<|end_body_1|>\n", "revision_id": "f3fc71f344cd758cfce77f16ab72992c99ab288e", "skeleton": "<|skeleton|>\nclass Solution:\n\n def permuteUnique(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def permuteUnique(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def permuteUnique(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n ans = [[]]\n for n in nums:\n new_ans = []\n for l in ans:\n for i in range(len(l) + 1):\n new_ans.append(l[:i] + [n] + l[i:])\n print(i, l, new_ans)\n if i < len(l) and l[i] == n:\n print('skip')\n break\n ans = new_ans\n return ans\n\n def permuteUnique(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n def dfs(partial, memo):\n print(partial, memo)\n if len(partial) == len(nums):\n res.append(partial)\n return\n cache = set()\n for i in range(len(nums)):\n if i in memo or nums[i] in cache:\n continue\n memo.add(i)\n cache.add(nums[i])\n dfs(partial + [nums[i]], memo)\n memo.remove(i)\n res = []\n dfs([], set())\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "47_permutations2.py", "source_repo": "jennyChing/leetCode", "split": "val", "star_events_count": 2} {"blob_id": "71e3d0c8d8f005f7e20bd56419bd3cedd0ea05e0", "bodies": ["self.pool_name = pool_name\nself.subnet = subnet\nself.use_smart_connect = use_smart_connect", "if dictionary is None:\n return None\npool_name = dictionary.get('poolName')\nsubnet = dictionary.get('subnet')\nuse_smart_connect = dictionary.get('useSmartConnect')\nreturn cls(pool_name, subnet, use_smart_connect)"], "bodies_text": "<|body_start_0|>\n self.pool_name = pool_name\n self.subnet = subnet\n self.use_smart_connect = use_smart_connect\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n pool_name = dictionary.get('poolName')\n subnet = dictionary.get('subnet')\n use_smart_connect = dictionary.get('useSmartConnect')\n return cls(pool_name, subnet, use_smart_connect)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 
'NetworkPoolConfig' model. While caonfiguring the isilon protection source, this is the selected network pool config for the isilon access zone. Attributes: pool_name (string): Specifies the name of the Network pool. subnet (string): Specifies the name of the subnet the network pool belongs to. use_smart_connect (bool): Specifies whether to use SmartConnect if available. If true, DNS name for the SmartConnect zone will be used to balance the IPs. Otherwise, pool IPs will be balanced manually.", "class_name": "NetworkPoolConfig", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NetworkPoolConfig:\n \"\"\"Implementation of the 'NetworkPoolConfig' model. While caonfiguring the isilon protection source, this is the selected network pool config for the isilon access zone. Attributes: pool_name (string): Specifies the name of the Network pool. subnet (string): Specifies the name of the subnet the network pool belongs to. use_smart_connect (bool): Specifies whether to use SmartConnect if available. If true, DNS name for the SmartConnect zone will be used to balance the IPs. Otherwise, pool IPs will be balanced manually.\"\"\"\n\n def __init__(self, pool_name=None, subnet=None, use_smart_connect=None):\n \"\"\"Constructor for the NetworkPoolConfig class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.pool_name = pool_name\n self.subnet = subnet\n self.use_smart_connect = use_smart_connect\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n pool_name = dictionary.get('poolName')\n subnet = dictionary.get('subnet')\n use_smart_connect = dictionary.get('useSmartConnect')\n return cls(pool_name, subnet, use_smart_connect)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000182", "length_bytes": 2126, "license_type": "permissive", "methods": [{"docstring": "Constructor for the NetworkPoolConfig class", "name": "__init__", "signature": "def __init__(self, pool_name=None, subnet=None, use_smart_connect=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `NetworkPoolConfig` described below.\n\nClass description:\nImplementation of the 'NetworkPoolConfig' model. While caonfiguring the isilon protection source, this is the selected network pool config for the isilon access zone. Attributes: pool_name (string): Specifies the name of the Network pool. subnet (string): Specifies the name of the subnet the network pool belongs to. use_smart_connect (bool): Specifies whether to use SmartConnect if available. If true, DNS name for the SmartConnect zone will be used to balance the IPs. 
Otherwise, pool IPs will be balanced manually.\n\nMethod signatures and docstrings:\n- def __init__(self, pool_name=None, subnet=None, use_smart_connect=None): Constructor for the NetworkPoolConfig class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `NetworkPoolConfig` described below.\n\nClass description:\nImplementation of the 'NetworkPoolConfig' model. While caonfiguring the isilon protection source, this is the selected network pool config for the isilon access zone. Attributes: pool_name (string): Specifies the name of the Network pool. subnet (string): Specifies the name of the subnet the network pool belongs to. use_smart_connect (bool): Specifies whether to use SmartConnect if available. If true, DNS name for the SmartConnect zone will be used to balance the IPs. Otherwise, pool IPs will be balanced manually.\n\nMethod signatures and docstrings:\n- def __init__(self, pool_name=None, subnet=None, use_smart_connect=None): Constructor for the NetworkPoolConfig class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass NetworkPoolConfig:\n \"\"\"Implementation of the 'NetworkPoolConfig' model. While caonfiguring the isilon protection source, this is the selected network pool config for the isilon access zone. Attributes: pool_name (string): Specifies the name of the Network pool. subnet (string): Specifies the name of the subnet the network pool belongs to. use_smart_connect (bool): Specifies whether to use SmartConnect if available. If true, DNS name for the SmartConnect zone will be used to balance the IPs. Otherwise, pool IPs will be balanced manually.\"\"\"\n\n def __init__(self, pool_name=None, subnet=None, use_smart_connect=None):\n \"\"\"Constructor for the NetworkPoolConfig class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.pool_name = pool_name\n self.subnet = subnet\n self.use_smart_connect = use_smart_connect\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n pool_name = dictionary.get('poolName')\n subnet = dictionary.get('subnet')\n use_smart_connect = dictionary.get('useSmartConnect')\n return cls(pool_name, subnet, use_smart_connect)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass NetworkPoolConfig:\n \"\"\"Implementation of the 'NetworkPoolConfig' model. While caonfiguring the isilon protection source, this is the selected network pool config for the isilon access zone. Attributes: pool_name (string): Specifies the name of the Network pool. 
subnet (string): Specifies the name of the subnet the network pool belongs to. use_smart_connect (bool): Specifies whether to use SmartConnect if available. If true, DNS name for the SmartConnect zone will be used to balance the IPs. Otherwise, pool IPs will be balanced manually.\"\"\"\n\n def __init__(self, pool_name=None, subnet=None, use_smart_connect=None):\n \"\"\"Constructor for the NetworkPoolConfig class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NetworkPoolConfig:\n \"\"\"Implementation of the 'NetworkPoolConfig' model. While configuring the Isilon protection source, this is the selected network pool config for the Isilon access zone. Attributes: pool_name (string): Specifies the name of the Network pool. subnet (string): Specifies the name of the subnet the network pool belongs to. use_smart_connect (bool): Specifies whether to use SmartConnect if available. If true, DNS name for the SmartConnect zone will be used to balance the IPs. Otherwise, pool IPs will be balanced manually.\"\"\"\n\n def __init__(self, pool_name=None, subnet=None, use_smart_connect=None):\n \"\"\"Constructor for the NetworkPoolConfig class\"\"\"\n self.pool_name = pool_name\n self.subnet = subnet\n self.use_smart_connect = use_smart_connect\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n pool_name = dictionary.get('poolName')\n subnet = dictionary.get('subnet')\n use_smart_connect = dictionary.get('useSmartConnect')\n return cls(pool_name, subnet, use_smart_connect)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/network_pool_config.py", "source_repo": "cohesity/management-sdk-python", "split": "val", "star_events_count": 24} {"blob_id": "799da106b6879624bb9b3203814a10f4b270d932", "bodies": ["if not prices:\n return 0\nn = len(prices)\n\n@lru_cache(None)\ndef dfs(index, status, k):\n if index == n or k == 0:\n return 0\n a, b, c = (0, 0, 0)\n a = dfs(index + 1, status, k)\n if status:\n b = dfs(index + 1, 0, k - 1) + prices[index]\n else:\n c = dfs(index + 1, 1, k) - prices[index]\n return max(a, b, c)\nreturn dfs(0, 0, 2)", "if not prices:\n return 0\nn = len(prices)\ndp = [[[0] * 2 for _ in range(3)] for _ in range(n)]\nfor i in range(3):\n dp[0][i][0] = -prices[0]\nfor i in range(1, n):\n for j in range(1, 3):\n dp[i][j - 1][0] = max(dp[i - 1][j - 1][0], dp[i - 1][j - 1][1] - prices[i])\n dp[i][j][1] = max(dp[i - 1][j][1], dp[i - 1][j - 1][0] + prices[i])\nreturn dp[-1][2][1]", "n = len(prices)\nbuy1, buy2 = (-prices[0], -prices[0])\nsell1, sell2 = (0, 0)\nfor i in range(1, n):\n buy1 = max(buy1, -prices[i])\n sell1 = max(sell1, buy1 + prices[i])\n buy2 = max(buy2, sell1 - prices[i])\n sell2 = max(sell2, buy2 + prices[i])\nreturn sell2"], "bodies_text": "<|body_start_0|>\n if not prices:\n return 0\n n = len(prices)\n\n @lru_cache(None)\n def dfs(index, status, k):\n if index == n or k == 0:\n return 0\n a, b, c = (0, 0, 0)\n a = dfs(index + 1, status, k)\n if status:\n b = dfs(index + 1, 0, k - 1) + prices[index]\n else:\n c = dfs(index + 1, 1, k) - prices[index]\n return max(a, b, c)\n return dfs(0, 0, 2)\n<|end_body_0|>\n\n<|body_start_1|>\n if not prices:\n return 0\n n = len(prices)\n dp = [[[0] * 2 for _ in range(3)] for _ in range(n)]\n for i in range(3):\n dp[0][i][0] = -prices[0]\n for i in range(1, n):\n for j in range(1, 3):\n dp[i][j - 1][0] = max(dp[i - 1][j - 1][0], dp[i - 1][j - 1][1] - prices[i])\n dp[i][j][1] = max(dp[i - 1][j][1], dp[i - 1][j - 1][0] + prices[i])\n return dp[-1][2][1]\n<|end_body_1|>\n\n<|body_start_2|>\n n = len(prices)\n buy1, buy2 = (-prices[0], -prices[0])\n sell1, sell2 = (0, 0)\n for i in range(1, n):\n buy1 = max(buy1, -prices[i])\n sell1 = max(sell1, buy1 + prices[i])\n buy2 = max(buy2, sell1 - prices[i])\n sell2 = max(sell2, buy2 + prices[i])\n return sell2\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maxProfit1(self, prices: List[int]) -> int:\n \"\"\"思路:记忆化递归 @param prices: @return:\"\"\"\n <|body_0|>\n\n def maxProfit2(self, prices: List[int]) -> int:\n \"\"\"思路:动态规划法 @param prices: @return:\"\"\"\n <|body_1|>\n\n def maxProfit3(self, prices: List[int]) -> int:\n \"\"\"思路:两个状态 @param prices: @return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not prices:\n return 0\n n = len(prices)\n\n @lru_cache(None)\n def dfs(index, status, k):\n if index == n or k == 0:\n return 0\n a, b, c = (0, 0, 0)\n a = dfs(index + 1, status, k)\n if status:\n b = dfs(index + 1, 0, k - 1) + prices[index]\n else:\n c = dfs(index + 1, 1, k) - prices[index]\n return max(a, b, c)\n return dfs(0, 0, 
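The NetworkPoolConfig record above maps camelCase API keys ('poolName', 'useSmartConnect') to snake_case attributes. A minimal self-contained sketch of that round trip, with hypothetical payload values; note that the record's skeleton declares from_dictionary(cls, dictionary) without a @classmethod decorator, which this sketch adds (an assumption) so the factory call works the way the docstring implies:

class NetworkPoolConfig:
    def __init__(self, pool_name=None, subnet=None, use_smart_connect=None):
        self.pool_name = pool_name
        self.subnet = subnet
        self.use_smart_connect = use_smart_connect

    @classmethod  # assumed: the record's skeleton omits this decorator
    def from_dictionary(cls, dictionary):
        if dictionary is None:
            return None
        return cls(dictionary.get('poolName'),
                   dictionary.get('subnet'),
                   dictionary.get('useSmartConnect'))

# Round trip from a server-style payload (hypothetical values):
payload = {'poolName': 'pool0', 'subnet': 'subnet0', 'useSmartConnect': True}
config = NetworkPoolConfig.from_dictionary(payload)
assert (config.pool_name, config.subnet, config.use_smart_connect) == ('pool0', 'subnet0', True)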
2)\n<|end_body_0|>\n\n<|body_start_1|>\n if not prices:\n return 0\n n = len(prices)\n dp = [[[0] * 2 for _ in range(3)] for _ in range(n)]\n for i in range(3):\n dp[0][i][0] = -prices[0]\n for i in range(1, n):\n for j in range(1, 3):\n dp[i][j - 1][0] = max(dp[i - 1][j - 1][0], dp[i - 1][j - 1][1] - prices[i])\n dp[i][j][1] = max(dp[i - 1][j][1], dp[i - 1][j - 1][0] + prices[i])\n return dp[-1][2][1]\n<|end_body_1|>\n\n<|body_start_2|>\n n = len(prices)\n buy1, buy2 = (-prices[0], -prices[0])\n sell1, sell2 = (0, 0)\n for i in range(1, n):\n buy1 = max(buy1, -prices[i])\n sell1 = max(sell1, buy1 + prices[i])\n buy2 = max(buy2, sell1 - prices[i])\n sell2 = max(sell2, buy2 + prices[i])\n return sell2\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000183", "length_bytes": 3529, "license_type": "no_license", "methods": [{"docstring": "思路:记忆化递归 @param prices: @return:", "name": "maxProfit1", "signature": "def maxProfit1(self, prices: List[int]) -> int"}, {"docstring": "思路:动态规划法 @param prices: @return:", "name": "maxProfit2", "signature": "def maxProfit2(self, prices: List[int]) -> int"}, {"docstring": "思路:两个状态 @param prices: @return:", "name": "maxProfit3", "signature": "def maxProfit3(self, prices: List[int]) -> int"}], "n_methods": 3, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxProfit1(self, prices: List[int]) -> int: 思路:记忆化递归 @param prices: @return:\n- def maxProfit2(self, prices: List[int]) -> int: 思路:动态规划法 @param prices: @return:\n- def maxProfit3(self, prices: List[int]) -> int: 思路:两个状态 @param prices: @return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxProfit1(self, prices: List[int]) -> int: 思路:记忆化递归 @param prices: @return:\n- def maxProfit2(self, prices: List[int]) -> int: 思路:动态规划法 @param prices: @return:\n- def maxProfit3(self, prices: List[int]) -> int: 思路:两个状态 @param prices: @return:\n\n<|skeleton|>\nclass Solution:\n\n def maxProfit1(self, prices: List[int]) -> int:\n \"\"\"思路:记忆化递归 @param prices: @return:\"\"\"\n <|body_0|>\n\n def maxProfit2(self, prices: List[int]) -> int:\n \"\"\"思路:动态规划法 @param prices: @return:\"\"\"\n <|body_1|>\n\n def maxProfit3(self, prices: List[int]) -> int:\n \"\"\"思路:两个状态 @param prices: @return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not prices:\n return 0\n n = len(prices)\n\n @lru_cache(None)\n def dfs(index, status, k):\n if index == n or k == 0:\n return 0\n a, b, c = (0, 0, 0)\n a = dfs(index + 1, status, k)\n if status:\n b = dfs(index + 1, 0, k - 1) + prices[index]\n else:\n c = dfs(index + 1, 1, k) - prices[index]\n return max(a, b, c)\n return dfs(0, 0, 2)\n<|end_body_0|>\n\n<|body_start_1|>\n if not prices:\n return 0\n n = len(prices)\n dp = [[[0] * 2 for _ in range(3)] for _ in range(n)]\n for i in range(3):\n dp[0][i][0] = -prices[0]\n for i in range(1, n):\n for j in range(1, 3):\n dp[i][j - 1][0] = max(dp[i - 1][j - 1][0], dp[i - 1][j - 1][1] - prices[i])\n dp[i][j][1] = max(dp[i - 1][j][1], dp[i - 1][j - 1][0] + prices[i])\n return dp[-1][2][1]\n<|end_body_1|>\n\n<|body_start_2|>\n n = len(prices)\n buy1, buy2 = (-prices[0], -prices[0])\n sell1, sell2 = (0, 0)\n for i in range(1, n):\n buy1 = max(buy1, -prices[i])\n sell1 = max(sell1, buy1 + prices[i])\n buy2 = max(buy2, sell1 - prices[i])\n sell2 = max(sell2, buy2 + prices[i])\n return 
sell2\n<|end_body_2|>\n", "revision_id": "e43ee86c5a8cdb808da09b4b6138e10275abadb5", "skeleton": "<|skeleton|>\nclass Solution:\n\n def maxProfit1(self, prices: List[int]) -> int:\n \"\"\"思路:记忆化递归 @param prices: @return:\"\"\"\n <|body_0|>\n\n def maxProfit2(self, prices: List[int]) -> int:\n \"\"\"思路:动态规划法 @param prices: @return:\"\"\"\n <|body_1|>\n\n def maxProfit3(self, prices: List[int]) -> int:\n \"\"\"思路:两个状态 @param prices: @return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def maxProfit1(self, prices: List[int]) -> int:\n \"\"\"思路:记忆化递归 @param prices: @return:\"\"\"\n if not prices:\n return 0\n n = len(prices)\n\n @lru_cache(None)\n def dfs(index, status, k):\n if index == n or k == 0:\n return 0\n a, b, c = (0, 0, 0)\n a = dfs(index + 1, status, k)\n if status:\n b = dfs(index + 1, 0, k - 1) + prices[index]\n else:\n c = dfs(index + 1, 1, k) - prices[index]\n return max(a, b, c)\n return dfs(0, 0, 2)\n\n def maxProfit2(self, prices: List[int]) -> int:\n \"\"\"思路:动态规划法 @param prices: @return:\"\"\"\n if not prices:\n return 0\n n = len(prices)\n dp = [[[0] * 2 for _ in range(3)] for _ in range(n)]\n for i in range(3):\n dp[0][i][0] = -prices[0]\n for i in range(1, n):\n for j in range(1, 3):\n dp[i][j - 1][0] = max(dp[i - 1][j - 1][0], dp[i - 1][j - 1][1] - prices[i])\n dp[i][j][1] = max(dp[i - 1][j][1], dp[i - 1][j - 1][0] + prices[i])\n return dp[-1][2][1]\n\n def maxProfit3(self, prices: List[int]) -> int:\n \"\"\"思路:两个状态 @param prices: @return:\"\"\"\n n = len(prices)\n buy1, buy2 = (-prices[0], -prices[0])\n sell1, sell2 = (0, 0)\n for i in range(1, n):\n buy1 = max(buy1, -prices[i])\n sell1 = max(sell1, buy1 + prices[i])\n buy2 = max(buy2, sell1 - prices[i])\n sell2 = max(sell2, buy2 + prices[i])\n return sell2\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode/股票购买专题/123. 
买卖股票的最佳时机 III.py", "source_repo": "yiming1012/MyLeetCode", "split": "val", "star_events_count": 2} {"blob_id": "899813d6c430bada0e3e38b84264c07ff6d6cb91", "bodies": ["super(LAMBOptimizer_v1, self).__init__(False, name)\nself.learning_rate = learning_rate\nself.weight_decay_rate = weight_decay_rate\nself.beta_1 = beta_1\nself.beta_2 = beta_2\nself.epsilon = epsilon\nself.exclude_from_weight_decay = exclude_from_weight_decay\nself.include_in_weight_decay = include_in_weight_decay", "assignments = []\nif learning_rate is None:\n learning_rate = self.learning_rate\n tf.logging.info('***** use default learning rate ***** ', learning_rate)\nelse:\n tf.logging.info('***** use provided learning rate ***** ', learning_rate)\nfor grad, param in grads_and_vars:\n if grad is None or param is None:\n continue\n param_name = self._get_variable_name(param.name)\n tf.logging.info('***** apply gradients parameter name ***** %s', param_name)\n tf.logging.info('***** param: %s learning rate: %s ***** ', param_name, str(learning_rate))\n m = tf.get_variable(name=param_name + '/lamb_m', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n v = tf.get_variable(name=param_name + '/lamb_v', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n next_m = tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)\n next_v = tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, tf.square(grad))\n update = next_m / (tf.sqrt(next_v) + self.epsilon)\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n r1 = tf.sqrt(tf.reduce_sum(tf.square(param)))\n r2 = tf.sqrt(tf.reduce_sum(tf.square(update)))\n r = tf.where(tf.greater(r1, 0.0), tf.where(tf.greater(r2, 0.0), r1 / r2, 1.0), 1.0)\n eta = learning_rate * r\n update_with_lr = eta * update\n next_param = param - update_with_lr\n assignments.extend([param.assign(next_param), m.assign(next_m), v.assign(next_v)])\nreturn tf.group(*assignments, name=name)", "if not self.weight_decay_rate:\n return False\nif self.include_in_weight_decay:\n for r in self.include_in_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Include %s in weight decay', param_name)\n return True\nif self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Adam WD excludes %s', param_name)\n return False\nreturn True", "m = re.match('^(.*):\\\\d+$', param_name)\nif m is not None:\n param_name = m.group(1)\nreturn param_name"], "bodies_text": "<|body_start_0|>\n super(LAMBOptimizer_v1, self).__init__(False, name)\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay\n self.include_in_weight_decay = include_in_weight_decay\n<|end_body_0|>\n\n<|body_start_1|>\n assignments = []\n if learning_rate is None:\n learning_rate = self.learning_rate\n tf.logging.info('***** use default learning rate ***** ', learning_rate)\n else:\n tf.logging.info('***** use provided learning rate ***** ', learning_rate)\n for grad, param in grads_and_vars:\n if grad is None or param is None:\n continue\n param_name = self._get_variable_name(param.name)\n tf.logging.info('***** apply gradients parameter name ***** %s', param_name)\n tf.logging.info('***** param: %s learning rate: %s ***** ', param_name, str(learning_rate))\n m = 
tf.get_variable(name=param_name + '/lamb_m', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n v = tf.get_variable(name=param_name + '/lamb_v', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n next_m = tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)\n next_v = tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, tf.square(grad))\n update = next_m / (tf.sqrt(next_v) + self.epsilon)\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n r1 = tf.sqrt(tf.reduce_sum(tf.square(param)))\n r2 = tf.sqrt(tf.reduce_sum(tf.square(update)))\n r = tf.where(tf.greater(r1, 0.0), tf.where(tf.greater(r2, 0.0), r1 / r2, 1.0), 1.0)\n eta = learning_rate * r\n update_with_lr = eta * update\n next_param = param - update_with_lr\n assignments.extend([param.assign(next_param), m.assign(next_m), v.assign(next_v)])\n return tf.group(*assignments, name=name)\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.weight_decay_rate:\n return False\n if self.include_in_weight_decay:\n for r in self.include_in_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Include %s in weight decay', param_name)\n return True\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Adam WD excludes %s', param_name)\n return False\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n m = re.match('^(.*):\\\\d+$', param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name\n<|end_body_3|>\n", "class_docstring": "LAMBOptimizer optimizer. https://github.com/ymcui/LAMB_Optimizer_TF # IMPORTANT NOTE - This is NOT an official implementation. - LAMB optimizer is changed from arXiv v1 ~ v3. - We implement v3 version (which is the latest version on June, 2019.). - Our implementation is based on `AdamWeightDecayOptimizer` in BERT (provided by Google). # References - Large Batch Optimization for Deep Learning: Training BERT in 76 minutes. https://arxiv.org/abs/1904.00962v3 - BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. https://arxiv.org/abs/1810.04805 # Parameters - There is nothing special, just the same as `AdamWeightDecayOptimizer`.", "class_name": "LAMBOptimizer_v1", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LAMBOptimizer_v1:\n \"\"\"LAMBOptimizer optimizer. https://github.com/ymcui/LAMB_Optimizer_TF # IMPORTANT NOTE - This is NOT an official implementation. - LAMB optimizer is changed from arXiv v1 ~ v3. - We implement v3 version (which is the latest version on June, 2019.). - Our implementation is based on `AdamWeightDecayOptimizer` in BERT (provided by Google). # References - Large Batch Optimization for Deep Learning: Training BERT in 76 minutes. https://arxiv.org/abs/1904.00962v3 - BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. 
https://arxiv.org/abs/1810.04805 # Parameters - There is nothing special, just the same as `AdamWeightDecayOptimizer`.\"\"\"\n\n def __init__(self, learning_rate, weight_decay_rate=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, include_in_weight_decay=['r_s_bias', 'r_r_bias', 'r_w_bias'], name='LAMBOptimizer'):\n \"\"\"Constructs a LAMBOptimizer.\"\"\"\n <|body_0|>\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None, learning_rate=None):\n \"\"\"See base class.\"\"\"\n <|body_1|>\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n <|body_2|>\n\n def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(LAMBOptimizer_v1, self).__init__(False, name)\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay\n self.include_in_weight_decay = include_in_weight_decay\n<|end_body_0|>\n\n<|body_start_1|>\n assignments = []\n if learning_rate is None:\n learning_rate = self.learning_rate\n tf.logging.info('***** use default learning rate ***** ', learning_rate)\n else:\n tf.logging.info('***** use provided learning rate ***** ', learning_rate)\n for grad, param in grads_and_vars:\n if grad is None or param is None:\n continue\n param_name = self._get_variable_name(param.name)\n tf.logging.info('***** apply gradients parameter name ***** %s', param_name)\n tf.logging.info('***** param: %s learning rate: %s ***** ', param_name, str(learning_rate))\n m = tf.get_variable(name=param_name + '/lamb_m', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n v = tf.get_variable(name=param_name + '/lamb_v', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n next_m = tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)\n next_v = tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, tf.square(grad))\n update = next_m / (tf.sqrt(next_v) + self.epsilon)\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n r1 = tf.sqrt(tf.reduce_sum(tf.square(param)))\n r2 = tf.sqrt(tf.reduce_sum(tf.square(update)))\n r = tf.where(tf.greater(r1, 0.0), tf.where(tf.greater(r2, 0.0), r1 / r2, 1.0), 1.0)\n eta = learning_rate * r\n update_with_lr = eta * update\n next_param = param - update_with_lr\n assignments.extend([param.assign(next_param), m.assign(next_m), v.assign(next_v)])\n return tf.group(*assignments, name=name)\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.weight_decay_rate:\n return False\n if self.include_in_weight_decay:\n for r in self.include_in_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Include %s in weight decay', param_name)\n return True\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Adam WD excludes %s', param_name)\n return False\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n m = re.match('^(.*):\\\\d+$', param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000184", "length_bytes": 25398, "license_type": "permissive", "methods": [{"docstring": "Constructs a LAMBOptimizer.", 
"name": "__init__", "signature": "def __init__(self, learning_rate, weight_decay_rate=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, include_in_weight_decay=['r_s_bias', 'r_r_bias', 'r_w_bias'], name='LAMBOptimizer')"}, {"docstring": "See base class.", "name": "apply_gradients", "signature": "def apply_gradients(self, grads_and_vars, global_step=None, name=None, learning_rate=None)"}, {"docstring": "Whether to use L2 weight decay for `param_name`.", "name": "_do_use_weight_decay", "signature": "def _do_use_weight_decay(self, param_name)"}, {"docstring": "Get the variable name from the tensor name.", "name": "_get_variable_name", "signature": "def _get_variable_name(self, param_name)"}], "n_methods": 4, "prompt": "Implement the Python class `LAMBOptimizer_v1` described below.\n\nClass description:\nLAMBOptimizer optimizer. https://github.com/ymcui/LAMB_Optimizer_TF # IMPORTANT NOTE - This is NOT an official implementation. - LAMB optimizer is changed from arXiv v1 ~ v3. - We implement v3 version (which is the latest version on June, 2019.). - Our implementation is based on `AdamWeightDecayOptimizer` in BERT (provided by Google). # References - Large Batch Optimization for Deep Learning: Training BERT in 76 minutes. https://arxiv.org/abs/1904.00962v3 - BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. https://arxiv.org/abs/1810.04805 # Parameters - There is nothing special, just the same as `AdamWeightDecayOptimizer`.\n\nMethod signatures and docstrings:\n- def __init__(self, learning_rate, weight_decay_rate=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, include_in_weight_decay=['r_s_bias', 'r_r_bias', 'r_w_bias'], name='LAMBOptimizer'): Constructs a LAMBOptimizer.\n- def apply_gradients(self, grads_and_vars, global_step=None, name=None, learning_rate=None): See base class.\n- def _do_use_weight_decay(self, param_name): Whether to use L2 weight decay for `param_name`.\n- def _get_variable_name(self, param_name): Get the variable name from the tensor name.", "prompted_full_text": "Implement the Python class `LAMBOptimizer_v1` described below.\n\nClass description:\nLAMBOptimizer optimizer. https://github.com/ymcui/LAMB_Optimizer_TF # IMPORTANT NOTE - This is NOT an official implementation. - LAMB optimizer is changed from arXiv v1 ~ v3. - We implement v3 version (which is the latest version on June, 2019.). - Our implementation is based on `AdamWeightDecayOptimizer` in BERT (provided by Google). # References - Large Batch Optimization for Deep Learning: Training BERT in 76 minutes. https://arxiv.org/abs/1904.00962v3 - BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. https://arxiv.org/abs/1810.04805 # Parameters - There is nothing special, just the same as `AdamWeightDecayOptimizer`.\n\nMethod signatures and docstrings:\n- def __init__(self, learning_rate, weight_decay_rate=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, include_in_weight_decay=['r_s_bias', 'r_r_bias', 'r_w_bias'], name='LAMBOptimizer'): Constructs a LAMBOptimizer.\n- def apply_gradients(self, grads_and_vars, global_step=None, name=None, learning_rate=None): See base class.\n- def _do_use_weight_decay(self, param_name): Whether to use L2 weight decay for `param_name`.\n- def _get_variable_name(self, param_name): Get the variable name from the tensor name.\n\n<|skeleton|>\nclass LAMBOptimizer_v1:\n \"\"\"LAMBOptimizer optimizer. 
https://github.com/ymcui/LAMB_Optimizer_TF # IMPORTANT NOTE - This is NOT an official implementation. - LAMB optimizer is changed from arXiv v1 ~ v3. - We implement v3 version (which is the latest version on June, 2019.). - Our implementation is based on `AdamWeightDecayOptimizer` in BERT (provided by Google). # References - Large Batch Optimization for Deep Learning: Training BERT in 76 minutes. https://arxiv.org/abs/1904.00962v3 - BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. https://arxiv.org/abs/1810.04805 # Parameters - There is nothing special, just the same as `AdamWeightDecayOptimizer`.\"\"\"\n\n def __init__(self, learning_rate, weight_decay_rate=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, include_in_weight_decay=['r_s_bias', 'r_r_bias', 'r_w_bias'], name='LAMBOptimizer'):\n \"\"\"Constructs a LAMBOptimizer.\"\"\"\n <|body_0|>\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None, learning_rate=None):\n \"\"\"See base class.\"\"\"\n <|body_1|>\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n <|body_2|>\n\n def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(LAMBOptimizer_v1, self).__init__(False, name)\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay\n self.include_in_weight_decay = include_in_weight_decay\n<|end_body_0|>\n\n<|body_start_1|>\n assignments = []\n if learning_rate is None:\n learning_rate = self.learning_rate\n tf.logging.info('***** use default learning rate ***** ', learning_rate)\n else:\n tf.logging.info('***** use provided learning rate ***** ', learning_rate)\n for grad, param in grads_and_vars:\n if grad is None or param is None:\n continue\n param_name = self._get_variable_name(param.name)\n tf.logging.info('***** apply gradients parameter name ***** %s', param_name)\n tf.logging.info('***** param: %s learning rate: %s ***** ', param_name, str(learning_rate))\n m = tf.get_variable(name=param_name + '/lamb_m', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n v = tf.get_variable(name=param_name + '/lamb_v', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n next_m = tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)\n next_v = tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, tf.square(grad))\n update = next_m / (tf.sqrt(next_v) + self.epsilon)\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n r1 = tf.sqrt(tf.reduce_sum(tf.square(param)))\n r2 = tf.sqrt(tf.reduce_sum(tf.square(update)))\n r = tf.where(tf.greater(r1, 0.0), tf.where(tf.greater(r2, 0.0), r1 / r2, 1.0), 1.0)\n eta = learning_rate * r\n update_with_lr = eta * update\n next_param = param - update_with_lr\n assignments.extend([param.assign(next_param), m.assign(next_m), v.assign(next_v)])\n return tf.group(*assignments, name=name)\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.weight_decay_rate:\n return False\n if self.include_in_weight_decay:\n for r in self.include_in_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Include %s in weight decay', param_name)\n return True\n if 
self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Adam WD excludes %s', param_name)\n return False\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n m = re.match('^(.*):\\\\d+$', param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name\n<|end_body_3|>\n", "revision_id": "480c909e0835a455606e829310ff949c9dd23549", "skeleton": "<|skeleton|>\nclass LAMBOptimizer_v1:\n \"\"\"LAMBOptimizer optimizer. https://github.com/ymcui/LAMB_Optimizer_TF # IMPORTANT NOTE - This is NOT an official implementation. - LAMB optimizer is changed from arXiv v1 ~ v3. - We implement v3 version (which is the latest version on June, 2019.). - Our implementation is based on `AdamWeightDecayOptimizer` in BERT (provided by Google). # References - Large Batch Optimization for Deep Learning: Training BERT in 76 minutes. https://arxiv.org/abs/1904.00962v3 - BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. https://arxiv.org/abs/1810.04805 # Parameters - There is nothing special, just the same as `AdamWeightDecayOptimizer`.\"\"\"\n\n def __init__(self, learning_rate, weight_decay_rate=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, include_in_weight_decay=['r_s_bias', 'r_r_bias', 'r_w_bias'], name='LAMBOptimizer'):\n \"\"\"Constructs a LAMBOptimizer.\"\"\"\n <|body_0|>\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None, learning_rate=None):\n \"\"\"See base class.\"\"\"\n <|body_1|>\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n <|body_2|>\n\n def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LAMBOptimizer_v1:\n \"\"\"LAMBOptimizer optimizer. https://github.com/ymcui/LAMB_Optimizer_TF # IMPORTANT NOTE - This is NOT an official implementation. - LAMB optimizer is changed from arXiv v1 ~ v3. - We implement v3 version (which is the latest version on June, 2019.). - Our implementation is based on `AdamWeightDecayOptimizer` in BERT (provided by Google). # References - Large Batch Optimization for Deep Learning: Training BERT in 76 minutes. https://arxiv.org/abs/1904.00962v3 - BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. 
https://arxiv.org/abs/1810.04805 # Parameters - There is nothing special, just the same as `AdamWeightDecayOptimizer`.\"\"\"\n\n def __init__(self, learning_rate, weight_decay_rate=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, include_in_weight_decay=['r_s_bias', 'r_r_bias', 'r_w_bias'], name='LAMBOptimizer'):\n \"\"\"Constructs a LAMBOptimizer.\"\"\"\n super(LAMBOptimizer_v1, self).__init__(False, name)\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay\n self.include_in_weight_decay = include_in_weight_decay\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None, learning_rate=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n if learning_rate is None:\n learning_rate = self.learning_rate\n tf.logging.info('***** use default learning rate ***** ', learning_rate)\n else:\n tf.logging.info('***** use provided learning rate ***** ', learning_rate)\n for grad, param in grads_and_vars:\n if grad is None or param is None:\n continue\n param_name = self._get_variable_name(param.name)\n tf.logging.info('***** apply gradients parameter name ***** %s', param_name)\n tf.logging.info('***** param: %s learning rate: %s ***** ', param_name, str(learning_rate))\n m = tf.get_variable(name=param_name + '/lamb_m', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n v = tf.get_variable(name=param_name + '/lamb_v', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n next_m = tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)\n next_v = tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, tf.square(grad))\n update = next_m / (tf.sqrt(next_v) + self.epsilon)\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n r1 = tf.sqrt(tf.reduce_sum(tf.square(param)))\n r2 = tf.sqrt(tf.reduce_sum(tf.square(update)))\n r = tf.where(tf.greater(r1, 0.0), tf.where(tf.greater(r2, 0.0), r1 / r2, 1.0), 1.0)\n eta = learning_rate * r\n update_with_lr = eta * update\n next_param = param - update_with_lr\n assignments.extend([param.assign(next_param), m.assign(next_m), v.assign(next_v)])\n return tf.group(*assignments, name=name)\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n if self.include_in_weight_decay:\n for r in self.include_in_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Include %s in weight decay', param_name)\n return True\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Adam WD excludes %s', param_name)\n return False\n return True\n\n def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match('^(.*):\\\\d+$', param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name\n", "source": "the_stack_v2_python_sparse", "source_path": "t2t_bert/optimizer/optimizer_utils.py", "source_repo": "yyht/BERT", "split": "val", "star_events_count": 37} {"blob_id": "5bcc1f933c809062f02d79567fde618594f5c5f3", "bodies": ["data = []\nfor i, m in enumerate(result.get('availableMachineType', [])):\n key = ''\n if i == 0:\n key = 'machine types'\n data.append((key, 
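The apply_gradients body in the LAMBOptimizer_v1 record above builds the update in the TF1 graph API. The per-tensor math is easier to see in plain NumPy; the following is a sketch of one update step under the record's conventions (no Adam-style bias correction, trust ratio falling back to 1.0 when either norm is zero), not the record's TF implementation:

import numpy as np

def lamb_step(param, grad, m, v, lr, beta1=0.9, beta2=0.999, eps=1e-06, wd=0.01):
    # One LAMB update for a single tensor, mirroring the record's math.
    m = beta1 * m + (1.0 - beta1) * grad
    v = beta2 * v + (1.0 - beta2) * grad ** 2
    update = m / (np.sqrt(v) + eps) + wd * param  # weight decay folded into the update
    r1 = np.linalg.norm(param)                    # weight norm
    r2 = np.linalg.norm(update)                   # update norm
    trust_ratio = r1 / r2 if r1 > 0.0 and r2 > 0.0 else 1.0
    return param - lr * trust_ratio * update, m, v

param, m, v = np.ones(4), np.zeros(4), np.zeros(4)
param, m, v = lamb_step(param, np.array([0.1, -0.2, 0.3, 0.0]), m, v, lr=0.001)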
self._presenter.PresentElement(m)))\nfor window in result.get('maintenanceWindows', []):\n maintenance_info = []\n maintenance_info.append(('name', window['name']))\n maintenance_info.append(('description', window['description']))\n maintenance_info.append(('begin-time', window['beginTime']))\n maintenance_info.append(('end-time', window['endTime']))\n data.append(('maintenance-window', maintenance_info))\nreturn data", "zone_context = self._context_parser.ParseContextOrPrompt('zones', zone_name)\nrequest = self.api.zones.get(project=zone_context['project'], zone=zone_context['zone'])\nreturn request.execute()"], "bodies_text": "<|body_start_0|>\n data = []\n for i, m in enumerate(result.get('availableMachineType', [])):\n key = ''\n if i == 0:\n key = 'machine types'\n data.append((key, self._presenter.PresentElement(m)))\n for window in result.get('maintenanceWindows', []):\n maintenance_info = []\n maintenance_info.append(('name', window['name']))\n maintenance_info.append(('description', window['description']))\n maintenance_info.append(('begin-time', window['beginTime']))\n maintenance_info.append(('end-time', window['endTime']))\n data.append(('maintenance-window', maintenance_info))\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n zone_context = self._context_parser.ParseContextOrPrompt('zones', zone_name)\n request = self.api.zones.get(project=zone_context['project'], zone=zone_context['zone'])\n return request.execute()\n<|end_body_1|>\n", "class_docstring": "Get a zone.", "class_name": "GetZone", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GetZone:\n \"\"\"Get a zone.\"\"\"\n\n def GetDetailRow(self, result):\n \"\"\"Returns an associative list of items for display in a detail table. Args: result: A dict returned by the server. Returns: A list.\"\"\"\n <|body_0|>\n\n def Handle(self, zone_name):\n \"\"\"Get the specified zone. Args: zone_name: Path of the zone to get. Returns: The result of getting the zone.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = []\n for i, m in enumerate(result.get('availableMachineType', [])):\n key = ''\n if i == 0:\n key = 'machine types'\n data.append((key, self._presenter.PresentElement(m)))\n for window in result.get('maintenanceWindows', []):\n maintenance_info = []\n maintenance_info.append(('name', window['name']))\n maintenance_info.append(('description', window['description']))\n maintenance_info.append(('begin-time', window['beginTime']))\n maintenance_info.append(('end-time', window['endTime']))\n data.append(('maintenance-window', maintenance_info))\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n zone_context = self._context_parser.ParseContextOrPrompt('zones', zone_name)\n request = self.api.zones.get(project=zone_context['project'], zone=zone_context['zone'])\n return request.execute()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000185", "length_bytes": 4211, "license_type": "permissive", "methods": [{"docstring": "Returns an associative list of items for display in a detail table. Args: result: A dict returned by the server. Returns: A list.", "name": "GetDetailRow", "signature": "def GetDetailRow(self, result)"}, {"docstring": "Get the specified zone. Args: zone_name: Path of the zone to get. 
Returns: The result of getting the zone.", "name": "Handle", "signature": "def Handle(self, zone_name)"}], "n_methods": 2, "prompt": "Implement the Python class `GetZone` described below.\n\nClass description:\nGet a zone.\n\nMethod signatures and docstrings:\n- def GetDetailRow(self, result): Returns an associative list of items for display in a detail table. Args: result: A dict returned by the server. Returns: A list.\n- def Handle(self, zone_name): Get the specified zone. Args: zone_name: Path of the zone to get. Returns: The result of getting the zone.", "prompted_full_text": "Implement the Python class `GetZone` described below.\n\nClass description:\nGet a zone.\n\nMethod signatures and docstrings:\n- def GetDetailRow(self, result): Returns an associative list of items for display in a detail table. Args: result: A dict returned by the server. Returns: A list.\n- def Handle(self, zone_name): Get the specified zone. Args: zone_name: Path of the zone to get. Returns: The result of getting the zone.\n\n<|skeleton|>\nclass GetZone:\n \"\"\"Get a zone.\"\"\"\n\n def GetDetailRow(self, result):\n \"\"\"Returns an associative list of items for display in a detail table. Args: result: A dict returned by the server. Returns: A list.\"\"\"\n <|body_0|>\n\n def Handle(self, zone_name):\n \"\"\"Get the specified zone. Args: zone_name: Path of the zone to get. Returns: The result of getting the zone.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = []\n for i, m in enumerate(result.get('availableMachineType', [])):\n key = ''\n if i == 0:\n key = 'machine types'\n data.append((key, self._presenter.PresentElement(m)))\n for window in result.get('maintenanceWindows', []):\n maintenance_info = []\n maintenance_info.append(('name', window['name']))\n maintenance_info.append(('description', window['description']))\n maintenance_info.append(('begin-time', window['beginTime']))\n maintenance_info.append(('end-time', window['endTime']))\n data.append(('maintenance-window', maintenance_info))\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n zone_context = self._context_parser.ParseContextOrPrompt('zones', zone_name)\n request = self.api.zones.get(project=zone_context['project'], zone=zone_context['zone'])\n return request.execute()\n<|end_body_1|>\n", "revision_id": "d379afa2db3582d5c3be652165f0e9e2e0c154c6", "skeleton": "<|skeleton|>\nclass GetZone:\n \"\"\"Get a zone.\"\"\"\n\n def GetDetailRow(self, result):\n \"\"\"Returns an associative list of items for display in a detail table. Args: result: A dict returned by the server. Returns: A list.\"\"\"\n <|body_0|>\n\n def Handle(self, zone_name):\n \"\"\"Get the specified zone. Args: zone_name: Path of the zone to get. Returns: The result of getting the zone.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GetZone:\n \"\"\"Get a zone.\"\"\"\n\n def GetDetailRow(self, result):\n \"\"\"Returns an associative list of items for display in a detail table. Args: result: A dict returned by the server. 
Returns: A list.\"\"\"\n data = []\n for i, m in enumerate(result.get('availableMachineType', [])):\n key = ''\n if i == 0:\n key = 'machine types'\n data.append((key, self._presenter.PresentElement(m)))\n for window in result.get('maintenanceWindows', []):\n maintenance_info = []\n maintenance_info.append(('name', window['name']))\n maintenance_info.append(('description', window['description']))\n maintenance_info.append(('begin-time', window['beginTime']))\n maintenance_info.append(('end-time', window['endTime']))\n data.append(('maintenance-window', maintenance_info))\n return data\n\n def Handle(self, zone_name):\n \"\"\"Get the specified zone. Args: zone_name: Path of the zone to get. Returns: The result of getting the zone.\"\"\"\n zone_context = self._context_parser.ParseContextOrPrompt('zones', zone_name)\n request = self.api.zones.get(project=zone_context['project'], zone=zone_context['zone'])\n return request.execute()\n", "source": "the_stack_v2_python_sparse", "source_path": "y/google-cloud-sdk/platform/gcutil/lib/google_compute_engine/gcutil_lib/zone_cmds.py", "source_repo": "ychen820/microblog", "split": "val", "star_events_count": 0} {"blob_id": "6196fcd7581771681e58bf83d8b72ed89e56b230", "bodies": ["path = os.path.join(self.tempdir, 'foo.json')\nconfig = {'foo': 1, 'bar': 2}\nworkspace_lib.WriteConfigFile(path, config)\nself.assertDictEqual(config, workspace_lib.ReadConfigFile(path))", "path = os.path.join(self.tempdir, 'foo.json')\nconfig = Exception()\nwith self.assertRaises(workspace_lib.ConfigFileError):\n workspace_lib.WriteConfigFile(path, config)", "path = os.path.join(self.tempdir, 'foo.json')\nosutils.WriteFile(path, 'invalid contents')\nwith self.assertRaises(workspace_lib.ConfigFileError):\n workspace_lib.ReadConfigFile(path)"], "bodies_text": "<|body_start_0|>\n path = os.path.join(self.tempdir, 'foo.json')\n config = {'foo': 1, 'bar': 2}\n workspace_lib.WriteConfigFile(path, config)\n self.assertDictEqual(config, workspace_lib.ReadConfigFile(path))\n<|end_body_0|>\n\n<|body_start_1|>\n path = os.path.join(self.tempdir, 'foo.json')\n config = Exception()\n with self.assertRaises(workspace_lib.ConfigFileError):\n workspace_lib.WriteConfigFile(path, config)\n<|end_body_1|>\n\n<|body_start_2|>\n path = os.path.join(self.tempdir, 'foo.json')\n osutils.WriteFile(path, 'invalid contents')\n with self.assertRaises(workspace_lib.ConfigFileError):\n workspace_lib.ReadConfigFile(path)\n<|end_body_2|>\n", "class_docstring": "Test WriteConfigFile() and ReadConfigFile().", "class_name": "ConfigurationTest", "detected_licenses": ["BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference", "LGPL-2.0-or-later", "GPL-1.0-or-later", "MIT", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ConfigurationTest:\n \"\"\"Test WriteConfigFile() and ReadConfigFile().\"\"\"\n\n def testWriteReadConfigFile(self):\n \"\"\"Tests WriteConfigFile() then ReadConfigFile().\"\"\"\n <|body_0|>\n\n def testWriteConfigFileInvalid(self):\n \"\"\"Tests writing an invalid configuration file.\"\"\"\n <|body_1|>\n\n def testReadConfigFileInvalid(self):\n \"\"\"Tests reading an invalid configuration file.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n path = os.path.join(self.tempdir, 'foo.json')\n config = {'foo': 1, 'bar': 2}\n workspace_lib.WriteConfigFile(path, config)\n self.assertDictEqual(config, workspace_lib.ReadConfigFile(path))\n<|end_body_0|>\n\n<|body_start_1|>\n path = os.path.join(self.tempdir, 'foo.json')\n config = 
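GetDetailRow in the GetZone record above flattens a zone resource into (key, value) rows, labeling only the first machine type. A dependency-free sketch of the same flattening, with str() standing in for self._presenter.PresentElement (the presenter is never defined in the record) and hypothetical input values:

def zone_detail_rows(result):
    # Pure-Python restatement of GetDetailRow for a server-style zone dict.
    data = []
    for i, machine_type in enumerate(result.get('availableMachineType', [])):
        key = 'machine types' if i == 0 else ''  # label only the first row
        data.append((key, str(machine_type)))
    for window in result.get('maintenanceWindows', []):
        data.append(('maintenance-window', [
            ('name', window['name']),
            ('description', window['description']),
            ('begin-time', window['beginTime']),
            ('end-time', window['endTime']),
        ]))
    return data

rows = zone_detail_rows({
    'availableMachineType': ['n1-standard-1', 'n1-standard-2'],
    'maintenanceWindows': [{'name': 'mw-1', 'description': 'quarterly',
                            'beginTime': '2014-01-01', 'endTime': '2014-01-02'}],
})
assert rows[0] == ('machine types', 'n1-standard-1') and rows[1][0] == ''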
Exception()\n with self.assertRaises(workspace_lib.ConfigFileError):\n workspace_lib.WriteConfigFile(path, config)\n<|end_body_1|>\n\n<|body_start_2|>\n path = os.path.join(self.tempdir, 'foo.json')\n osutils.WriteFile(path, 'invalid contents')\n with self.assertRaises(workspace_lib.ConfigFileError):\n workspace_lib.ReadConfigFile(path)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000186", "length_bytes": 9422, "license_type": "permissive", "methods": [{"docstring": "Tests WriteConfigFile() then ReadConfigFile().", "name": "testWriteReadConfigFile", "signature": "def testWriteReadConfigFile(self)"}, {"docstring": "Tests writing an invalid configuration file.", "name": "testWriteConfigFileInvalid", "signature": "def testWriteConfigFileInvalid(self)"}, {"docstring": "Tests reading an invalid configuration file.", "name": "testReadConfigFileInvalid", "signature": "def testReadConfigFileInvalid(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004054", "prompt": "Implement the Python class `ConfigurationTest` described below.\n\nClass description:\nTest WriteConfigFile() and ReadConfigFile().\n\nMethod signatures and docstrings:\n- def testWriteReadConfigFile(self): Tests WriteConfigFile() then ReadConfigFile().\n- def testWriteConfigFileInvalid(self): Tests writing an invalid configuration file.\n- def testReadConfigFileInvalid(self): Tests reading an invalid configuration file.", "prompted_full_text": "Implement the Python class `ConfigurationTest` described below.\n\nClass description:\nTest WriteConfigFile() and ReadConfigFile().\n\nMethod signatures and docstrings:\n- def testWriteReadConfigFile(self): Tests WriteConfigFile() then ReadConfigFile().\n- def testWriteConfigFileInvalid(self): Tests writing an invalid configuration file.\n- def testReadConfigFileInvalid(self): Tests reading an invalid configuration file.\n\n<|skeleton|>\nclass ConfigurationTest:\n \"\"\"Test WriteConfigFile() and ReadConfigFile().\"\"\"\n\n def testWriteReadConfigFile(self):\n \"\"\"Tests WriteConfigFile() then ReadConfigFile().\"\"\"\n <|body_0|>\n\n def testWriteConfigFileInvalid(self):\n \"\"\"Tests writing an invalid configuration file.\"\"\"\n <|body_1|>\n\n def testReadConfigFileInvalid(self):\n \"\"\"Tests reading an invalid configuration file.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n path = os.path.join(self.tempdir, 'foo.json')\n config = {'foo': 1, 'bar': 2}\n workspace_lib.WriteConfigFile(path, config)\n self.assertDictEqual(config, workspace_lib.ReadConfigFile(path))\n<|end_body_0|>\n\n<|body_start_1|>\n path = os.path.join(self.tempdir, 'foo.json')\n config = Exception()\n with self.assertRaises(workspace_lib.ConfigFileError):\n workspace_lib.WriteConfigFile(path, config)\n<|end_body_1|>\n\n<|body_start_2|>\n path = os.path.join(self.tempdir, 'foo.json')\n osutils.WriteFile(path, 'invalid contents')\n with self.assertRaises(workspace_lib.ConfigFileError):\n workspace_lib.ReadConfigFile(path)\n<|end_body_2|>\n", "revision_id": "e71f21b9b4b9b839f5093301974a45545dad2691", "skeleton": "<|skeleton|>\nclass ConfigurationTest:\n \"\"\"Test WriteConfigFile() and ReadConfigFile().\"\"\"\n\n def testWriteReadConfigFile(self):\n \"\"\"Tests WriteConfigFile() then ReadConfigFile().\"\"\"\n <|body_0|>\n\n def testWriteConfigFileInvalid(self):\n \"\"\"Tests writing an invalid configuration file.\"\"\"\n <|body_1|>\n\n def testReadConfigFileInvalid(self):\n \"\"\"Tests reading an invalid configuration file.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", 
"snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ConfigurationTest:\n \"\"\"Test WriteConfigFile() and ReadConfigFile().\"\"\"\n\n def testWriteReadConfigFile(self):\n \"\"\"Tests WriteConfigFile() then ReadConfigFile().\"\"\"\n path = os.path.join(self.tempdir, 'foo.json')\n config = {'foo': 1, 'bar': 2}\n workspace_lib.WriteConfigFile(path, config)\n self.assertDictEqual(config, workspace_lib.ReadConfigFile(path))\n\n def testWriteConfigFileInvalid(self):\n \"\"\"Tests writing an invalid configuration file.\"\"\"\n path = os.path.join(self.tempdir, 'foo.json')\n config = Exception()\n with self.assertRaises(workspace_lib.ConfigFileError):\n workspace_lib.WriteConfigFile(path, config)\n\n def testReadConfigFileInvalid(self):\n \"\"\"Tests reading an invalid configuration file.\"\"\"\n path = os.path.join(self.tempdir, 'foo.json')\n osutils.WriteFile(path, 'invalid contents')\n with self.assertRaises(workspace_lib.ConfigFileError):\n workspace_lib.ReadConfigFile(path)\n", "source": "the_stack_v2_python_sparse", "source_path": "third_party/chromite/lib/workspace_lib_unittest.py", "source_repo": "zenoalbisser/chromium", "split": "val", "star_events_count": 0} {"blob_id": "2d6d1211751bdaa27439e9d456f0e05c68c56a1e", "bodies": ["super(Generator, self).__init__()\nself.num_gpu = num_gpu\nself.layer = nn.Sequential(nn.ConvTranspose2d(z_dim, conv_dim * 8, 4, 1, 0, bias=False), nn.BatchNorm2d(conv_dim * 8), nn.ReLU(True), nn.ConvTranspose2d(conv_dim * 8, conv_dim * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(conv_dim * 4), nn.ReLU(True), nn.ConvTranspose2d(conv_dim * 4, conv_dim * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(conv_dim * 2), nn.ReLU(True), nn.ConvTranspose2d(conv_dim * 2, conv_dim, 4, 2, 1, bias=False), nn.BatchNorm2d(conv_dim), nn.ReLU(True), nn.ConvTranspose2d(conv_dim, num_channels, 4, 2, 1, bias=False), nn.Tanh())", "if isinstance(input.data, torch.cuda.FloatTensor) and self.num_gpu > 1:\n out = nn.parallel.data_parallel(self.layer, input, range(self.num_gpu))\nelse:\n out = self.layer(input)\nreturn out"], "bodies_text": "<|body_start_0|>\n super(Generator, self).__init__()\n self.num_gpu = num_gpu\n self.layer = nn.Sequential(nn.ConvTranspose2d(z_dim, conv_dim * 8, 4, 1, 0, bias=False), nn.BatchNorm2d(conv_dim * 8), nn.ReLU(True), nn.ConvTranspose2d(conv_dim * 8, conv_dim * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(conv_dim * 4), nn.ReLU(True), nn.ConvTranspose2d(conv_dim * 4, conv_dim * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(conv_dim * 2), nn.ReLU(True), nn.ConvTranspose2d(conv_dim * 2, conv_dim, 4, 2, 1, bias=False), nn.BatchNorm2d(conv_dim), nn.ReLU(True), nn.ConvTranspose2d(conv_dim, num_channels, 4, 2, 1, bias=False), nn.Tanh())\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(input.data, torch.cuda.FloatTensor) and self.num_gpu > 1:\n out = nn.parallel.data_parallel(self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n return out\n<|end_body_1|>\n", "class_docstring": "Model for Generator.", "class_name": "Generator", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Generator:\n \"\"\"Model for Generator.\"\"\"\n\n def __init__(self, num_channels, z_dim, conv_dim, num_gpu):\n \"\"\"Init for Generator model.\"\"\"\n <|body_0|>\n\n def forward(self, input):\n \"\"\"Forward step for Generator model.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Generator, self).__init__()\n self.num_gpu = 
num_gpu\n self.layer = nn.Sequential(nn.ConvTranspose2d(z_dim, conv_dim * 8, 4, 1, 0, bias=False), nn.BatchNorm2d(conv_dim * 8), nn.ReLU(True), nn.ConvTranspose2d(conv_dim * 8, conv_dim * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(conv_dim * 4), nn.ReLU(True), nn.ConvTranspose2d(conv_dim * 4, conv_dim * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(conv_dim * 2), nn.ReLU(True), nn.ConvTranspose2d(conv_dim * 2, conv_dim, 4, 2, 1, bias=False), nn.BatchNorm2d(conv_dim), nn.ReLU(True), nn.ConvTranspose2d(conv_dim, num_channels, 4, 2, 1, bias=False), nn.Tanh())\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(input.data, torch.cuda.FloatTensor) and self.num_gpu > 1:\n out = nn.parallel.data_parallel(self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n return out\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000187", "length_bytes": 4479, "license_type": "permissive", "methods": [{"docstring": "Init for Generator model.", "name": "__init__", "signature": "def __init__(self, num_channels, z_dim, conv_dim, num_gpu)"}, {"docstring": "Forward step for Generator model.", "name": "forward", "signature": "def forward(self, input)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003578", "prompt": "Implement the Python class `Generator` described below.\n\nClass description:\nModel for Generator.\n\nMethod signatures and docstrings:\n- def __init__(self, num_channels, z_dim, conv_dim, num_gpu): Init for Generator model.\n- def forward(self, input): Forward step for Generator model.", "prompted_full_text": "Implement the Python class `Generator` described below.\n\nClass description:\nModel for Generator.\n\nMethod signatures and docstrings:\n- def __init__(self, num_channels, z_dim, conv_dim, num_gpu): Init for Generator model.\n- def forward(self, input): Forward step for Generator model.\n\n<|skeleton|>\nclass Generator:\n \"\"\"Model for Generator.\"\"\"\n\n def __init__(self, num_channels, z_dim, conv_dim, num_gpu):\n \"\"\"Init for Generator model.\"\"\"\n <|body_0|>\n\n def forward(self, input):\n \"\"\"Forward step for Generator model.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Generator, self).__init__()\n self.num_gpu = num_gpu\n self.layer = nn.Sequential(nn.ConvTranspose2d(z_dim, conv_dim * 8, 4, 1, 0, bias=False), nn.BatchNorm2d(conv_dim * 8), nn.ReLU(True), nn.ConvTranspose2d(conv_dim * 8, conv_dim * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(conv_dim * 4), nn.ReLU(True), nn.ConvTranspose2d(conv_dim * 4, conv_dim * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(conv_dim * 2), nn.ReLU(True), nn.ConvTranspose2d(conv_dim * 2, conv_dim, 4, 2, 1, bias=False), nn.BatchNorm2d(conv_dim), nn.ReLU(True), nn.ConvTranspose2d(conv_dim, num_channels, 4, 2, 1, bias=False), nn.Tanh())\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(input.data, torch.cuda.FloatTensor) and self.num_gpu > 1:\n out = nn.parallel.data_parallel(self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n return out\n<|end_body_1|>\n", "revision_id": "fd4498da35ace5a3d1696ff4fbec3568eddbe6a1", "skeleton": "<|skeleton|>\nclass Generator:\n \"\"\"Model for Generator.\"\"\"\n\n def __init__(self, num_channels, z_dim, conv_dim, num_gpu):\n \"\"\"Init for Generator model.\"\"\"\n <|body_0|>\n\n def forward(self, input):\n \"\"\"Forward step for Generator model.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Generator:\n \"\"\"Model 
for Generator.\"\"\"\n\n def __init__(self, num_channels, z_dim, conv_dim, num_gpu):\n \"\"\"Init for Generator model.\"\"\"\n super(Generator, self).__init__()\n self.num_gpu = num_gpu\n self.layer = nn.Sequential(nn.ConvTranspose2d(z_dim, conv_dim * 8, 4, 1, 0, bias=False), nn.BatchNorm2d(conv_dim * 8), nn.ReLU(True), nn.ConvTranspose2d(conv_dim * 8, conv_dim * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(conv_dim * 4), nn.ReLU(True), nn.ConvTranspose2d(conv_dim * 4, conv_dim * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(conv_dim * 2), nn.ReLU(True), nn.ConvTranspose2d(conv_dim * 2, conv_dim, 4, 2, 1, bias=False), nn.BatchNorm2d(conv_dim), nn.ReLU(True), nn.ConvTranspose2d(conv_dim, num_channels, 4, 2, 1, bias=False), nn.Tanh())\n\n def forward(self, input):\n \"\"\"Forward step for Generator model.\"\"\"\n if isinstance(input.data, torch.cuda.FloatTensor) and self.num_gpu > 1:\n out = nn.parallel.data_parallel(self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n return out\n", "source": "the_stack_v2_python_sparse", "source_path": "DCGAN/models.py", "source_repo": "corenel/GAN-Zoo", "split": "val", "star_events_count": 10} {"blob_id": "4f4a0af05157f4f5ce13914aa271b4f0d025ca89", "bodies": ["self.dataset = dataset\nself.logger = logger\nself.length = len(self.dataset)", "if isinstance(test_info, str):\n self._check_load(test_info)\nelse:\n self._check_unit(test_info)", "n, unit = info\ncheck_len = n == self.length\nif check_len:\n logger.get_log().info('dataset length check success !!!')\nelse:\n logger.get_log().error('dataset length is {}, but the input length is {}'.format(self.dataset, n))\nfor i in range(self.length):\n jud = np.array_equal(self.dataset[i][0], unit * i)\n if not jud:\n logger.get_log().error('Error in unit {}'.format(i))\n logger.get_log().error('dataset is {}, but calculation is {}'.format(self.dataset[i], unit * i))\n assert False\nlogger.get_log().info('dataset check success !!!')", "data = DataGenerator(dir)()\nfeatures, labels = data\ncheck_len = len(features) == len(self.dataset)\nif check_len:\n logger.get_log().info('dataset length check success !!!')\nelse:\n logger.get_log().error('dataset length is {}, but the input length is {}'.format(self.dataset, len(features)))\n assert False\nstep = len(self.dataset) // 100\nfor i in range(0, len(self.dataset), step):\n jud_f = np.allclose(features[i], self.dataset[i][0])\n jud_l = np.allclose(labels[i], self.dataset[i][1])\n if not jud_f:\n logger.get_log().error('Feature Error in iter {}'.format(i))\n logger.get_log().error('dataset is {}, but calculation is {}'.format(self.dataset[i][0], features[i]))\n assert False\n if not jud_l:\n logger.get_log().error('Label Error in iter {}'.format(i))\n logger.get_log().error('dataset is {}, but calculation is {}'.format(self.dataset[i][1], labels[i]))\n assert False\nlogger.get_log().info('dataset skip check success !!!')"], "bodies_text": "<|body_start_0|>\n self.dataset = dataset\n self.logger = logger\n self.length = len(self.dataset)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(test_info, str):\n self._check_load(test_info)\n else:\n self._check_unit(test_info)\n<|end_body_1|>\n\n<|body_start_2|>\n n, unit = info\n check_len = n == self.length\n if check_len:\n logger.get_log().info('dataset length check success !!!')\n else:\n logger.get_log().error('dataset length is {}, but the input length is {}'.format(self.dataset, n))\n for i in range(self.length):\n jud = np.array_equal(self.dataset[i][0], unit * i)\n if not jud:\n 
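The Generator record above stacks five ConvTranspose2d layers. Using out = (in - 1) * stride - 2 * pad + kernel, the spatial path is 1 -> 4 -> 8 -> 16 -> 32 -> 64, so a z_dim x 1 x 1 latent becomes a num_channels x 64 x 64 image. A quick shape check of that stack (parameter values are illustrative, and the multi-GPU branch is omitted):

import torch
from torch import nn

z_dim, conv_dim, num_channels = 100, 64, 3
net = nn.Sequential(
    nn.ConvTranspose2d(z_dim, conv_dim * 8, 4, 1, 0, bias=False),       # 1 -> 4
    nn.BatchNorm2d(conv_dim * 8), nn.ReLU(True),
    nn.ConvTranspose2d(conv_dim * 8, conv_dim * 4, 4, 2, 1, bias=False),  # 4 -> 8
    nn.BatchNorm2d(conv_dim * 4), nn.ReLU(True),
    nn.ConvTranspose2d(conv_dim * 4, conv_dim * 2, 4, 2, 1, bias=False),  # 8 -> 16
    nn.BatchNorm2d(conv_dim * 2), nn.ReLU(True),
    nn.ConvTranspose2d(conv_dim * 2, conv_dim, 4, 2, 1, bias=False),      # 16 -> 32
    nn.BatchNorm2d(conv_dim), nn.ReLU(True),
    nn.ConvTranspose2d(conv_dim, num_channels, 4, 2, 1, bias=False),      # 32 -> 64
    nn.Tanh())
out = net(torch.randn(2, z_dim, 1, 1))  # a batch of 2 latent vectors
assert out.shape == (2, num_channels, 64, 64)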
logger.get_log().error('Error in unit {}'.format(i))\n logger.get_log().error('dataset is {}, but calculation is {}'.format(self.dataset[i], unit * i))\n assert False\n logger.get_log().info('dataset check success !!!')\n<|end_body_2|>\n\n<|body_start_3|>\n data = DataGenerator(dir)()\n features, labels = data\n check_len = len(features) == len(self.dataset)\n if check_len:\n logger.get_log().info('dataset length check success !!!')\n else:\n logger.get_log().error('dataset length is {}, but the input length is {}'.format(self.dataset, len(features)))\n assert False\n step = len(self.dataset) // 100\n for i in range(0, len(self.dataset), step):\n jud_f = np.allclose(features[i], self.dataset[i][0])\n jud_l = np.allclose(labels[i], self.dataset[i][1])\n if not jud_f:\n logger.get_log().error('Feature Error in iter {}'.format(i))\n logger.get_log().error('dataset is {}, but calculation is {}'.format(self.dataset[i][0], features[i]))\n assert False\n if not jud_l:\n logger.get_log().error('Label Error in iter {}'.format(i))\n logger.get_log().error('dataset is {}, but calculation is {}'.format(self.dataset[i][1], labels[i]))\n assert False\n logger.get_log().info('dataset skip check success !!!')\n<|end_body_3|>\n", "class_docstring": "test Dataset class", "class_name": "TestDataset", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestDataset:\n \"\"\"test Dataset class\"\"\"\n\n def __init__(self, dataset):\n \"\"\"init\"\"\"\n <|body_0|>\n\n def run(self, test_info):\n \"\"\"run\"\"\"\n <|body_1|>\n\n def _check_unit(self, info):\n \"\"\"check unit case\"\"\"\n <|body_2|>\n\n def _check_load(self, dir):\n \"\"\"check load case\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dataset = dataset\n self.logger = logger\n self.length = len(self.dataset)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(test_info, str):\n self._check_load(test_info)\n else:\n self._check_unit(test_info)\n<|end_body_1|>\n\n<|body_start_2|>\n n, unit = info\n check_len = n == self.length\n if check_len:\n logger.get_log().info('dataset length check success !!!')\n else:\n logger.get_log().error('dataset length is {}, but the input length is {}'.format(self.dataset, n))\n for i in range(self.length):\n jud = np.array_equal(self.dataset[i][0], unit * i)\n if not jud:\n logger.get_log().error('Error in unit {}'.format(i))\n logger.get_log().error('dataset is {}, but calculation is {}'.format(self.dataset[i], unit * i))\n assert False\n logger.get_log().info('dataset check success !!!')\n<|end_body_2|>\n\n<|body_start_3|>\n data = DataGenerator(dir)()\n features, labels = data\n check_len = len(features) == len(self.dataset)\n if check_len:\n logger.get_log().info('dataset length check success !!!')\n else:\n logger.get_log().error('dataset length is {}, but the input length is {}'.format(self.dataset, len(features)))\n assert False\n step = len(self.dataset) // 100\n for i in range(0, len(self.dataset), step):\n jud_f = np.allclose(features[i], self.dataset[i][0])\n jud_l = np.allclose(labels[i], self.dataset[i][1])\n if not jud_f:\n logger.get_log().error('Feature Error in iter {}'.format(i))\n logger.get_log().error('dataset is {}, but calculation is {}'.format(self.dataset[i][0], features[i]))\n assert False\n if not jud_l:\n logger.get_log().error('Label Error in iter {}'.format(i))\n logger.get_log().error('dataset is {}, but calculation is {}'.format(self.dataset[i][1], labels[i]))\n assert False\n 
logger.get_log().info('dataset skip check success !!!')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000188", "length_bytes": 7560, "license_type": "no_license", "methods": [{"docstring": "init", "name": "__init__", "signature": "def __init__(self, dataset)"}, {"docstring": "run", "name": "run", "signature": "def run(self, test_info)"}, {"docstring": "check unit case", "name": "_check_unit", "signature": "def _check_unit(self, info)"}, {"docstring": "check load case", "name": "_check_load", "signature": "def _check_load(self, dir)"}], "n_methods": 4, "prompt": "Implement the Python class `TestDataset` described below.\n\nClass description:\ntest Dataset class\n\nMethod signatures and docstrings:\n- def __init__(self, dataset): init\n- def run(self, test_info): run\n- def _check_unit(self, info): check unit case\n- def _check_load(self, dir): check load case", "prompted_full_text": "Implement the Python class `TestDataset` described below.\n\nClass description:\ntest Dataset class\n\nMethod signatures and docstrings:\n- def __init__(self, dataset): init\n- def run(self, test_info): run\n- def _check_unit(self, info): check unit case\n- def _check_load(self, dir): check load case\n\n<|skeleton|>\nclass TestDataset:\n \"\"\"test Dataset class\"\"\"\n\n def __init__(self, dataset):\n \"\"\"init\"\"\"\n <|body_0|>\n\n def run(self, test_info):\n \"\"\"run\"\"\"\n <|body_1|>\n\n def _check_unit(self, info):\n \"\"\"check unit case\"\"\"\n <|body_2|>\n\n def _check_load(self, dir):\n \"\"\"check load case\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dataset = dataset\n self.logger = logger\n self.length = len(self.dataset)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(test_info, str):\n self._check_load(test_info)\n else:\n self._check_unit(test_info)\n<|end_body_1|>\n\n<|body_start_2|>\n n, unit = info\n check_len = n == self.length\n if check_len:\n logger.get_log().info('dataset length check success !!!')\n else:\n logger.get_log().error('dataset length is {}, but the input length is {}'.format(self.dataset, n))\n for i in range(self.length):\n jud = np.array_equal(self.dataset[i][0], unit * i)\n if not jud:\n logger.get_log().error('Error in unit {}'.format(i))\n logger.get_log().error('dataset is {}, but calculation is {}'.format(self.dataset[i], unit * i))\n assert False\n logger.get_log().info('dataset check success !!!')\n<|end_body_2|>\n\n<|body_start_3|>\n data = DataGenerator(dir)()\n features, labels = data\n check_len = len(features) == len(self.dataset)\n if check_len:\n logger.get_log().info('dataset length check success !!!')\n else:\n logger.get_log().error('dataset length is {}, but the input length is {}'.format(self.dataset, len(features)))\n assert False\n step = len(self.dataset) // 100\n for i in range(0, len(self.dataset), step):\n jud_f = np.allclose(features[i], self.dataset[i][0])\n jud_l = np.allclose(labels[i], self.dataset[i][1])\n if not jud_f:\n logger.get_log().error('Feature Error in iter {}'.format(i))\n logger.get_log().error('dataset is {}, but calculation is {}'.format(self.dataset[i][0], features[i]))\n assert False\n if not jud_l:\n logger.get_log().error('Label Error in iter {}'.format(i))\n logger.get_log().error('dataset is {}, but calculation is {}'.format(self.dataset[i][1], labels[i]))\n assert False\n logger.get_log().info('dataset skip check success !!!')\n<|end_body_3|>\n", "revision_id": "bd3790ce72a2a26611b5eda3901651b5a809348f", "skeleton": "<|skeleton|>\nclass TestDataset:\n \"\"\"test Dataset 
class\"\"\"\n\n def __init__(self, dataset):\n \"\"\"init\"\"\"\n <|body_0|>\n\n def run(self, test_info):\n \"\"\"run\"\"\"\n <|body_1|>\n\n def _check_unit(self, info):\n \"\"\"check unit case\"\"\"\n <|body_2|>\n\n def _check_load(self, dir):\n \"\"\"check load case\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestDataset:\n \"\"\"test Dataset class\"\"\"\n\n def __init__(self, dataset):\n \"\"\"init\"\"\"\n self.dataset = dataset\n self.logger = logger\n self.length = len(self.dataset)\n\n def run(self, test_info):\n \"\"\"run\"\"\"\n if isinstance(test_info, str):\n self._check_load(test_info)\n else:\n self._check_unit(test_info)\n\n def _check_unit(self, info):\n \"\"\"check unit case\"\"\"\n n, unit = info\n check_len = n == self.length\n if check_len:\n logger.get_log().info('dataset length check success !!!')\n else:\n logger.get_log().error('dataset length is {}, but the input length is {}'.format(self.dataset, n))\n for i in range(self.length):\n jud = np.array_equal(self.dataset[i][0], unit * i)\n if not jud:\n logger.get_log().error('Error in unit {}'.format(i))\n logger.get_log().error('dataset is {}, but calculation is {}'.format(self.dataset[i], unit * i))\n assert False\n logger.get_log().info('dataset check success !!!')\n\n def _check_load(self, dir):\n \"\"\"check load case\"\"\"\n data = DataGenerator(dir)()\n features, labels = data\n check_len = len(features) == len(self.dataset)\n if check_len:\n logger.get_log().info('dataset length check success !!!')\n else:\n logger.get_log().error('dataset length is {}, but the input length is {}'.format(self.dataset, len(features)))\n assert False\n step = len(self.dataset) // 100\n for i in range(0, len(self.dataset), step):\n jud_f = np.allclose(features[i], self.dataset[i][0])\n jud_l = np.allclose(labels[i], self.dataset[i][1])\n if not jud_f:\n logger.get_log().error('Feature Error in iter {}'.format(i))\n logger.get_log().error('dataset is {}, but calculation is {}'.format(self.dataset[i][0], features[i]))\n assert False\n if not jud_l:\n logger.get_log().error('Label Error in iter {}'.format(i))\n logger.get_log().error('dataset is {}, but calculation is {}'.format(self.dataset[i][1], labels[i]))\n assert False\n logger.get_log().info('dataset skip check success !!!')\n", "source": "the_stack_v2_python_sparse", "source_path": "framework/e2e/io/io_test.py", "source_repo": "PaddlePaddle/PaddleTest", "split": "val", "star_events_count": 42} {"blob_id": "2b2e0e3322b6a103815664f6f409ebeca538a599", "bodies": ["self.object = self.get_object()\nsuccess_url = self.get_success_url()\nself.object.delete()\ndel request.session['username']\nrequest.session.modified = True\nreturn HttpResponseRedirect(success_url)", "current_user = super(DeleteUserProfile, self).get_object(queryset)\nif current_user.username != self.request.user.username:\n raise Http404(\"Please respect other's privacy!\")\nreturn current_user"], "bodies_text": "<|body_start_0|>\n self.object = self.get_object()\n success_url = self.get_success_url()\n self.object.delete()\n del request.session['username']\n request.session.modified = True\n return HttpResponseRedirect(success_url)\n<|end_body_0|>\n\n<|body_start_1|>\n current_user = super(DeleteUserProfile, self).get_object(queryset)\n if current_user.username != self.request.user.username:\n raise Http404(\"Please respect other's privacy!\")\n return current_user\n<|end_body_1|>\n", 
"class_docstring": "Deletes the user profile :param LoginRequiredMixin, DeleteView: Mixin that will check if user is logged in, Django's Generic View :return: Render login form if successfully delete the current session and redirect.", "class_name": "DeleteUserProfile", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DeleteUserProfile:\n \"\"\"Deletes the user profile :param LoginRequiredMixin, DeleteView: Mixin that will check if user is logged in, Django's Generic View :return: Render login form if successfully delete the current session and redirect.\"\"\"\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Deletes the session\"\"\"\n <|body_0|>\n\n def get_object(self, queryset=None):\n \"\"\"This will verify if the current user is deleting his profile or not\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.object = self.get_object()\n success_url = self.get_success_url()\n self.object.delete()\n del request.session['username']\n request.session.modified = True\n return HttpResponseRedirect(success_url)\n<|end_body_0|>\n\n<|body_start_1|>\n current_user = super(DeleteUserProfile, self).get_object(queryset)\n if current_user.username != self.request.user.username:\n raise Http404(\"Please respect other's privacy!\")\n return current_user\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000189", "length_bytes": 5860, "license_type": "permissive", "methods": [{"docstring": "Deletes the session", "name": "delete", "signature": "def delete(self, request, *args, **kwargs)"}, {"docstring": "This will verify if the current user is deleting his profile or not", "name": "get_object", "signature": "def get_object(self, queryset=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000340", "prompt": "Implement the Python class `DeleteUserProfile` described below.\n\nClass description:\nDeletes the user profile :param LoginRequiredMixin, DeleteView: Mixin that will check if user is logged in, Django's Generic View :return: Render login form if successfully delete the current session and redirect.\n\nMethod signatures and docstrings:\n- def delete(self, request, *args, **kwargs): Deletes the session\n- def get_object(self, queryset=None): This will verify if the current user is deleting his profile or not", "prompted_full_text": "Implement the Python class `DeleteUserProfile` described below.\n\nClass description:\nDeletes the user profile :param LoginRequiredMixin, DeleteView: Mixin that will check if user is logged in, Django's Generic View :return: Render login form if successfully delete the current session and redirect.\n\nMethod signatures and docstrings:\n- def delete(self, request, *args, **kwargs): Deletes the session\n- def get_object(self, queryset=None): This will verify if the current user is deleting his profile or not\n\n<|skeleton|>\nclass DeleteUserProfile:\n \"\"\"Deletes the user profile :param LoginRequiredMixin, DeleteView: Mixin that will check if user is logged in, Django's Generic View :return: Render login form if successfully delete the current session and redirect.\"\"\"\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Deletes the session\"\"\"\n <|body_0|>\n\n def get_object(self, queryset=None):\n \"\"\"This will verify if the current user is deleting his profile or not\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.object = self.get_object()\n success_url = self.get_success_url()\n self.object.delete()\n del 
request.session['username']\n request.session.modified = True\n return HttpResponseRedirect(success_url)\n<|end_body_0|>\n\n<|body_start_1|>\n current_user = super(DeleteUserProfile, self).get_object(queryset)\n if current_user.username != self.request.user.username:\n raise Http404(\"Please respect other's privacy!\")\n return current_user\n<|end_body_1|>\n", "revision_id": "9ee3366ab6550fe73845f76ae6136319e59cbdac", "skeleton": "<|skeleton|>\nclass DeleteUserProfile:\n \"\"\"Deletes the user profile :param LoginRequiredMixin, DeleteView: Mixin that will check if user is logged in, Django's Generic View :return: Render login form if successfully delete the current session and redirect.\"\"\"\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Deletes the session\"\"\"\n <|body_0|>\n\n def get_object(self, queryset=None):\n \"\"\"This will verify if the current user is deleting his profile or not\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DeleteUserProfile:\n \"\"\"Deletes the user profile :param LoginRequiredMixin, DeleteView: Mixin that will check if user is logged in, Django's Generic View :return: Render login form if successfully delete the current session and redirect.\"\"\"\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Deletes the session\"\"\"\n self.object = self.get_object()\n success_url = self.get_success_url()\n self.object.delete()\n del request.session['username']\n request.session.modified = True\n return HttpResponseRedirect(success_url)\n\n def get_object(self, queryset=None):\n \"\"\"This will verify if the current user is deleting his profile or not\"\"\"\n current_user = super(DeleteUserProfile, self).get_object(queryset)\n if current_user.username != self.request.user.username:\n raise Http404(\"Please respect other's privacy!\")\n return current_user\n", "source": "the_stack_v2_python_sparse", "source_path": "BestStore/User/views.py", "source_repo": "rishabh-22/BestStore", "split": "val", "star_events_count": 1} {"blob_id": "c19dc4d06e3352320daef1e6684eee0b79e32c59", "bodies": ["self._name = name or 'forward_rate_agreement'\nif rate_term is None and maturity_date is None:\n raise ValueError('Error creating FRA. 
Either rate_term or maturity_date is required.')\nwith tf.name_scope(self._name):\n self._dtype = dtype\n self._notional = tf.convert_to_tensor(notional, dtype=self._dtype)\n self._fixing_date = dates.convert_to_date_tensor(fixing_date)\n self._settlement_date = dates.convert_to_date_tensor(settlement_date)\n self._accrual_start_date = dates.convert_to_date_tensor(settlement_date)\n if rate_term is None:\n self._accrual_end_date = dates.convert_to_date_tensor(maturity_date)\n else:\n self._accrual_end_date = self._accrual_start_date + rate_term\n if daycount_convention is None:\n daycount_convention = rc.DayCountConvention.ACTUAL_360\n self._fixed_rate = tf.convert_to_tensor(fixed_rate, dtype=self._dtype, name='fixed_rate')\n self._daycount_convention = daycount_convention\n self._daycount_fraction = rc.get_daycount_fraction(self._accrual_start_date, self._accrual_end_date, self._daycount_convention, self._dtype)", "del model, valuation_date\nreference_curve = market.reference_curve\ndiscount_curve = market.discount_curve\nfwd_rate = reference_curve.get_forward_rate(self._accrual_start_date, self._accrual_end_date, self._daycount_fraction)\ndiscount_at_settlement = discount_curve.get_discount_factor(self._settlement_date)\nreturn discount_at_settlement * self._notional * (fwd_rate - self._fixed_rate) * self._daycount_fraction / (1.0 + self._daycount_fraction * fwd_rate)"], "bodies_text": "<|body_start_0|>\n self._name = name or 'forward_rate_agreement'\n if rate_term is None and maturity_date is None:\n raise ValueError('Error creating FRA. Either rate_term or maturity_date is required.')\n with tf.name_scope(self._name):\n self._dtype = dtype\n self._notional = tf.convert_to_tensor(notional, dtype=self._dtype)\n self._fixing_date = dates.convert_to_date_tensor(fixing_date)\n self._settlement_date = dates.convert_to_date_tensor(settlement_date)\n self._accrual_start_date = dates.convert_to_date_tensor(settlement_date)\n if rate_term is None:\n self._accrual_end_date = dates.convert_to_date_tensor(maturity_date)\n else:\n self._accrual_end_date = self._accrual_start_date + rate_term\n if daycount_convention is None:\n daycount_convention = rc.DayCountConvention.ACTUAL_360\n self._fixed_rate = tf.convert_to_tensor(fixed_rate, dtype=self._dtype, name='fixed_rate')\n self._daycount_convention = daycount_convention\n self._daycount_fraction = rc.get_daycount_fraction(self._accrual_start_date, self._accrual_end_date, self._daycount_convention, self._dtype)\n<|end_body_0|>\n\n<|body_start_1|>\n del model, valuation_date\n reference_curve = market.reference_curve\n discount_curve = market.discount_curve\n fwd_rate = reference_curve.get_forward_rate(self._accrual_start_date, self._accrual_end_date, self._daycount_fraction)\n discount_at_settlement = discount_curve.get_discount_factor(self._settlement_date)\n return discount_at_settlement * self._notional * (fwd_rate - self._fixed_rate) * self._daycount_fraction / (1.0 + self._daycount_fraction * fwd_rate)\n<|end_body_1|>\n", "class_docstring": "Represents a batch of Forward Rate Agreements (FRA). An FRA is a contract for the period [T, T+tau] where the holder exchanges a fixed rate (agreed at the start of the contract) against a floating payment determined at time T based on the spot Libor rate for term `tau`. The cashflows are exchanged at the settlement time T_s, which is either equal to T or close to T. The FRA are structured so that the payments are made in T+tau dollars (ref [1]). 
The ForwardRateAgreement class can be used to create and price multiple FRAs simultaneously. However all FRAs within a FRA object must be priced using a common reference and discount curve. #### Example: The following example illustrates the construc", "class_name": "ForwardRateAgreement", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ForwardRateAgreement:\n \"\"\"Represents a batch of Forward Rate Agreements (FRA). An FRA is a contract for the period [T, T+tau] where the holder exchanges a fixed rate (agreed at the start of the contract) against a floating payment determined at time T based on the spot Libor rate for term `tau`. The cashflows are exchanged at the settlement time T_s, which is either equal to T or close to T. The FRA are structured so that the payments are made in T+tau dollars (ref [1]). The ForwardRateAgreement class can be used to create and price multiple FRAs simultaneously. However all FRAs within a FRA object must be priced using a common reference and discount curve. #### Example: The following example illustrates the construc\"\"\"\n\n def __init__(self, settlement_date, fixing_date, fixed_rate, notional=1.0, daycount_convention=None, rate_term=None, maturity_date=None, dtype=None, name=None):\n \"\"\"Initialize the batch of FRA contracts. Args: settlement_date: A rank 1 `DateTensor` specifying the dates on which cashflows are settled. The shape of the input correspond to the number of instruments being created. fixing_date: A rank 1 `DateTensor` specifying the dates on which forward rate will be fixed. The shape of the inout should be the same as that of `settlement_date`. fixed_rate: A rank 1 `Tensor` of real dtype specifying the fixed rate payment agreed at the initiation of the individual contracts. The shape should be the same as that of `settlement_date`. notional: A scalar or a rank 1 `Tensor` of real dtype specifying the notional amount for each contract. When the notional is spec\"\"\"\n <|body_0|>\n\n def price(self, valuation_date, market, model=None):\n \"\"\"Returns the present value of the instrument on the valuation date. Args: valuation_date: A scalar `DateTensor` specifying the date on which valuation is being desired. market: A namedtuple of type `InterestRateMarket` which contains the necessary information for pricing the FRA instrument. model: Reserved for future use. Returns: A Rank 1 `Tensor` of real type containing the modeled price of each FRA contract based on the input market data.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._name = name or 'forward_rate_agreement'\n if rate_term is None and maturity_date is None:\n raise ValueError('Error creating FRA. 
Either rate_term or maturity_date is required.')\n with tf.name_scope(self._name):\n self._dtype = dtype\n self._notional = tf.convert_to_tensor(notional, dtype=self._dtype)\n self._fixing_date = dates.convert_to_date_tensor(fixing_date)\n self._settlement_date = dates.convert_to_date_tensor(settlement_date)\n self._accrual_start_date = dates.convert_to_date_tensor(settlement_date)\n if rate_term is None:\n self._accrual_end_date = dates.convert_to_date_tensor(maturity_date)\n else:\n self._accrual_end_date = self._accrual_start_date + rate_term\n if daycount_convention is None:\n daycount_convention = rc.DayCountConvention.ACTUAL_360\n self._fixed_rate = tf.convert_to_tensor(fixed_rate, dtype=self._dtype, name='fixed_rate')\n self._daycount_convention = daycount_convention\n self._daycount_fraction = rc.get_daycount_fraction(self._accrual_start_date, self._accrual_end_date, self._daycount_convention, self._dtype)\n<|end_body_0|>\n\n<|body_start_1|>\n del model, valuation_date\n reference_curve = market.reference_curve\n discount_curve = market.discount_curve\n fwd_rate = reference_curve.get_forward_rate(self._accrual_start_date, self._accrual_end_date, self._daycount_fraction)\n discount_at_settlement = discount_curve.get_discount_factor(self._settlement_date)\n return discount_at_settlement * self._notional * (fwd_rate - self._fixed_rate) * self._daycount_fraction / (1.0 + self._daycount_fraction * fwd_rate)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000190", "length_bytes": 7988, "license_type": "permissive", "methods": [{"docstring": "Initialize the batch of FRA contracts. Args: settlement_date: A rank 1 `DateTensor` specifying the dates on which cashflows are settled. The shape of the input correspond to the number of instruments being created. fixing_date: A rank 1 `DateTensor` specifying the dates on which forward rate will be fixed. The shape of the inout should be the same as that of `settlement_date`. fixed_rate: A rank 1 `Tensor` of real dtype specifying the fixed rate payment agreed at the initiation of the individual contracts. The shape should be the same as that of `settlement_date`. notional: A scalar or a rank 1 `Tensor` of real dtype specifying the notional amount for each contract. When the notional is spec", "name": "__init__", "signature": "def __init__(self, settlement_date, fixing_date, fixed_rate, notional=1.0, daycount_convention=None, rate_term=None, maturity_date=None, dtype=None, name=None)"}, {"docstring": "Returns the present value of the instrument on the valuation date. Args: valuation_date: A scalar `DateTensor` specifying the date on which valuation is being desired. market: A namedtuple of type `InterestRateMarket` which contains the necessary information for pricing the FRA instrument. model: Reserved for future use. Returns: A Rank 1 `Tensor` of real type containing the modeled price of each FRA contract based on the input market data.", "name": "price", "signature": "def price(self, valuation_date, market, model=None)"}], "n_methods": 2, "prompt": "Implement the Python class `ForwardRateAgreement` described below.\n\nClass description:\nRepresents a batch of Forward Rate Agreements (FRA). An FRA is a contract for the period [T, T+tau] where the holder exchanges a fixed rate (agreed at the start of the contract) against a floating payment determined at time T based on the spot Libor rate for term `tau`. The cashflows are exchanged at the settlement time T_s, which is either equal to T or close to T. 
The FRA are structured so that the payments are made in T+tau dollars (ref [1]). The ForwardRateAgreement class can be used to create and price multiple FRAs simultaneously. However all FRAs within a FRA object must be priced using a common reference and discount curve. #### Example: The following example illustrates the construc\n\nMethod signatures and docstrings:\n- def __init__(self, settlement_date, fixing_date, fixed_rate, notional=1.0, daycount_convention=None, rate_term=None, maturity_date=None, dtype=None, name=None): Initialize the batch of FRA contracts. Args: settlement_date: A rank 1 `DateTensor` specifying the dates on which cashflows are settled. The shape of the input correspond to the number of instruments being created. fixing_date: A rank 1 `DateTensor` specifying the dates on which forward rate will be fixed. The shape of the inout should be the same as that of `settlement_date`. fixed_rate: A rank 1 `Tensor` of real dtype specifying the fixed rate payment agreed at the initiation of the individual contracts. The shape should be the same as that of `settlement_date`. notional: A scalar or a rank 1 `Tensor` of real dtype specifying the notional amount for each contract. When the notional is spec\n- def price(self, valuation_date, market, model=None): Returns the present value of the instrument on the valuation date. Args: valuation_date: A scalar `DateTensor` specifying the date on which valuation is being desired. market: A namedtuple of type `InterestRateMarket` which contains the necessary information for pricing the FRA instrument. model: Reserved for future use. Returns: A Rank 1 `Tensor` of real type containing the modeled price of each FRA contract based on the input market data.", "prompted_full_text": "Implement the Python class `ForwardRateAgreement` described below.\n\nClass description:\nRepresents a batch of Forward Rate Agreements (FRA). An FRA is a contract for the period [T, T+tau] where the holder exchanges a fixed rate (agreed at the start of the contract) against a floating payment determined at time T based on the spot Libor rate for term `tau`. The cashflows are exchanged at the settlement time T_s, which is either equal to T or close to T. The FRA are structured so that the payments are made in T+tau dollars (ref [1]). The ForwardRateAgreement class can be used to create and price multiple FRAs simultaneously. However all FRAs within a FRA object must be priced using a common reference and discount curve. #### Example: The following example illustrates the construc\n\nMethod signatures and docstrings:\n- def __init__(self, settlement_date, fixing_date, fixed_rate, notional=1.0, daycount_convention=None, rate_term=None, maturity_date=None, dtype=None, name=None): Initialize the batch of FRA contracts. Args: settlement_date: A rank 1 `DateTensor` specifying the dates on which cashflows are settled. The shape of the input correspond to the number of instruments being created. fixing_date: A rank 1 `DateTensor` specifying the dates on which forward rate will be fixed. The shape of the inout should be the same as that of `settlement_date`. fixed_rate: A rank 1 `Tensor` of real dtype specifying the fixed rate payment agreed at the initiation of the individual contracts. The shape should be the same as that of `settlement_date`. notional: A scalar or a rank 1 `Tensor` of real dtype specifying the notional amount for each contract. 
When the notional is spec\n- def price(self, valuation_date, market, model=None): Returns the present value of the instrument on the valuation date. Args: valuation_date: A scalar `DateTensor` specifying the date on which valuation is being desired. market: A namedtuple of type `InterestRateMarket` which contains the necessary information for pricing the FRA instrument. model: Reserved for future use. Returns: A Rank 1 `Tensor` of real type containing the modeled price of each FRA contract based on the input market data.\n\n<|skeleton|>\nclass ForwardRateAgreement:\n \"\"\"Represents a batch of Forward Rate Agreements (FRA). An FRA is a contract for the period [T, T+tau] where the holder exchanges a fixed rate (agreed at the start of the contract) against a floating payment determined at time T based on the spot Libor rate for term `tau`. The cashflows are exchanged at the settlement time T_s, which is either equal to T or close to T. The FRA are structured so that the payments are made in T+tau dollars (ref [1]). The ForwardRateAgreement class can be used to create and price multiple FRAs simultaneously. However all FRAs within a FRA object must be priced using a common reference and discount curve. #### Example: The following example illustrates the construc\"\"\"\n\n def __init__(self, settlement_date, fixing_date, fixed_rate, notional=1.0, daycount_convention=None, rate_term=None, maturity_date=None, dtype=None, name=None):\n \"\"\"Initialize the batch of FRA contracts. Args: settlement_date: A rank 1 `DateTensor` specifying the dates on which cashflows are settled. The shape of the input correspond to the number of instruments being created. fixing_date: A rank 1 `DateTensor` specifying the dates on which forward rate will be fixed. The shape of the inout should be the same as that of `settlement_date`. fixed_rate: A rank 1 `Tensor` of real dtype specifying the fixed rate payment agreed at the initiation of the individual contracts. The shape should be the same as that of `settlement_date`. notional: A scalar or a rank 1 `Tensor` of real dtype specifying the notional amount for each contract. When the notional is spec\"\"\"\n <|body_0|>\n\n def price(self, valuation_date, market, model=None):\n \"\"\"Returns the present value of the instrument on the valuation date. Args: valuation_date: A scalar `DateTensor` specifying the date on which valuation is being desired. market: A namedtuple of type `InterestRateMarket` which contains the necessary information for pricing the FRA instrument. model: Reserved for future use. Returns: A Rank 1 `Tensor` of real type containing the modeled price of each FRA contract based on the input market data.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._name = name or 'forward_rate_agreement'\n if rate_term is None and maturity_date is None:\n raise ValueError('Error creating FRA. 
Either rate_term or maturity_date is required.')\n with tf.name_scope(self._name):\n self._dtype = dtype\n self._notional = tf.convert_to_tensor(notional, dtype=self._dtype)\n self._fixing_date = dates.convert_to_date_tensor(fixing_date)\n self._settlement_date = dates.convert_to_date_tensor(settlement_date)\n self._accrual_start_date = dates.convert_to_date_tensor(settlement_date)\n if rate_term is None:\n self._accrual_end_date = dates.convert_to_date_tensor(maturity_date)\n else:\n self._accrual_end_date = self._accrual_start_date + rate_term\n if daycount_convention is None:\n daycount_convention = rc.DayCountConvention.ACTUAL_360\n self._fixed_rate = tf.convert_to_tensor(fixed_rate, dtype=self._dtype, name='fixed_rate')\n self._daycount_convention = daycount_convention\n self._daycount_fraction = rc.get_daycount_fraction(self._accrual_start_date, self._accrual_end_date, self._daycount_convention, self._dtype)\n<|end_body_0|>\n\n<|body_start_1|>\n del model, valuation_date\n reference_curve = market.reference_curve\n discount_curve = market.discount_curve\n fwd_rate = reference_curve.get_forward_rate(self._accrual_start_date, self._accrual_end_date, self._daycount_fraction)\n discount_at_settlement = discount_curve.get_discount_factor(self._settlement_date)\n return discount_at_settlement * self._notional * (fwd_rate - self._fixed_rate) * self._daycount_fraction / (1.0 + self._daycount_fraction * fwd_rate)\n<|end_body_1|>\n", "revision_id": "0d3a2193c0f2d320b65e602cf01d7a617da484df", "skeleton": "<|skeleton|>\nclass ForwardRateAgreement:\n \"\"\"Represents a batch of Forward Rate Agreements (FRA). An FRA is a contract for the period [T, T+tau] where the holder exchanges a fixed rate (agreed at the start of the contract) against a floating payment determined at time T based on the spot Libor rate for term `tau`. The cashflows are exchanged at the settlement time T_s, which is either equal to T or close to T. The FRA are structured so that the payments are made in T+tau dollars (ref [1]). The ForwardRateAgreement class can be used to create and price multiple FRAs simultaneously. However all FRAs within a FRA object must be priced using a common reference and discount curve. #### Example: The following example illustrates the construc\"\"\"\n\n def __init__(self, settlement_date, fixing_date, fixed_rate, notional=1.0, daycount_convention=None, rate_term=None, maturity_date=None, dtype=None, name=None):\n \"\"\"Initialize the batch of FRA contracts. Args: settlement_date: A rank 1 `DateTensor` specifying the dates on which cashflows are settled. The shape of the input correspond to the number of instruments being created. fixing_date: A rank 1 `DateTensor` specifying the dates on which forward rate will be fixed. The shape of the inout should be the same as that of `settlement_date`. fixed_rate: A rank 1 `Tensor` of real dtype specifying the fixed rate payment agreed at the initiation of the individual contracts. The shape should be the same as that of `settlement_date`. notional: A scalar or a rank 1 `Tensor` of real dtype specifying the notional amount for each contract. When the notional is spec\"\"\"\n <|body_0|>\n\n def price(self, valuation_date, market, model=None):\n \"\"\"Returns the present value of the instrument on the valuation date. Args: valuation_date: A scalar `DateTensor` specifying the date on which valuation is being desired. market: A namedtuple of type `InterestRateMarket` which contains the necessary information for pricing the FRA instrument. 
model: Reserved for future use. Returns: A Rank 1 `Tensor` of real type containing the modeled price of each FRA contract based on the input market data.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ForwardRateAgreement:\n \"\"\"Represents a batch of Forward Rate Agreements (FRA). An FRA is a contract for the period [T, T+tau] where the holder exchanges a fixed rate (agreed at the start of the contract) against a floating payment determined at time T based on the spot Libor rate for term `tau`. The cashflows are exchanged at the settlement time T_s, which is either equal to T or close to T. The FRA are structured so that the payments are made in T+tau dollars (ref [1]). The ForwardRateAgreement class can be used to create and price multiple FRAs simultaneously. However all FRAs within a FRA object must be priced using a common reference and discount curve. #### Example: The following example illustrates the construc\"\"\"\n\n def __init__(self, settlement_date, fixing_date, fixed_rate, notional=1.0, daycount_convention=None, rate_term=None, maturity_date=None, dtype=None, name=None):\n \"\"\"Initialize the batch of FRA contracts. Args: settlement_date: A rank 1 `DateTensor` specifying the dates on which cashflows are settled. The shape of the input correspond to the number of instruments being created. fixing_date: A rank 1 `DateTensor` specifying the dates on which forward rate will be fixed. The shape of the inout should be the same as that of `settlement_date`. fixed_rate: A rank 1 `Tensor` of real dtype specifying the fixed rate payment agreed at the initiation of the individual contracts. The shape should be the same as that of `settlement_date`. notional: A scalar or a rank 1 `Tensor` of real dtype specifying the notional amount for each contract. When the notional is spec\"\"\"\n self._name = name or 'forward_rate_agreement'\n if rate_term is None and maturity_date is None:\n raise ValueError('Error creating FRA. Either rate_term or maturity_date is required.')\n with tf.name_scope(self._name):\n self._dtype = dtype\n self._notional = tf.convert_to_tensor(notional, dtype=self._dtype)\n self._fixing_date = dates.convert_to_date_tensor(fixing_date)\n self._settlement_date = dates.convert_to_date_tensor(settlement_date)\n self._accrual_start_date = dates.convert_to_date_tensor(settlement_date)\n if rate_term is None:\n self._accrual_end_date = dates.convert_to_date_tensor(maturity_date)\n else:\n self._accrual_end_date = self._accrual_start_date + rate_term\n if daycount_convention is None:\n daycount_convention = rc.DayCountConvention.ACTUAL_360\n self._fixed_rate = tf.convert_to_tensor(fixed_rate, dtype=self._dtype, name='fixed_rate')\n self._daycount_convention = daycount_convention\n self._daycount_fraction = rc.get_daycount_fraction(self._accrual_start_date, self._accrual_end_date, self._daycount_convention, self._dtype)\n\n def price(self, valuation_date, market, model=None):\n \"\"\"Returns the present value of the instrument on the valuation date. Args: valuation_date: A scalar `DateTensor` specifying the date on which valuation is being desired. market: A namedtuple of type `InterestRateMarket` which contains the necessary information for pricing the FRA instrument. model: Reserved for future use. 
Returns: A Rank 1 `Tensor` of real type containing the modeled price of each FRA contract based on the input market data.\"\"\"\n del model, valuation_date\n reference_curve = market.reference_curve\n discount_curve = market.discount_curve\n fwd_rate = reference_curve.get_forward_rate(self._accrual_start_date, self._accrual_end_date, self._daycount_fraction)\n discount_at_settlement = discount_curve.get_discount_factor(self._settlement_date)\n return discount_at_settlement * self._notional * (fwd_rate - self._fixed_rate) * self._daycount_fraction / (1.0 + self._daycount_fraction * fwd_rate)\n", "source": "the_stack_v2_python_sparse", "source_path": "tf_quant_finance/experimental/instruments/forward_rate_agreement.py", "source_repo": "google/tf-quant-finance", "split": "val", "star_events_count": 4165} {"blob_id": "2dbf3d54c3cdce0619b317f0fb059cd179135a5b", "bodies": ["self.loss_scale = init_scale\nself.scale_factor = scale_factor\nself.scale_window = scale_window\nself.tolerance = tolerance\nself.threshold = threshold\nself._iter = 0\nself._last_overflow_iter = -1\nself._last_rescale_iter = -1\nself._overflows_since_rescale = 0", "iter_since_rescale = self._iter - self._last_rescale_iter\nif overflow:\n self._last_overflow_iter = self._iter\n self._overflows_since_rescale += 1\n pct_overflow = self._overflows_since_rescale / float(iter_since_rescale)\n if pct_overflow >= self.tolerance:\n self._decrease_loss_scale()\n self._last_rescale_iter = self._iter\n self._overflows_since_rescale = 0\nelif (self._iter - self._last_overflow_iter) % self.scale_window == 0:\n self.loss_scale *= self.scale_factor\n self._last_rescale_iter = self._iter\nself._iter += 1", "self.loss_scale /= self.scale_factor\nif self.threshold is not None:\n self.loss_scale = max(self.loss_scale, self.threshold)"], "bodies_text": "<|body_start_0|>\n self.loss_scale = init_scale\n self.scale_factor = scale_factor\n self.scale_window = scale_window\n self.tolerance = tolerance\n self.threshold = threshold\n self._iter = 0\n self._last_overflow_iter = -1\n self._last_rescale_iter = -1\n self._overflows_since_rescale = 0\n<|end_body_0|>\n\n<|body_start_1|>\n iter_since_rescale = self._iter - self._last_rescale_iter\n if overflow:\n self._last_overflow_iter = self._iter\n self._overflows_since_rescale += 1\n pct_overflow = self._overflows_since_rescale / float(iter_since_rescale)\n if pct_overflow >= self.tolerance:\n self._decrease_loss_scale()\n self._last_rescale_iter = self._iter\n self._overflows_since_rescale = 0\n elif (self._iter - self._last_overflow_iter) % self.scale_window == 0:\n self.loss_scale *= self.scale_factor\n self._last_rescale_iter = self._iter\n self._iter += 1\n<|end_body_1|>\n\n<|body_start_2|>\n self.loss_scale /= self.scale_factor\n if self.threshold is not None:\n self.loss_scale = max(self.loss_scale, self.threshold)\n<|end_body_2|>\n", "class_docstring": "Dynamically adjusts the loss scaling factor. Dynamic loss scalers are important in mixed-precision training. They help us avoid underflows and overflows in low-precision gradients. See here for information: Shamelessly stolen and adapted from Fairseq. ", "class_name": "DynamicLossScaler", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DynamicLossScaler:\n \"\"\"Dynamically adjusts the loss scaling factor. Dynamic loss scalers are important in mixed-precision training. They help us avoid underflows and overflows in low-precision gradients. 
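The price body in the ForwardRateAgreement record above is the textbook FRA value V = D(T_s) * N * (f - K) * tau / (1 + tau * f): the floating-minus-fixed payoff accrued over tau, divided by (1 + tau * f) because the payment is made in T+tau dollars, then discounted from the settlement date. A plain-Python mirror with made-up market numbers (the inputs below are illustrative assumptions, not outputs of the library's curve objects):

def fra_price(notional, fixed_rate, fwd_rate, daycount_fraction, discount_at_settlement):
    # Present value of receiving float / paying fixed over [T, T + tau].
    payoff = notional * (fwd_rate - fixed_rate) * daycount_fraction
    return discount_at_settlement * payoff / (1.0 + daycount_fraction * fwd_rate)

# Toy inputs: 1M notional, 2% strike, 2.5% forward, 3-month accrual, DF 0.99.
pv = fra_price(1_000_000.0, 0.02, 0.025, 0.25, 0.99)
print(round(pv, 2))  # 1229.81 -- positive since the forward sits above the strike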
See here for information: Shamelessly stolen and adapted from Fairseq. \"\"\"\n\n def __init__(self, init_scale: float=2.0 ** 15, scale_factor: float=2.0, scale_window: int=2000, tolerance: float=0.0, threshold: float=None):\n \"\"\":param init_scale: Initial loss scale. :param scale_factor: Factor by which to increase or decrease loss scale. :param scale_window: If we do not experience overflow in scale_window iterations, loss scale will increase by scale_factor. :param tolerance: Pct of iterations that have overflowed after which we must decrease the loss scale :param threshold: If not None, loss scale will decrease below this threshold\"\"\"\n <|body_0|>\n\n def update_scale(self, overflow: bool):\n \"\"\"Update the loss scale. If overflow exceeds our tolerance, we decrease the loss scale. If the number of iterations since the last overflow exceeds the scale window, we increase the loss scale.\"\"\"\n <|body_1|>\n\n def _decrease_loss_scale(self):\n \"\"\"Decrease the loss scale by self.scale_factor. NOTE: the loss_scale will not go below self.threshold.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.loss_scale = init_scale\n self.scale_factor = scale_factor\n self.scale_window = scale_window\n self.tolerance = tolerance\n self.threshold = threshold\n self._iter = 0\n self._last_overflow_iter = -1\n self._last_rescale_iter = -1\n self._overflows_since_rescale = 0\n<|end_body_0|>\n\n<|body_start_1|>\n iter_since_rescale = self._iter - self._last_rescale_iter\n if overflow:\n self._last_overflow_iter = self._iter\n self._overflows_since_rescale += 1\n pct_overflow = self._overflows_since_rescale / float(iter_since_rescale)\n if pct_overflow >= self.tolerance:\n self._decrease_loss_scale()\n self._last_rescale_iter = self._iter\n self._overflows_since_rescale = 0\n elif (self._iter - self._last_overflow_iter) % self.scale_window == 0:\n self.loss_scale *= self.scale_factor\n self._last_rescale_iter = self._iter\n self._iter += 1\n<|end_body_1|>\n\n<|body_start_2|>\n self.loss_scale /= self.scale_factor\n if self.threshold is not None:\n self.loss_scale = max(self.loss_scale, self.threshold)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000191", "length_bytes": 31338, "license_type": "permissive", "methods": [{"docstring": ":param init_scale: Initial loss scale. :param scale_factor: Factor by which to increase or decrease loss scale. :param scale_window: If we do not experience overflow in scale_window iterations, loss scale will increase by scale_factor. :param tolerance: Pct of iterations that have overflowed after which we must decrease the loss scale :param threshold: If not None, loss scale will decrease below this threshold", "name": "__init__", "signature": "def __init__(self, init_scale: float=2.0 ** 15, scale_factor: float=2.0, scale_window: int=2000, tolerance: float=0.0, threshold: float=None)"}, {"docstring": "Update the loss scale. If overflow exceeds our tolerance, we decrease the loss scale. If the number of iterations since the last overflow exceeds the scale window, we increase the loss scale.", "name": "update_scale", "signature": "def update_scale(self, overflow: bool)"}, {"docstring": "Decrease the loss scale by self.scale_factor. 
NOTE: the loss_scale will not go below self.threshold.", "name": "_decrease_loss_scale", "signature": "def _decrease_loss_scale(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006813", "prompt": "Implement the Python class `DynamicLossScaler` described below.\n\nClass description:\nDynamically adjusts the loss scaling factor. Dynamic loss scalers are important in mixed-precision training. They help us avoid underflows and overflows in low-precision gradients. See here for information: Shamelessly stolen and adapted from Fairseq. \n\nMethod signatures and docstrings:\n- def __init__(self, init_scale: float=2.0 ** 15, scale_factor: float=2.0, scale_window: int=2000, tolerance: float=0.0, threshold: float=None): :param init_scale: Initial loss scale. :param scale_factor: Factor by which to increase or decrease loss scale. :param scale_window: If we do not experience overflow in scale_window iterations, loss scale will increase by scale_factor. :param tolerance: Pct of iterations that have overflowed after which we must decrease the loss scale :param threshold: If not None, loss scale will decrease below this threshold\n- def update_scale(self, overflow: bool): Update the loss scale. If overflow exceeds our tolerance, we decrease the loss scale. If the number of iterations since the last overflow exceeds the scale window, we increase the loss scale.\n- def _decrease_loss_scale(self): Decrease the loss scale by self.scale_factor. NOTE: the loss_scale will not go below self.threshold.", "prompted_full_text": "Implement the Python class `DynamicLossScaler` described below.\n\nClass description:\nDynamically adjusts the loss scaling factor. Dynamic loss scalers are important in mixed-precision training. They help us avoid underflows and overflows in low-precision gradients. See here for information: Shamelessly stolen and adapted from Fairseq. \n\nMethod signatures and docstrings:\n- def __init__(self, init_scale: float=2.0 ** 15, scale_factor: float=2.0, scale_window: int=2000, tolerance: float=0.0, threshold: float=None): :param init_scale: Initial loss scale. :param scale_factor: Factor by which to increase or decrease loss scale. :param scale_window: If we do not experience overflow in scale_window iterations, loss scale will increase by scale_factor. :param tolerance: Pct of iterations that have overflowed after which we must decrease the loss scale :param threshold: If not None, loss scale will decrease below this threshold\n- def update_scale(self, overflow: bool): Update the loss scale. If overflow exceeds our tolerance, we decrease the loss scale. If the number of iterations since the last overflow exceeds the scale window, we increase the loss scale.\n- def _decrease_loss_scale(self): Decrease the loss scale by self.scale_factor. NOTE: the loss_scale will not go below self.threshold.\n\n<|skeleton|>\nclass DynamicLossScaler:\n \"\"\"Dynamically adjusts the loss scaling factor. Dynamic loss scalers are important in mixed-precision training. They help us avoid underflows and overflows in low-precision gradients. See here for information: Shamelessly stolen and adapted from Fairseq. \"\"\"\n\n def __init__(self, init_scale: float=2.0 ** 15, scale_factor: float=2.0, scale_window: int=2000, tolerance: float=0.0, threshold: float=None):\n \"\"\":param init_scale: Initial loss scale. :param scale_factor: Factor by which to increase or decrease loss scale. 
:param scale_window: If we do not experience overflow in scale_window iterations, loss scale will increase by scale_factor. :param tolerance: Pct of iterations that have overflowed after which we must decrease the loss scale :param threshold: If not None, loss scale will decrease below this threshold\"\"\"\n <|body_0|>\n\n def update_scale(self, overflow: bool):\n \"\"\"Update the loss scale. If overflow exceeds our tolerance, we decrease the loss scale. If the number of iterations since the last overflow exceeds the scale window, we increase the loss scale.\"\"\"\n <|body_1|>\n\n def _decrease_loss_scale(self):\n \"\"\"Decrease the loss scale by self.scale_factor. NOTE: the loss_scale will not go below self.threshold.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.loss_scale = init_scale\n self.scale_factor = scale_factor\n self.scale_window = scale_window\n self.tolerance = tolerance\n self.threshold = threshold\n self._iter = 0\n self._last_overflow_iter = -1\n self._last_rescale_iter = -1\n self._overflows_since_rescale = 0\n<|end_body_0|>\n\n<|body_start_1|>\n iter_since_rescale = self._iter - self._last_rescale_iter\n if overflow:\n self._last_overflow_iter = self._iter\n self._overflows_since_rescale += 1\n pct_overflow = self._overflows_since_rescale / float(iter_since_rescale)\n if pct_overflow >= self.tolerance:\n self._decrease_loss_scale()\n self._last_rescale_iter = self._iter\n self._overflows_since_rescale = 0\n elif (self._iter - self._last_overflow_iter) % self.scale_window == 0:\n self.loss_scale *= self.scale_factor\n self._last_rescale_iter = self._iter\n self._iter += 1\n<|end_body_1|>\n\n<|body_start_2|>\n self.loss_scale /= self.scale_factor\n if self.threshold is not None:\n self.loss_scale = max(self.loss_scale, self.threshold)\n<|end_body_2|>\n", "revision_id": "e1d899edfb92471552bae153f59ad30aa7fca468", "skeleton": "<|skeleton|>\nclass DynamicLossScaler:\n \"\"\"Dynamically adjusts the loss scaling factor. Dynamic loss scalers are important in mixed-precision training. They help us avoid underflows and overflows in low-precision gradients. See here for information: Shamelessly stolen and adapted from Fairseq. \"\"\"\n\n def __init__(self, init_scale: float=2.0 ** 15, scale_factor: float=2.0, scale_window: int=2000, tolerance: float=0.0, threshold: float=None):\n \"\"\":param init_scale: Initial loss scale. :param scale_factor: Factor by which to increase or decrease loss scale. :param scale_window: If we do not experience overflow in scale_window iterations, loss scale will increase by scale_factor. :param tolerance: Pct of iterations that have overflowed after which we must decrease the loss scale :param threshold: If not None, loss scale will decrease below this threshold\"\"\"\n <|body_0|>\n\n def update_scale(self, overflow: bool):\n \"\"\"Update the loss scale. If overflow exceeds our tolerance, we decrease the loss scale. If the number of iterations since the last overflow exceeds the scale window, we increase the loss scale.\"\"\"\n <|body_1|>\n\n def _decrease_loss_scale(self):\n \"\"\"Decrease the loss scale by self.scale_factor. NOTE: the loss_scale will not go below self.threshold.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DynamicLossScaler:\n \"\"\"Dynamically adjusts the loss scaling factor. Dynamic loss scalers are important in mixed-precision training. 
They help us avoid underflows and overflows in low-precision gradients. See here for information: Shamelessly stolen and adapted from Fairseq. \"\"\"\n\n def __init__(self, init_scale: float=2.0 ** 15, scale_factor: float=2.0, scale_window: int=2000, tolerance: float=0.0, threshold: float=None):\n \"\"\":param init_scale: Initial loss scale. :param scale_factor: Factor by which to increase or decrease loss scale. :param scale_window: If we do not experience overflow in scale_window iterations, loss scale will increase by scale_factor. :param tolerance: Pct of iterations that have overflowed after which we must decrease the loss scale :param threshold: If not None, loss scale will decrease below this threshold\"\"\"\n self.loss_scale = init_scale\n self.scale_factor = scale_factor\n self.scale_window = scale_window\n self.tolerance = tolerance\n self.threshold = threshold\n self._iter = 0\n self._last_overflow_iter = -1\n self._last_rescale_iter = -1\n self._overflows_since_rescale = 0\n\n def update_scale(self, overflow: bool):\n \"\"\"Update the loss scale. If overflow exceeds our tolerance, we decrease the loss scale. If the number of iterations since the last overflow exceeds the scale window, we increase the loss scale.\"\"\"\n iter_since_rescale = self._iter - self._last_rescale_iter\n if overflow:\n self._last_overflow_iter = self._iter\n self._overflows_since_rescale += 1\n pct_overflow = self._overflows_since_rescale / float(iter_since_rescale)\n if pct_overflow >= self.tolerance:\n self._decrease_loss_scale()\n self._last_rescale_iter = self._iter\n self._overflows_since_rescale = 0\n elif (self._iter - self._last_overflow_iter) % self.scale_window == 0:\n self.loss_scale *= self.scale_factor\n self._last_rescale_iter = self._iter\n self._iter += 1\n\n def _decrease_loss_scale(self):\n \"\"\"Decrease the loss scale by self.scale_factor. 
NOTE: the loss_scale will not go below self.threshold.\"\"\"\n self.loss_scale /= self.scale_factor\n if self.threshold is not None:\n self.loss_scale = max(self.loss_scale, self.threshold)\n", "source": "the_stack_v2_python_sparse", "source_path": "parlai/utils/fp16.py", "source_repo": "facebookresearch/ParlAI", "split": "val", "star_events_count": 10943} {"blob_id": "a8fc46e817f96e4a28116e21595a47200ee57e35", "bodies": ["for project_id, backend_services in resource_from_api.iteritems():\n for backend_service in backend_services:\n yield {'project_id': project_id, 'id': backend_service.get('id'), 'creation_timestamp': parser.format_timestamp(backend_service.get('creationTimestamp'), self.MYSQL_DATETIME_FORMAT), 'name': backend_service.get('name'), 'description': backend_service.get('description'), 'affinity_cookie_ttl_sec': self._to_int(backend_service.get('affinityCookieTtlSec')), 'backends': parser.json_stringify(backend_service.get('backends', [])), 'cdn_policy': parser.json_stringify(backend_service.get('cdnPolicy', {})), 'connection_draining': parser.json_stringify(backend_service.get('connectionDraining', {})), 'enable_cdn': self._to_bool(backend_service.get('enableCDN')), 'health_checks': parser.json_stringify(backend_service.get('healthChecks', [])), 'iap': parser.json_stringify(backend_service.get('iap', {})), 'load_balancing_scheme': backend_service.get('loadBalancingScheme'), 'port': self._to_int(backend_service.get('port')), 'port_name': backend_service.get('portName'), 'protocol': backend_service.get('protocol'), 'region': backend_service.get('region'), 'session_affinity': backend_service.get('sessionAffinity'), 'timeout_sec': backend_service.get('timeoutSec'), 'raw_backend_service': parser.json_stringify(backend_service)}", "projects = proj_dao.ProjectDao(self.global_configs).get_projects(self.cycle_timestamp)\nbackend_services = {}\nfor project in projects:\n project_backend_services = self.safe_api_call('get_backend_services', project.id)\n if project_backend_services:\n backend_services[project.id] = project_backend_services\nreturn backend_services", "forwarding_rules = self._retrieve()\nloadable_rules = self._transform(forwarding_rules)\nself._load(self.RESOURCE_NAME, loadable_rules)\nself._get_loaded_count()"], "bodies_text": "<|body_start_0|>\n for project_id, backend_services in resource_from_api.iteritems():\n for backend_service in backend_services:\n yield {'project_id': project_id, 'id': backend_service.get('id'), 'creation_timestamp': parser.format_timestamp(backend_service.get('creationTimestamp'), self.MYSQL_DATETIME_FORMAT), 'name': backend_service.get('name'), 'description': backend_service.get('description'), 'affinity_cookie_ttl_sec': self._to_int(backend_service.get('affinityCookieTtlSec')), 'backends': parser.json_stringify(backend_service.get('backends', [])), 'cdn_policy': parser.json_stringify(backend_service.get('cdnPolicy', {})), 'connection_draining': parser.json_stringify(backend_service.get('connectionDraining', {})), 'enable_cdn': self._to_bool(backend_service.get('enableCDN')), 'health_checks': parser.json_stringify(backend_service.get('healthChecks', [])), 'iap': parser.json_stringify(backend_service.get('iap', {})), 'load_balancing_scheme': backend_service.get('loadBalancingScheme'), 'port': self._to_int(backend_service.get('port')), 'port_name': backend_service.get('portName'), 'protocol': backend_service.get('protocol'), 'region': backend_service.get('region'), 'session_affinity': backend_service.get('sessionAffinity'), 'timeout_sec': 
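The DynamicLossScaler record above leaves overflow detection to the caller: the training loop multiplies the loss by loss_scale, checks the resulting gradients for inf/nan, and reports the verdict through update_scale. A small self-contained simulation of that dynamic, assuming DynamicLossScaler is defined as in the record (the 0.1% overflow probability is an arbitrary stand-in for real fp16 overflows):

import random

scaler = DynamicLossScaler()  # defaults: init scale 2**15, window 2000, tolerance 0.0
random.seed(0)
for step in range(10_000):
    overflow = random.random() < 0.001  # pretend gradients overflow ~0.1% of the time
    scaler.update_scale(overflow)       # halves on overflow, doubles after 2000 clean steps
    # a real loop would also skip the optimizer step whenever overflow is True
print(scaler.loss_scale)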
backend_service.get('timeoutSec'), 'raw_backend_service': parser.json_stringify(backend_service)}\n<|end_body_0|>\n\n<|body_start_1|>\n projects = proj_dao.ProjectDao(self.global_configs).get_projects(self.cycle_timestamp)\n backend_services = {}\n for project in projects:\n project_backend_services = self.safe_api_call('get_backend_services', project.id)\n if project_backend_services:\n backend_services[project.id] = project_backend_services\n return backend_services\n<|end_body_1|>\n\n<|body_start_2|>\n forwarding_rules = self._retrieve()\n loadable_rules = self._transform(forwarding_rules)\n self._load(self.RESOURCE_NAME, loadable_rules)\n self._get_loaded_count()\n<|end_body_2|>\n", "class_docstring": "Load compute backend services for all projects.", "class_name": "LoadBackendServicesPipeline", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LoadBackendServicesPipeline:\n \"\"\"Load compute backend services for all projects.\"\"\"\n\n def _transform(self, resource_from_api):\n \"\"\"Create an iterator of backend services to load into database. Args: resource_from_api (dict): Forwarding rules, keyed by project id, from GCP API. Yields: iterator: backend service properties in a dict.\"\"\"\n <|body_0|>\n\n def _retrieve(self):\n \"\"\"Retrieve backend services from GCP. Get all the projects in the current snapshot and retrieve the compute backend services for each. Returns: dict: Mapping projects with their backend services (list): {project_id: [backend_services]}\"\"\"\n <|body_1|>\n\n def run(self):\n \"\"\"Run the pipeline.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for project_id, backend_services in resource_from_api.iteritems():\n for backend_service in backend_services:\n yield {'project_id': project_id, 'id': backend_service.get('id'), 'creation_timestamp': parser.format_timestamp(backend_service.get('creationTimestamp'), self.MYSQL_DATETIME_FORMAT), 'name': backend_service.get('name'), 'description': backend_service.get('description'), 'affinity_cookie_ttl_sec': self._to_int(backend_service.get('affinityCookieTtlSec')), 'backends': parser.json_stringify(backend_service.get('backends', [])), 'cdn_policy': parser.json_stringify(backend_service.get('cdnPolicy', {})), 'connection_draining': parser.json_stringify(backend_service.get('connectionDraining', {})), 'enable_cdn': self._to_bool(backend_service.get('enableCDN')), 'health_checks': parser.json_stringify(backend_service.get('healthChecks', [])), 'iap': parser.json_stringify(backend_service.get('iap', {})), 'load_balancing_scheme': backend_service.get('loadBalancingScheme'), 'port': self._to_int(backend_service.get('port')), 'port_name': backend_service.get('portName'), 'protocol': backend_service.get('protocol'), 'region': backend_service.get('region'), 'session_affinity': backend_service.get('sessionAffinity'), 'timeout_sec': backend_service.get('timeoutSec'), 'raw_backend_service': parser.json_stringify(backend_service)}\n<|end_body_0|>\n\n<|body_start_1|>\n projects = proj_dao.ProjectDao(self.global_configs).get_projects(self.cycle_timestamp)\n backend_services = {}\n for project in projects:\n project_backend_services = self.safe_api_call('get_backend_services', project.id)\n if project_backend_services:\n backend_services[project.id] = project_backend_services\n return backend_services\n<|end_body_1|>\n\n<|body_start_2|>\n forwarding_rules = self._retrieve()\n loadable_rules = self._transform(forwarding_rules)\n 
self._load(self.RESOURCE_NAME, loadable_rules)\n self._get_loaded_count()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000192", "length_bytes": 4698, "license_type": "permissive", "methods": [{"docstring": "Create an iterator of backend services to load into database. Args: resource_from_api (dict): Forwarding rules, keyed by project id, from GCP API. Yields: iterator: backend service properties in a dict.", "name": "_transform", "signature": "def _transform(self, resource_from_api)"}, {"docstring": "Retrieve backend services from GCP. Get all the projects in the current snapshot and retrieve the compute backend services for each. Returns: dict: Mapping projects with their backend services (list): {project_id: [backend_services]}", "name": "_retrieve", "signature": "def _retrieve(self)"}, {"docstring": "Run the pipeline.", "name": "run", "signature": "def run(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006121", "prompt": "Implement the Python class `LoadBackendServicesPipeline` described below.\n\nClass description:\nLoad compute backend services for all projects.\n\nMethod signatures and docstrings:\n- def _transform(self, resource_from_api): Create an iterator of backend services to load into database. Args: resource_from_api (dict): Forwarding rules, keyed by project id, from GCP API. Yields: iterator: backend service properties in a dict.\n- def _retrieve(self): Retrieve backend services from GCP. Get all the projects in the current snapshot and retrieve the compute backend services for each. Returns: dict: Mapping projects with their backend services (list): {project_id: [backend_services]}\n- def run(self): Run the pipeline.", "prompted_full_text": "Implement the Python class `LoadBackendServicesPipeline` described below.\n\nClass description:\nLoad compute backend services for all projects.\n\nMethod signatures and docstrings:\n- def _transform(self, resource_from_api): Create an iterator of backend services to load into database. Args: resource_from_api (dict): Forwarding rules, keyed by project id, from GCP API. Yields: iterator: backend service properties in a dict.\n- def _retrieve(self): Retrieve backend services from GCP. Get all the projects in the current snapshot and retrieve the compute backend services for each. Returns: dict: Mapping projects with their backend services (list): {project_id: [backend_services]}\n- def run(self): Run the pipeline.\n\n<|skeleton|>\nclass LoadBackendServicesPipeline:\n \"\"\"Load compute backend services for all projects.\"\"\"\n\n def _transform(self, resource_from_api):\n \"\"\"Create an iterator of backend services to load into database. Args: resource_from_api (dict): Forwarding rules, keyed by project id, from GCP API. Yields: iterator: backend service properties in a dict.\"\"\"\n <|body_0|>\n\n def _retrieve(self):\n \"\"\"Retrieve backend services from GCP. Get all the projects in the current snapshot and retrieve the compute backend services for each. 
Returns: dict: Mapping projects with their backend services (list): {project_id: [backend_services]}\"\"\"\n <|body_1|>\n\n def run(self):\n \"\"\"Run the pipeline.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for project_id, backend_services in resource_from_api.iteritems():\n for backend_service in backend_services:\n yield {'project_id': project_id, 'id': backend_service.get('id'), 'creation_timestamp': parser.format_timestamp(backend_service.get('creationTimestamp'), self.MYSQL_DATETIME_FORMAT), 'name': backend_service.get('name'), 'description': backend_service.get('description'), 'affinity_cookie_ttl_sec': self._to_int(backend_service.get('affinityCookieTtlSec')), 'backends': parser.json_stringify(backend_service.get('backends', [])), 'cdn_policy': parser.json_stringify(backend_service.get('cdnPolicy', {})), 'connection_draining': parser.json_stringify(backend_service.get('connectionDraining', {})), 'enable_cdn': self._to_bool(backend_service.get('enableCDN')), 'health_checks': parser.json_stringify(backend_service.get('healthChecks', [])), 'iap': parser.json_stringify(backend_service.get('iap', {})), 'load_balancing_scheme': backend_service.get('loadBalancingScheme'), 'port': self._to_int(backend_service.get('port')), 'port_name': backend_service.get('portName'), 'protocol': backend_service.get('protocol'), 'region': backend_service.get('region'), 'session_affinity': backend_service.get('sessionAffinity'), 'timeout_sec': backend_service.get('timeoutSec'), 'raw_backend_service': parser.json_stringify(backend_service)}\n<|end_body_0|>\n\n<|body_start_1|>\n projects = proj_dao.ProjectDao(self.global_configs).get_projects(self.cycle_timestamp)\n backend_services = {}\n for project in projects:\n project_backend_services = self.safe_api_call('get_backend_services', project.id)\n if project_backend_services:\n backend_services[project.id] = project_backend_services\n return backend_services\n<|end_body_1|>\n\n<|body_start_2|>\n forwarding_rules = self._retrieve()\n loadable_rules = self._transform(forwarding_rules)\n self._load(self.RESOURCE_NAME, loadable_rules)\n self._get_loaded_count()\n<|end_body_2|>\n", "revision_id": "a6a1aa7464cda2ad5948e3e8876eb8dded5e2514", "skeleton": "<|skeleton|>\nclass LoadBackendServicesPipeline:\n \"\"\"Load compute backend services for all projects.\"\"\"\n\n def _transform(self, resource_from_api):\n \"\"\"Create an iterator of backend services to load into database. Args: resource_from_api (dict): Forwarding rules, keyed by project id, from GCP API. Yields: iterator: backend service properties in a dict.\"\"\"\n <|body_0|>\n\n def _retrieve(self):\n \"\"\"Retrieve backend services from GCP. Get all the projects in the current snapshot and retrieve the compute backend services for each. Returns: dict: Mapping projects with their backend services (list): {project_id: [backend_services]}\"\"\"\n <|body_1|>\n\n def run(self):\n \"\"\"Run the pipeline.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LoadBackendServicesPipeline:\n \"\"\"Load compute backend services for all projects.\"\"\"\n\n def _transform(self, resource_from_api):\n \"\"\"Create an iterator of backend services to load into database. Args: resource_from_api (dict): Forwarding rules, keyed by project id, from GCP API. 
Yields: iterator: backend service properties in a dict.\"\"\"\n for project_id, backend_services in resource_from_api.iteritems():\n for backend_service in backend_services:\n yield {'project_id': project_id, 'id': backend_service.get('id'), 'creation_timestamp': parser.format_timestamp(backend_service.get('creationTimestamp'), self.MYSQL_DATETIME_FORMAT), 'name': backend_service.get('name'), 'description': backend_service.get('description'), 'affinity_cookie_ttl_sec': self._to_int(backend_service.get('affinityCookieTtlSec')), 'backends': parser.json_stringify(backend_service.get('backends', [])), 'cdn_policy': parser.json_stringify(backend_service.get('cdnPolicy', {})), 'connection_draining': parser.json_stringify(backend_service.get('connectionDraining', {})), 'enable_cdn': self._to_bool(backend_service.get('enableCDN')), 'health_checks': parser.json_stringify(backend_service.get('healthChecks', [])), 'iap': parser.json_stringify(backend_service.get('iap', {})), 'load_balancing_scheme': backend_service.get('loadBalancingScheme'), 'port': self._to_int(backend_service.get('port')), 'port_name': backend_service.get('portName'), 'protocol': backend_service.get('protocol'), 'region': backend_service.get('region'), 'session_affinity': backend_service.get('sessionAffinity'), 'timeout_sec': backend_service.get('timeoutSec'), 'raw_backend_service': parser.json_stringify(backend_service)}\n\n def _retrieve(self):\n \"\"\"Retrieve backend services from GCP. Get all the projects in the current snapshot and retrieve the compute backend services for each. Returns: dict: Mapping projects with their backend services (list): {project_id: [backend_services]}\"\"\"\n projects = proj_dao.ProjectDao(self.global_configs).get_projects(self.cycle_timestamp)\n backend_services = {}\n for project in projects:\n project_backend_services = self.safe_api_call('get_backend_services', project.id)\n if project_backend_services:\n backend_services[project.id] = project_backend_services\n return backend_services\n\n def run(self):\n \"\"\"Run the pipeline.\"\"\"\n forwarding_rules = self._retrieve()\n loadable_rules = self._transform(forwarding_rules)\n self._load(self.RESOURCE_NAME, loadable_rules)\n self._get_loaded_count()\n", "source": "the_stack_v2_python_sparse", "source_path": "google/cloud/security/inventory/pipelines/load_backend_services_pipeline.py", "source_repo": "shimizu19691210/forseti-security", "split": "val", "star_events_count": 1} {"blob_id": "484ab52a9b288846d4ff055d7a46687ac2ba512d", "bodies": ["super(NormalizeImage, self).__init__()\nself.mean = mean\nself.std = std\nself.is_scale = is_scale\nself.is_channel_first = is_channel_first\nif not (isinstance(self.mean, list) and isinstance(self.std, list) and isinstance(self.is_scale, bool)):\n raise TypeError('{}: input type is invalid.'.format(self))\nfrom functools import reduce\nif reduce(lambda x, y: x * y, self.std) == 0:\n raise ValueError('{}: std is invalid!'.format(self))", "for k in sample.keys():\n if 'image' in k:\n im = sample[k]\n im = im.astype(np.float32, copy=False)\n if self.is_channel_first:\n mean = np.array(self.mean)[:, np.newaxis, np.newaxis]\n std = np.array(self.std)[:, np.newaxis, np.newaxis]\n else:\n mean = np.array(self.mean)[np.newaxis, np.newaxis, :]\n std = np.array(self.std)[np.newaxis, np.newaxis, :]\n if self.is_scale:\n im = im / 255.0\n im -= mean\n im /= std\n sample[k] = im\nreturn sample"], "bodies_text": "<|body_start_0|>\n super(NormalizeImage, self).__init__()\n self.mean = mean\n self.std = std\n self.is_scale 
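The pipeline record above follows a retrieve -> transform -> load shape: pull per-project resources from the API, flatten them into rows with a generator, then hand the rows to a loader. A dependency-free sketch of that shape, with an in-memory dict standing in for the GCP API and a list standing in for the database (all names below are hypothetical):

class ToyServicesPipeline:
    RESOURCE_NAME = 'backend_services'

    def __init__(self, api_response, sink):
        self._api_response = api_response  # {project_id: [service dicts]}
        self._sink = sink                  # list acting as the load target

    def _retrieve(self):
        return self._api_response

    def _transform(self, resource_from_api):
        # Flatten the per-project mapping into one row per service,
        # mirroring the generator in the record above.
        for project_id, services in resource_from_api.items():
            for service in services:
                yield {'project_id': project_id, 'name': service.get('name')}

    def _load(self, resource_name, rows):
        self._sink.extend(rows)

    def run(self):
        self._load(self.RESOURCE_NAME, self._transform(self._retrieve()))

sink = []
ToyServicesPipeline({'proj-1': [{'name': 'web-backend'}]}, sink).run()
print(sink)  # [{'project_id': 'proj-1', 'name': 'web-backend'}]

Keeping _transform a generator means rows are produced lazily, so the loader controls batching and nothing forces the full result set into memory at once.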
= is_scale\n self.is_channel_first = is_channel_first\n if not (isinstance(self.mean, list) and isinstance(self.std, list) and isinstance(self.is_scale, bool)):\n raise TypeError('{}: input type is invalid.'.format(self))\n from functools import reduce\n if reduce(lambda x, y: x * y, self.std) == 0:\n raise ValueError('{}: std is invalid!'.format(self))\n<|end_body_0|>\n\n<|body_start_1|>\n for k in sample.keys():\n if 'image' in k:\n im = sample[k]\n im = im.astype(np.float32, copy=False)\n if self.is_channel_first:\n mean = np.array(self.mean)[:, np.newaxis, np.newaxis]\n std = np.array(self.std)[:, np.newaxis, np.newaxis]\n else:\n mean = np.array(self.mean)[np.newaxis, np.newaxis, :]\n std = np.array(self.std)[np.newaxis, np.newaxis, :]\n if self.is_scale:\n im = im / 255.0\n im -= mean\n im /= std\n sample[k] = im\n return sample\n<|end_body_1|>\n", "class_docstring": "", "class_name": "NormalizeImage", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NormalizeImage:\n\n def __init__(self, mean=[0.485, 0.456, 0.406], std=[1, 1, 1], is_scale=True, is_channel_first=True):\n \"\"\"Args: mean (list): the pixel mean std (list): the pixel variance\"\"\"\n <|body_0|>\n\n def __call__(self, sample, context=None):\n \"\"\"Normalize the image. Operators: 1.(optional) Scale the image to [0,1] 2. Each pixel minus mean and is divided by std\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(NormalizeImage, self).__init__()\n self.mean = mean\n self.std = std\n self.is_scale = is_scale\n self.is_channel_first = is_channel_first\n if not (isinstance(self.mean, list) and isinstance(self.std, list) and isinstance(self.is_scale, bool)):\n raise TypeError('{}: input type is invalid.'.format(self))\n from functools import reduce\n if reduce(lambda x, y: x * y, self.std) == 0:\n raise ValueError('{}: std is invalid!'.format(self))\n<|end_body_0|>\n\n<|body_start_1|>\n for k in sample.keys():\n if 'image' in k:\n im = sample[k]\n im = im.astype(np.float32, copy=False)\n if self.is_channel_first:\n mean = np.array(self.mean)[:, np.newaxis, np.newaxis]\n std = np.array(self.std)[:, np.newaxis, np.newaxis]\n else:\n mean = np.array(self.mean)[np.newaxis, np.newaxis, :]\n std = np.array(self.std)[np.newaxis, np.newaxis, :]\n if self.is_scale:\n im = im / 255.0\n im -= mean\n im /= std\n sample[k] = im\n return sample\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000193", "length_bytes": 39037, "license_type": "permissive", "methods": [{"docstring": "Args: mean (list): the pixel mean std (list): the pixel variance", "name": "__init__", "signature": "def __init__(self, mean=[0.485, 0.456, 0.406], std=[1, 1, 1], is_scale=True, is_channel_first=True)"}, {"docstring": "Normalize the image. Operators: 1.(optional) Scale the image to [0,1] 2. Each pixel minus mean and is divided by std", "name": "__call__", "signature": "def __call__(self, sample, context=None)"}], "n_methods": 2, "prompt": "Implement the Python class `NormalizeImage` described below.\n\nClass description:\nImplement the NormalizeImage class.\n\nMethod signatures and docstrings:\n- def __init__(self, mean=[0.485, 0.456, 0.406], std=[1, 1, 1], is_scale=True, is_channel_first=True): Args: mean (list): the pixel mean std (list): the pixel variance\n- def __call__(self, sample, context=None): Normalize the image. Operators: 1.(optional) Scale the image to [0,1] 2. 
Each pixel minus mean and is divided by std", "prompted_full_text": "Implement the Python class `NormalizeImage` described below.\n\nClass description:\nImplement the NormalizeImage class.\n\nMethod signatures and docstrings:\n- def __init__(self, mean=[0.485, 0.456, 0.406], std=[1, 1, 1], is_scale=True, is_channel_first=True): Args: mean (list): the pixel mean std (list): the pixel variance\n- def __call__(self, sample, context=None): Normalize the image. Operators: 1.(optional) Scale the image to [0,1] 2. Each pixel minus mean and is divided by std\n\n<|skeleton|>\nclass NormalizeImage:\n\n def __init__(self, mean=[0.485, 0.456, 0.406], std=[1, 1, 1], is_scale=True, is_channel_first=True):\n \"\"\"Args: mean (list): the pixel mean std (list): the pixel variance\"\"\"\n <|body_0|>\n\n def __call__(self, sample, context=None):\n \"\"\"Normalize the image. Operators: 1.(optional) Scale the image to [0,1] 2. Each pixel minus mean and is divided by std\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(NormalizeImage, self).__init__()\n self.mean = mean\n self.std = std\n self.is_scale = is_scale\n self.is_channel_first = is_channel_first\n if not (isinstance(self.mean, list) and isinstance(self.std, list) and isinstance(self.is_scale, bool)):\n raise TypeError('{}: input type is invalid.'.format(self))\n from functools import reduce\n if reduce(lambda x, y: x * y, self.std) == 0:\n raise ValueError('{}: std is invalid!'.format(self))\n<|end_body_0|>\n\n<|body_start_1|>\n for k in sample.keys():\n if 'image' in k:\n im = sample[k]\n im = im.astype(np.float32, copy=False)\n if self.is_channel_first:\n mean = np.array(self.mean)[:, np.newaxis, np.newaxis]\n std = np.array(self.std)[:, np.newaxis, np.newaxis]\n else:\n mean = np.array(self.mean)[np.newaxis, np.newaxis, :]\n std = np.array(self.std)[np.newaxis, np.newaxis, :]\n if self.is_scale:\n im = im / 255.0\n im -= mean\n im /= std\n sample[k] = im\n return sample\n<|end_body_1|>\n", "revision_id": "420527996b6da60ca401717a734329f126ed0680", "skeleton": "<|skeleton|>\nclass NormalizeImage:\n\n def __init__(self, mean=[0.485, 0.456, 0.406], std=[1, 1, 1], is_scale=True, is_channel_first=True):\n \"\"\"Args: mean (list): the pixel mean std (list): the pixel variance\"\"\"\n <|body_0|>\n\n def __call__(self, sample, context=None):\n \"\"\"Normalize the image. Operators: 1.(optional) Scale the image to [0,1] 2. Each pixel minus mean and is divided by std\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NormalizeImage:\n def __init__(self, mean=[0.485, 0.456, 0.406], std=[1, 1, 1], is_scale=True, is_channel_first=True):\n \"\"\"Args: mean (list): the pixel mean std (list): the pixel variance\"\"\"\n super(NormalizeImage, self).__init__()\n self.mean = mean\n self.std = std\n self.is_scale = is_scale\n self.is_channel_first = is_channel_first\n if not (isinstance(self.mean, list) and isinstance(self.std, list) and isinstance(self.is_scale, bool)):\n raise TypeError('{}: input type is invalid.'.format(self))\n from functools import reduce\n if reduce(lambda x, y: x * y, self.std) == 0:\n raise ValueError('{}: std is invalid!'.format(self))\n\n def __call__(self, sample, context=None):\n \"\"\"Normalize the image. Operators: 1.(optional) Scale the image to [0,1] 2. 
Each pixel minus mean and is divided by std\"\"\"\n for k in sample.keys():\n if 'image' in k:\n im = sample[k]\n im = im.astype(np.float32, copy=False)\n if self.is_channel_first:\n mean = np.array(self.mean)[:, np.newaxis, np.newaxis]\n std = np.array(self.std)[:, np.newaxis, np.newaxis]\n else:\n mean = np.array(self.mean)[np.newaxis, np.newaxis, :]\n std = np.array(self.std)[np.newaxis, np.newaxis, :]\n if self.is_scale:\n im = im / 255.0\n im -= mean\n im /= std\n sample[k] = im\n return sample\n", "source": "the_stack_v2_python_sparse", "source_path": "PaddleCV/PaddleDetection/ppdet/data/transform/operators.py", "source_repo": "chenbjin/models", "split": "val", "star_events_count": 3} {"blob_id": "e563ab1ccfc045a090bbdc0ecbeb8deb61be53bb", "bodies": ["self.gsutil_bin_dir = gsutil_bin_dir\nself.boto_lib_dir = boto_lib_dir\nself.config_file_list = config_file_list\nself.gsutil_ver = gsutil_ver\nself.bucket_storage_uri_class = bucket_storage_uri_class\nself.command_map = self._LoadCommandMap()", "commands_dir = os.path.join(self.gsutil_bin_dir, 'gslib', 'commands')\nfor f in os.listdir(commands_dir):\n module_name, ext = os.path.splitext(f)\n if ext == '.py':\n __import__('gslib.commands.%s' % module_name)\ncommand_map = {}\nfor command in Command.__subclasses__():\n command_map[command.command_spec[COMMAND_NAME]] = command\n for command_name_aliases in command.command_spec[COMMAND_NAME_ALIASES]:\n command_map[command_name_aliases] = command\nreturn command_map", "if not args:\n args = []\napi_version = boto.config.get_value('GSUtil', 'default_api_version', '1')\nif not headers:\n headers = {}\nheaders['x-goog-api-version'] = api_version\nif command_name not in self.command_map:\n raise CommandException('Invalid command \"%s\".' % command_name)\ncommand_class = self.command_map[command_name]\ncommand_inst = command_class(self, args, headers, debug, parallel_operations, self.gsutil_bin_dir, self.boto_lib_dir, self.config_file_list, self.gsutil_ver, self.bucket_storage_uri_class, test_method, bypass_prodaccess)\nreturn command_inst.RunCommand()"], "bodies_text": "<|body_start_0|>\n self.gsutil_bin_dir = gsutil_bin_dir\n self.boto_lib_dir = boto_lib_dir\n self.config_file_list = config_file_list\n self.gsutil_ver = gsutil_ver\n self.bucket_storage_uri_class = bucket_storage_uri_class\n self.command_map = self._LoadCommandMap()\n<|end_body_0|>\n\n<|body_start_1|>\n commands_dir = os.path.join(self.gsutil_bin_dir, 'gslib', 'commands')\n for f in os.listdir(commands_dir):\n module_name, ext = os.path.splitext(f)\n if ext == '.py':\n __import__('gslib.commands.%s' % module_name)\n command_map = {}\n for command in Command.__subclasses__():\n command_map[command.command_spec[COMMAND_NAME]] = command\n for command_name_aliases in command.command_spec[COMMAND_NAME_ALIASES]:\n command_map[command_name_aliases] = command\n return command_map\n<|end_body_1|>\n\n<|body_start_2|>\n if not args:\n args = []\n api_version = boto.config.get_value('GSUtil', 'default_api_version', '1')\n if not headers:\n headers = {}\n headers['x-goog-api-version'] = api_version\n if command_name not in self.command_map:\n raise CommandException('Invalid command \"%s\".' 
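A short usage sketch for the NormalizeImage operator in the record above, assuming the class and numpy are in scope; the sample values are chosen so the arithmetic is easy to verify by hand:

import numpy as np

# HWC layout, so is_channel_first=False; a uniform white image keeps the math obvious.
op = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.25, 0.25, 0.25],
                    is_scale=True, is_channel_first=False)

sample = {'image': np.full((2, 2, 3), 255, dtype=np.uint8)}
out = op(sample)
print(out['image'][0, 0])  # [2. 2. 2.] -- each channel is (255/255 - 0.5) / 0.25

Note that the operator mutates the sample dict in place and also returns it, and it applies itself to every key containing 'image', which is what lets it handle multi-image samples.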
% command_name)\n command_class = self.command_map[command_name]\n command_inst = command_class(self, args, headers, debug, parallel_operations, self.gsutil_bin_dir, self.boto_lib_dir, self.config_file_list, self.gsutil_ver, self.bucket_storage_uri_class, test_method, bypass_prodaccess)\n return command_inst.RunCommand()\n<|end_body_2|>\n", "class_docstring": "", "class_name": "CommandRunner", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-free-unknown", "GPL-1.0-or-later", "Python-2.0", "LicenseRef-scancode-python-cwi", "LicenseRef-scancode-other-copyleft", "BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CommandRunner:\n\n def __init__(self, gsutil_bin_dir, boto_lib_dir, config_file_list, gsutil_ver, bucket_storage_uri_class=BucketStorageUri):\n \"\"\"Args: gsutil_bin_dir: Bin dir from which gsutil is running. boto_lib_dir: Lib dir where boto runs. config_file_list: Config file list returned by _GetBotoConfigFileList(). gsutil_ver: Version string of currently running gsutil command. bucket_storage_uri_class: Class to instantiate for cloud StorageUris. Settable for testing/mocking.\"\"\"\n <|body_0|>\n\n def _LoadCommandMap(self):\n \"\"\"Returns dict mapping each command_name to implementing class.\"\"\"\n <|body_1|>\n\n def RunNamedCommand(self, command_name, args=None, headers=None, debug=0, parallel_operations=False, test_method=None, bypass_prodaccess=True):\n \"\"\"Runs the named command. Used by gsutil main, commands built atop other commands, and tests . Args: command_name: The name of the command being run. args: Command-line args (arg0 = actual arg, not command name ala bash). headers: Dictionary containing optional HTTP headers to pass to boto. debug: Debug level to pass in to boto connection (range 0..3). parallel_operations: Should command operations be executed in parallel? test_method: Optional general purpose method for testing purposes. Application and semantics of this method will vary by command and test type. Raises: CommandException: if errors encountered.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.gsutil_bin_dir = gsutil_bin_dir\n self.boto_lib_dir = boto_lib_dir\n self.config_file_list = config_file_list\n self.gsutil_ver = gsutil_ver\n self.bucket_storage_uri_class = bucket_storage_uri_class\n self.command_map = self._LoadCommandMap()\n<|end_body_0|>\n\n<|body_start_1|>\n commands_dir = os.path.join(self.gsutil_bin_dir, 'gslib', 'commands')\n for f in os.listdir(commands_dir):\n module_name, ext = os.path.splitext(f)\n if ext == '.py':\n __import__('gslib.commands.%s' % module_name)\n command_map = {}\n for command in Command.__subclasses__():\n command_map[command.command_spec[COMMAND_NAME]] = command\n for command_name_aliases in command.command_spec[COMMAND_NAME_ALIASES]:\n command_map[command_name_aliases] = command\n return command_map\n<|end_body_1|>\n\n<|body_start_2|>\n if not args:\n args = []\n api_version = boto.config.get_value('GSUtil', 'default_api_version', '1')\n if not headers:\n headers = {}\n headers['x-goog-api-version'] = api_version\n if command_name not in self.command_map:\n raise CommandException('Invalid command \"%s\".' 
% command_name)\n command_class = self.command_map[command_name]\n command_inst = command_class(self, args, headers, debug, parallel_operations, self.gsutil_bin_dir, self.boto_lib_dir, self.config_file_list, self.gsutil_ver, self.bucket_storage_uri_class, test_method, bypass_prodaccess)\n return command_inst.RunCommand()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000194", "length_bytes": 4281, "license_type": "permissive", "methods": [{"docstring": "Args: gsutil_bin_dir: Bin dir from which gsutil is running. boto_lib_dir: Lib dir where boto runs. config_file_list: Config file list returned by _GetBotoConfigFileList(). gsutil_ver: Version string of currently running gsutil command. bucket_storage_uri_class: Class to instantiate for cloud StorageUris. Settable for testing/mocking.", "name": "__init__", "signature": "def __init__(self, gsutil_bin_dir, boto_lib_dir, config_file_list, gsutil_ver, bucket_storage_uri_class=BucketStorageUri)"}, {"docstring": "Returns dict mapping each command_name to implementing class.", "name": "_LoadCommandMap", "signature": "def _LoadCommandMap(self)"}, {"docstring": "Runs the named command. Used by gsutil main, commands built atop other commands, and tests . Args: command_name: The name of the command being run. args: Command-line args (arg0 = actual arg, not command name ala bash). headers: Dictionary containing optional HTTP headers to pass to boto. debug: Debug level to pass in to boto connection (range 0..3). parallel_operations: Should command operations be executed in parallel? test_method: Optional general purpose method for testing purposes. Application and semantics of this method will vary by command and test type. Raises: CommandException: if errors encountered.", "name": "RunNamedCommand", "signature": "def RunNamedCommand(self, command_name, args=None, headers=None, debug=0, parallel_operations=False, test_method=None, bypass_prodaccess=True)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002509", "prompt": "Implement the Python class `CommandRunner` described below.\n\nClass description:\nImplement the CommandRunner class.\n\nMethod signatures and docstrings:\n- def __init__(self, gsutil_bin_dir, boto_lib_dir, config_file_list, gsutil_ver, bucket_storage_uri_class=BucketStorageUri): Args: gsutil_bin_dir: Bin dir from which gsutil is running. boto_lib_dir: Lib dir where boto runs. config_file_list: Config file list returned by _GetBotoConfigFileList(). gsutil_ver: Version string of currently running gsutil command. bucket_storage_uri_class: Class to instantiate for cloud StorageUris. Settable for testing/mocking.\n- def _LoadCommandMap(self): Returns dict mapping each command_name to implementing class.\n- def RunNamedCommand(self, command_name, args=None, headers=None, debug=0, parallel_operations=False, test_method=None, bypass_prodaccess=True): Runs the named command. Used by gsutil main, commands built atop other commands, and tests . Args: command_name: The name of the command being run. args: Command-line args (arg0 = actual arg, not command name ala bash). headers: Dictionary containing optional HTTP headers to pass to boto. debug: Debug level to pass in to boto connection (range 0..3). parallel_operations: Should command operations be executed in parallel? test_method: Optional general purpose method for testing purposes. Application and semantics of this method will vary by command and test type. 
Raises: CommandException: if errors encountered.", "prompted_full_text": "Implement the Python class `CommandRunner` described below.\n\nClass description:\nImplement the CommandRunner class.\n\nMethod signatures and docstrings:\n- def __init__(self, gsutil_bin_dir, boto_lib_dir, config_file_list, gsutil_ver, bucket_storage_uri_class=BucketStorageUri): Args: gsutil_bin_dir: Bin dir from which gsutil is running. boto_lib_dir: Lib dir where boto runs. config_file_list: Config file list returned by _GetBotoConfigFileList(). gsutil_ver: Version string of currently running gsutil command. bucket_storage_uri_class: Class to instantiate for cloud StorageUris. Settable for testing/mocking.\n- def _LoadCommandMap(self): Returns dict mapping each command_name to implementing class.\n- def RunNamedCommand(self, command_name, args=None, headers=None, debug=0, parallel_operations=False, test_method=None, bypass_prodaccess=True): Runs the named command. Used by gsutil main, commands built atop other commands, and tests . Args: command_name: The name of the command being run. args: Command-line args (arg0 = actual arg, not command name ala bash). headers: Dictionary containing optional HTTP headers to pass to boto. debug: Debug level to pass in to boto connection (range 0..3). parallel_operations: Should command operations be executed in parallel? test_method: Optional general purpose method for testing purposes. Application and semantics of this method will vary by command and test type. Raises: CommandException: if errors encountered.\n\n<|skeleton|>\nclass CommandRunner:\n\n def __init__(self, gsutil_bin_dir, boto_lib_dir, config_file_list, gsutil_ver, bucket_storage_uri_class=BucketStorageUri):\n \"\"\"Args: gsutil_bin_dir: Bin dir from which gsutil is running. boto_lib_dir: Lib dir where boto runs. config_file_list: Config file list returned by _GetBotoConfigFileList(). gsutil_ver: Version string of currently running gsutil command. bucket_storage_uri_class: Class to instantiate for cloud StorageUris. Settable for testing/mocking.\"\"\"\n <|body_0|>\n\n def _LoadCommandMap(self):\n \"\"\"Returns dict mapping each command_name to implementing class.\"\"\"\n <|body_1|>\n\n def RunNamedCommand(self, command_name, args=None, headers=None, debug=0, parallel_operations=False, test_method=None, bypass_prodaccess=True):\n \"\"\"Runs the named command. Used by gsutil main, commands built atop other commands, and tests . Args: command_name: The name of the command being run. args: Command-line args (arg0 = actual arg, not command name ala bash). headers: Dictionary containing optional HTTP headers to pass to boto. debug: Debug level to pass in to boto connection (range 0..3). parallel_operations: Should command operations be executed in parallel? test_method: Optional general purpose method for testing purposes. Application and semantics of this method will vary by command and test type. 
Raises: CommandException: if errors encountered.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.gsutil_bin_dir = gsutil_bin_dir\n self.boto_lib_dir = boto_lib_dir\n self.config_file_list = config_file_list\n self.gsutil_ver = gsutil_ver\n self.bucket_storage_uri_class = bucket_storage_uri_class\n self.command_map = self._LoadCommandMap()\n<|end_body_0|>\n\n<|body_start_1|>\n commands_dir = os.path.join(self.gsutil_bin_dir, 'gslib', 'commands')\n for f in os.listdir(commands_dir):\n module_name, ext = os.path.splitext(f)\n if ext == '.py':\n __import__('gslib.commands.%s' % module_name)\n command_map = {}\n for command in Command.__subclasses__():\n command_map[command.command_spec[COMMAND_NAME]] = command\n for command_name_aliases in command.command_spec[COMMAND_NAME_ALIASES]:\n command_map[command_name_aliases] = command\n return command_map\n<|end_body_1|>\n\n<|body_start_2|>\n if not args:\n args = []\n api_version = boto.config.get_value('GSUtil', 'default_api_version', '1')\n if not headers:\n headers = {}\n headers['x-goog-api-version'] = api_version\n if command_name not in self.command_map:\n raise CommandException('Invalid command \"%s\".' % command_name)\n command_class = self.command_map[command_name]\n command_inst = command_class(self, args, headers, debug, parallel_operations, self.gsutil_bin_dir, self.boto_lib_dir, self.config_file_list, self.gsutil_ver, self.bucket_storage_uri_class, test_method, bypass_prodaccess)\n return command_inst.RunCommand()\n<|end_body_2|>\n", "revision_id": "9171447efcf0bb393d41d1dc877c7c13c46d8e38", "skeleton": "<|skeleton|>\nclass CommandRunner:\n\n def __init__(self, gsutil_bin_dir, boto_lib_dir, config_file_list, gsutil_ver, bucket_storage_uri_class=BucketStorageUri):\n \"\"\"Args: gsutil_bin_dir: Bin dir from which gsutil is running. boto_lib_dir: Lib dir where boto runs. config_file_list: Config file list returned by _GetBotoConfigFileList(). gsutil_ver: Version string of currently running gsutil command. bucket_storage_uri_class: Class to instantiate for cloud StorageUris. Settable for testing/mocking.\"\"\"\n <|body_0|>\n\n def _LoadCommandMap(self):\n \"\"\"Returns dict mapping each command_name to implementing class.\"\"\"\n <|body_1|>\n\n def RunNamedCommand(self, command_name, args=None, headers=None, debug=0, parallel_operations=False, test_method=None, bypass_prodaccess=True):\n \"\"\"Runs the named command. Used by gsutil main, commands built atop other commands, and tests . Args: command_name: The name of the command being run. args: Command-line args (arg0 = actual arg, not command name ala bash). headers: Dictionary containing optional HTTP headers to pass to boto. debug: Debug level to pass in to boto connection (range 0..3). parallel_operations: Should command operations be executed in parallel? test_method: Optional general purpose method for testing purposes. Application and semantics of this method will vary by command and test type. Raises: CommandException: if errors encountered.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CommandRunner:\n def __init__(self, gsutil_bin_dir, boto_lib_dir, config_file_list, gsutil_ver, bucket_storage_uri_class=BucketStorageUri):\n \"\"\"Args: gsutil_bin_dir: Bin dir from which gsutil is running. boto_lib_dir: Lib dir where boto runs. config_file_list: Config file list returned by _GetBotoConfigFileList(). 
gsutil_ver: Version string of currently running gsutil command. bucket_storage_uri_class: Class to instantiate for cloud StorageUris. Settable for testing/mocking.\"\"\"\n self.gsutil_bin_dir = gsutil_bin_dir\n self.boto_lib_dir = boto_lib_dir\n self.config_file_list = config_file_list\n self.gsutil_ver = gsutil_ver\n self.bucket_storage_uri_class = bucket_storage_uri_class\n self.command_map = self._LoadCommandMap()\n\n def _LoadCommandMap(self):\n \"\"\"Returns dict mapping each command_name to implementing class.\"\"\"\n commands_dir = os.path.join(self.gsutil_bin_dir, 'gslib', 'commands')\n for f in os.listdir(commands_dir):\n module_name, ext = os.path.splitext(f)\n if ext == '.py':\n __import__('gslib.commands.%s' % module_name)\n command_map = {}\n for command in Command.__subclasses__():\n command_map[command.command_spec[COMMAND_NAME]] = command\n for command_name_aliases in command.command_spec[COMMAND_NAME_ALIASES]:\n command_map[command_name_aliases] = command\n return command_map\n\n def RunNamedCommand(self, command_name, args=None, headers=None, debug=0, parallel_operations=False, test_method=None, bypass_prodaccess=True):\n \"\"\"Runs the named command. Used by gsutil main, commands built atop other commands, and tests . Args: command_name: The name of the command being run. args: Command-line args (arg0 = actual arg, not command name ala bash). headers: Dictionary containing optional HTTP headers to pass to boto. debug: Debug level to pass in to boto connection (range 0..3). parallel_operations: Should command operations be executed in parallel? test_method: Optional general purpose method for testing purposes. Application and semantics of this method will vary by command and test type. Raises: CommandException: if errors encountered.\"\"\"\n if not args:\n args = []\n api_version = boto.config.get_value('GSUtil', 'default_api_version', '1')\n if not headers:\n headers = {}\n headers['x-goog-api-version'] = api_version\n if command_name not in self.command_map:\n raise CommandException('Invalid command \"%s\".' 
% command_name)\n command_class = self.command_map[command_name]\n command_inst = command_class(self, args, headers, debug, parallel_operations, self.gsutil_bin_dir, self.boto_lib_dir, self.config_file_list, self.gsutil_ver, self.bucket_storage_uri_class, test_method, bypass_prodaccess)\n return command_inst.RunCommand()\n", "source": "the_stack_v2_python_sparse", "source_path": "depot_tools/third_party/gsutil/gslib/command_runner.py", "source_repo": "webosce/chromium53", "split": "val", "star_events_count": 0} {"blob_id": "3883f3cc197a9d057f224cfd7c288049048629d8", "bodies": ["if model._meta.app_label == 'auditoria':\n return 'logs'\nreturn None", "if model._meta.app_label == 'auditoria':\n return 'logs'\nreturn None", "if obj1._meta.app_label == 'auditoria' or obj2._meta.app_label == 'auditoria':\n return True\nreturn None", "if app_label == 'auditoria':\n return db == 'logs'\nreturn None"], "bodies_text": "<|body_start_0|>\n if model._meta.app_label == 'auditoria':\n return 'logs'\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if model._meta.app_label == 'auditoria':\n return 'logs'\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n if obj1._meta.app_label == 'auditoria' or obj2._meta.app_label == 'auditoria':\n return True\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n if app_label == 'auditoria':\n return db == 'logs'\n return None\n<|end_body_3|>\n", "class_docstring": "A router to control all database operations on models in the auditoria application.", "class_name": "LogRouter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LogRouter:\n \"\"\"A router to control all database operations on models in the auditoria application.\"\"\"\n\n def db_for_read(self, model, **hints):\n \"\"\"Attempts to read AuditoriaLog models go to logs.\"\"\"\n <|body_0|>\n\n def db_for_write(self, model, **hints):\n \"\"\"Attempts to write AuditoriaLog models go to logs.\"\"\"\n <|body_1|>\n\n def allow_relation(self, obj1, obj2, **hints):\n \"\"\"Allow relations if a model in the AuditoriaLog app is involved.\"\"\"\n <|body_2|>\n\n def allow_migrate(self, db, app_label, model_name=None, **hints):\n \"\"\"Make sure the auditoria app only appears in the 'logs' database.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if model._meta.app_label == 'auditoria':\n return 'logs'\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if model._meta.app_label == 'auditoria':\n return 'logs'\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n if obj1._meta.app_label == 'auditoria' or obj2._meta.app_label == 'auditoria':\n return True\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n if app_label == 'auditoria':\n return db == 'logs'\n return None\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000195", "length_bytes": 1118, "license_type": "no_license", "methods": [{"docstring": "Attempts to read AuditoriaLog models go to logs.", "name": "db_for_read", "signature": "def db_for_read(self, model, **hints)"}, {"docstring": "Attempts to write AuditoriaLog models go to logs.", "name": "db_for_write", "signature": "def db_for_write(self, model, **hints)"}, {"docstring": "Allow relations if a model in the AuditoriaLog app is involved.", "name": "allow_relation", "signature": "def allow_relation(self, obj1, obj2, **hints)"}, {"docstring": "Make sure the auditoria app only appears in the 'logs' database.", "name": "allow_migrate", "signature": "def allow_migrate(self, db, app_label, model_name=None, **hints)"}], 
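The notable move in _LoadCommandMap above is importing every module under gslib/commands purely for its side effects and then treating Command.__subclasses__() as a registry, with aliases resolving to the same class. A self-contained sketch of that pattern (plain string keys stand in for the record's COMMAND_NAME constants, and the toy command is hypothetical):

class Command:
    command_spec = {}

class LsCommand(Command):
    # In gsutil this spec is keyed by COMMAND_NAME / COMMAND_NAME_ALIASES constants.
    command_spec = {'command_name': 'ls', 'command_name_aliases': ['list', 'dir']}

    def RunCommand(self):
        return 'listing...'

def load_command_map():
    command_map = {}
    for command in Command.__subclasses__():
        command_map[command.command_spec['command_name']] = command
        for alias in command.command_spec['command_name_aliases']:
            command_map[alias] = command
    return command_map

command_map = load_command_map()
assert command_map['dir'] is command_map['ls'] is LsCommand
print(command_map['ls']().RunCommand())  # listing...

The upside is that adding a command needs no central registration list; the cost is that modules must actually be imported before __subclasses__() can see them, which is exactly why _LoadCommandMap walks the commands directory first.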
"n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_test_000050", "prompt": "Implement the Python class `LogRouter` described below.\n\nClass description:\nA router to control all database operations on models in the auditoria application.\n\nMethod signatures and docstrings:\n- def db_for_read(self, model, **hints): Attempts to read AuditoriaLog models go to logs.\n- def db_for_write(self, model, **hints): Attempts to write AuditoriaLog models go to logs.\n- def allow_relation(self, obj1, obj2, **hints): Allow relations if a model in the AuditoriaLog app is involved.\n- def allow_migrate(self, db, app_label, model_name=None, **hints): Make sure the auditoria app only appears in the 'logs' database.", "prompted_full_text": "Implement the Python class `LogRouter` described below.\n\nClass description:\nA router to control all database operations on models in the auditoria application.\n\nMethod signatures and docstrings:\n- def db_for_read(self, model, **hints): Attempts to read AuditoriaLog models go to logs.\n- def db_for_write(self, model, **hints): Attempts to write AuditoriaLog models go to logs.\n- def allow_relation(self, obj1, obj2, **hints): Allow relations if a model in the AuditoriaLog app is involved.\n- def allow_migrate(self, db, app_label, model_name=None, **hints): Make sure the auditoria app only appears in the 'logs' database.\n\n<|skeleton|>\nclass LogRouter:\n \"\"\"A router to control all database operations on models in the auditoria application.\"\"\"\n\n def db_for_read(self, model, **hints):\n \"\"\"Attempts to read AuditoriaLog models go to logs.\"\"\"\n <|body_0|>\n\n def db_for_write(self, model, **hints):\n \"\"\"Attempts to write AuditoriaLog models go to logs.\"\"\"\n <|body_1|>\n\n def allow_relation(self, obj1, obj2, **hints):\n \"\"\"Allow relations if a model in the AuditoriaLog app is involved.\"\"\"\n <|body_2|>\n\n def allow_migrate(self, db, app_label, model_name=None, **hints):\n \"\"\"Make sure the auditoria app only appears in the 'logs' database.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if model._meta.app_label == 'auditoria':\n return 'logs'\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if model._meta.app_label == 'auditoria':\n return 'logs'\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n if obj1._meta.app_label == 'auditoria' or obj2._meta.app_label == 'auditoria':\n return True\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n if app_label == 'auditoria':\n return db == 'logs'\n return None\n<|end_body_3|>\n", "revision_id": "16b04f9c3e520f7ca54a1cc28ede3e1e533a33a5", "skeleton": "<|skeleton|>\nclass LogRouter:\n \"\"\"A router to control all database operations on models in the auditoria application.\"\"\"\n\n def db_for_read(self, model, **hints):\n \"\"\"Attempts to read AuditoriaLog models go to logs.\"\"\"\n <|body_0|>\n\n def db_for_write(self, model, **hints):\n \"\"\"Attempts to write AuditoriaLog models go to logs.\"\"\"\n <|body_1|>\n\n def allow_relation(self, obj1, obj2, **hints):\n \"\"\"Allow relations if a model in the AuditoriaLog app is involved.\"\"\"\n <|body_2|>\n\n def allow_migrate(self, db, app_label, model_name=None, **hints):\n \"\"\"Make sure the auditoria app only appears in the 'logs' database.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LogRouter:\n \"\"\"A router to control all database operations on models in the auditoria application.\"\"\"\n\n def 
db_for_read(self, model, **hints):\n \"\"\"Attempts to read AuditoriaLog models go to logs.\"\"\"\n if model._meta.app_label == 'auditoria':\n return 'logs'\n return None\n\n def db_for_write(self, model, **hints):\n \"\"\"Attempts to write AuditoriaLog models go to logs.\"\"\"\n if model._meta.app_label == 'auditoria':\n return 'logs'\n return None\n\n def allow_relation(self, obj1, obj2, **hints):\n \"\"\"Allow relations if a model in the AuditoriaLog app is involved.\"\"\"\n if obj1._meta.app_label == 'auditoria' or obj2._meta.app_label == 'auditoria':\n return True\n return None\n\n def allow_migrate(self, db, app_label, model_name=None, **hints):\n \"\"\"Make sure the auditoria app only appears in the 'logs' database.\"\"\"\n if app_label == 'auditoria':\n return db == 'logs'\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "huellas/auditoria/dbrouters.py", "source_repo": "MarioPayan/asdasdwqe", "split": "val", "star_events_count": 0} {"blob_id": "05e527cd0a8dbbb35cfcc000e19d4875dcdfb1bc", "bodies": ["G = self.parent()\nL = G.splitting_field()\na = G._pari_data.galoispermtopol(pari(self.domain()).Vecsmall())\nP = L._pari_absolute_structure()[1].lift()\na = L(P(a.Mod(L.pari_polynomial('y'))))\nreturn L.hom(a, L)", "if x.parent() == self.parent().splitting_field():\n return self.as_hom()(x)\nelse:\n return self.as_hom()(self.parent()._gc_map(x))", "if not self.parent().is_galois():\n raise TypeError('Ramification degree only defined for Galois extensions')\ngens = self.parent().number_field().ring_of_integers().ring_generators()\nw = [(self(g) - g).valuation(P) for g in gens]\nreturn min(w)"], "bodies_text": "<|body_start_0|>\n G = self.parent()\n L = G.splitting_field()\n a = G._pari_data.galoispermtopol(pari(self.domain()).Vecsmall())\n P = L._pari_absolute_structure()[1].lift()\n a = L(P(a.Mod(L.pari_polynomial('y'))))\n return L.hom(a, L)\n<|end_body_0|>\n\n<|body_start_1|>\n if x.parent() == self.parent().splitting_field():\n return self.as_hom()(x)\n else:\n return self.as_hom()(self.parent()._gc_map(x))\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.parent().is_galois():\n raise TypeError('Ramification degree only defined for Galois extensions')\n gens = self.parent().number_field().ring_of_integers().ring_generators()\n w = [(self(g) - g).valuation(P) for g in gens]\n return min(w)\n<|end_body_2|>\n", "class_docstring": "An element of a Galois group. This is stored as a permutation, but may also be made to act on elements of the field (generally returning elements of its Galois closure). EXAMPLE:: sage: K. = QuadraticField(-7); G = K.galois_group() sage: G[1] (1,2) sage: G[1](w + 2) -w + 2 sage: L. = NumberField(x^3 - 2); G = L.galois_group(names='y') sage: G[4] (1,5)(2,4)(3,6) sage: G[4](v) 1/18*y^4 sage: G[4](G[4](v)) -1/36*y^4 - 1/2*y sage: G[4](G[4](G[4](v))) 1/18*y^4", "class_name": "GaloisGroupElement", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GaloisGroupElement:\n \"\"\"An element of a Galois group. This is stored as a permutation, but may also be made to act on elements of the field (generally returning elements of its Galois closure). EXAMPLE:: sage: K. = QuadraticField(-7); G = K.galois_group() sage: G[1] (1,2) sage: G[1](w + 2) -w + 2 sage: L. 
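For a router like LogRouter above to take effect, Django needs a 'logs' database alias defined and the router's dotted path listed in settings. A minimal settings sketch consistent with the record's source_path (the engine choices and database names are assumptions):

# settings.py
DATABASES = {
    'default': {'ENGINE': 'django.db.backends.postgresql', 'NAME': 'app'},
    'logs': {'ENGINE': 'django.db.backends.postgresql', 'NAME': 'audit_logs'},
}
DATABASE_ROUTERS = ['huellas.auditoria.dbrouters.LogRouter']

# Migrations for the audited app then target the logs alias explicitly:
#   python manage.py migrate auditoria --database=logs

Returning None from the router methods is deliberate: it tells Django to fall through to the next router in DATABASE_ROUTERS, or to the 'default' alias when no router claims the model.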
= NumberField(x^3 - 2); G = L.galois_group(names='y') sage: G[4] (1,5)(2,4)(3,6) sage: G[4](v) 1/18*y^4 sage: G[4](G[4](v)) -1/36*y^4 - 1/2*y sage: G[4](G[4](G[4](v))) 1/18*y^4\"\"\"\n\n def as_hom(self):\n \"\"\"Return the homomorphism L -> L corresponding to self, where L is the Galois closure of the ambient number field. EXAMPLE:: sage: G = QuadraticField(-7,'w').galois_group() sage: G[1].as_hom() Ring endomorphism of Number Field in w with defining polynomial x^2 + 7 Defn: w |--> -w TESTS: Number fields defined by non-monic and non-integral polynomials are supported (:trac:`252`):: sage: R. = QQ[] sage: f = 7/9*x^3 + 7/3*x^2 - 56*x + 123 sage: K. = NumberField(f) sage: G = K.galois_group() sage: G[1].as_hom() Ring endomorphism of Number Field in a with defining polynomial 7/9*x^3 + 7/3*x^2 - 56*x + 123 Defn: a |--> -7/15*a^2 - 18/5*a + 96/5 sage: prod(x - sigma(a) for sigma in G) == f.monic\"\"\"\n <|body_0|>\n\n def __call__(self, x):\n \"\"\"Return the action of self on an element x in the number field of self (or its Galois closure). EXAMPLE:: sage: K. = QuadraticField(-7) sage: f = K.galois_group()[1] sage: f(w) -w\"\"\"\n <|body_1|>\n\n def ramification_degree(self, P):\n \"\"\"Return the greatest value of v such that s acts trivially modulo P^v. Should only be used if P is prime and s is in the decomposition group of P. EXAMPLE:: sage: K. = NumberField(x^3 - 3, 'a').galois_closure() sage: G = K.galois_group() sage: P = K.primes_above(3)[0] sage: s = hom(K, K, 1/18*b^4 - 1/2*b) sage: G(s).ramification_degree(P) 4\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n G = self.parent()\n L = G.splitting_field()\n a = G._pari_data.galoispermtopol(pari(self.domain()).Vecsmall())\n P = L._pari_absolute_structure()[1].lift()\n a = L(P(a.Mod(L.pari_polynomial('y'))))\n return L.hom(a, L)\n<|end_body_0|>\n\n<|body_start_1|>\n if x.parent() == self.parent().splitting_field():\n return self.as_hom()(x)\n else:\n return self.as_hom()(self.parent()._gc_map(x))\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.parent().is_galois():\n raise TypeError('Ramification degree only defined for Galois extensions')\n gens = self.parent().number_field().ring_of_integers().ring_generators()\n w = [(self(g) - g).valuation(P) for g in gens]\n return min(w)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000196", "length_bytes": 27204, "license_type": "no_license", "methods": [{"docstring": "Return the homomorphism L -> L corresponding to self, where L is the Galois closure of the ambient number field. EXAMPLE:: sage: G = QuadraticField(-7,'w').galois_group() sage: G[1].as_hom() Ring endomorphism of Number Field in w with defining polynomial x^2 + 7 Defn: w |--> -w TESTS: Number fields defined by non-monic and non-integral polynomials are supported (:trac:`252`):: sage: R. = QQ[] sage: f = 7/9*x^3 + 7/3*x^2 - 56*x + 123 sage: K. = NumberField(f) sage: G = K.galois_group() sage: G[1].as_hom() Ring endomorphism of Number Field in a with defining polynomial 7/9*x^3 + 7/3*x^2 - 56*x + 123 Defn: a |--> -7/15*a^2 - 18/5*a + 96/5 sage: prod(x - sigma(a) for sigma in G) == f.monic", "name": "as_hom", "signature": "def as_hom(self)"}, {"docstring": "Return the action of self on an element x in the number field of self (or its Galois closure). EXAMPLE:: sage: K. = QuadraticField(-7) sage: f = K.galois_group()[1] sage: f(w) -w", "name": "__call__", "signature": "def __call__(self, x)"}, {"docstring": "Return the greatest value of v such that s acts trivially modulo P^v. 
Should only be used if P is prime and s is in the decomposition group of P. EXAMPLE:: sage: K. = NumberField(x^3 - 3, 'a').galois_closure() sage: G = K.galois_group() sage: P = K.primes_above(3)[0] sage: s = hom(K, K, 1/18*b^4 - 1/2*b) sage: G(s).ramification_degree(P) 4", "name": "ramification_degree", "signature": "def ramification_degree(self, P)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002188", "prompt": "Implement the Python class `GaloisGroupElement` described below.\n\nClass description:\nAn element of a Galois group. This is stored as a permutation, but may also be made to act on elements of the field (generally returning elements of its Galois closure). EXAMPLE:: sage: K. = QuadraticField(-7); G = K.galois_group() sage: G[1] (1,2) sage: G[1](w + 2) -w + 2 sage: L. = NumberField(x^3 - 2); G = L.galois_group(names='y') sage: G[4] (1,5)(2,4)(3,6) sage: G[4](v) 1/18*y^4 sage: G[4](G[4](v)) -1/36*y^4 - 1/2*y sage: G[4](G[4](G[4](v))) 1/18*y^4\n\nMethod signatures and docstrings:\n- def as_hom(self): Return the homomorphism L -> L corresponding to self, where L is the Galois closure of the ambient number field. EXAMPLE:: sage: G = QuadraticField(-7,'w').galois_group() sage: G[1].as_hom() Ring endomorphism of Number Field in w with defining polynomial x^2 + 7 Defn: w |--> -w TESTS: Number fields defined by non-monic and non-integral polynomials are supported (:trac:`252`):: sage: R. = QQ[] sage: f = 7/9*x^3 + 7/3*x^2 - 56*x + 123 sage: K. = NumberField(f) sage: G = K.galois_group() sage: G[1].as_hom() Ring endomorphism of Number Field in a with defining polynomial 7/9*x^3 + 7/3*x^2 - 56*x + 123 Defn: a |--> -7/15*a^2 - 18/5*a + 96/5 sage: prod(x - sigma(a) for sigma in G) == f.monic\n- def __call__(self, x): Return the action of self on an element x in the number field of self (or its Galois closure). EXAMPLE:: sage: K. = QuadraticField(-7) sage: f = K.galois_group()[1] sage: f(w) -w\n- def ramification_degree(self, P): Return the greatest value of v such that s acts trivially modulo P^v. Should only be used if P is prime and s is in the decomposition group of P. EXAMPLE:: sage: K. = NumberField(x^3 - 3, 'a').galois_closure() sage: G = K.galois_group() sage: P = K.primes_above(3)[0] sage: s = hom(K, K, 1/18*b^4 - 1/2*b) sage: G(s).ramification_degree(P) 4", "prompted_full_text": "Implement the Python class `GaloisGroupElement` described below.\n\nClass description:\nAn element of a Galois group. This is stored as a permutation, but may also be made to act on elements of the field (generally returning elements of its Galois closure). EXAMPLE:: sage: K. = QuadraticField(-7); G = K.galois_group() sage: G[1] (1,2) sage: G[1](w + 2) -w + 2 sage: L. = NumberField(x^3 - 2); G = L.galois_group(names='y') sage: G[4] (1,5)(2,4)(3,6) sage: G[4](v) 1/18*y^4 sage: G[4](G[4](v)) -1/36*y^4 - 1/2*y sage: G[4](G[4](G[4](v))) 1/18*y^4\n\nMethod signatures and docstrings:\n- def as_hom(self): Return the homomorphism L -> L corresponding to self, where L is the Galois closure of the ambient number field. EXAMPLE:: sage: G = QuadraticField(-7,'w').galois_group() sage: G[1].as_hom() Ring endomorphism of Number Field in w with defining polynomial x^2 + 7 Defn: w |--> -w TESTS: Number fields defined by non-monic and non-integral polynomials are supported (:trac:`252`):: sage: R. = QQ[] sage: f = 7/9*x^3 + 7/3*x^2 - 56*x + 123 sage: K. 
= NumberField(f) sage: G = K.galois_group() sage: G[1].as_hom() Ring endomorphism of Number Field in a with defining polynomial 7/9*x^3 + 7/3*x^2 - 56*x + 123 Defn: a |--> -7/15*a^2 - 18/5*a + 96/5 sage: prod(x - sigma(a) for sigma in G) == f.monic\n- def __call__(self, x): Return the action of self on an element x in the number field of self (or its Galois closure). EXAMPLE:: sage: K. = QuadraticField(-7) sage: f = K.galois_group()[1] sage: f(w) -w\n- def ramification_degree(self, P): Return the greatest value of v such that s acts trivially modulo P^v. Should only be used if P is prime and s is in the decomposition group of P. EXAMPLE:: sage: K. = NumberField(x^3 - 3, 'a').galois_closure() sage: G = K.galois_group() sage: P = K.primes_above(3)[0] sage: s = hom(K, K, 1/18*b^4 - 1/2*b) sage: G(s).ramification_degree(P) 4\n\n<|skeleton|>\nclass GaloisGroupElement:\n \"\"\"An element of a Galois group. This is stored as a permutation, but may also be made to act on elements of the field (generally returning elements of its Galois closure). EXAMPLE:: sage: K. = QuadraticField(-7); G = K.galois_group() sage: G[1] (1,2) sage: G[1](w + 2) -w + 2 sage: L. = NumberField(x^3 - 2); G = L.galois_group(names='y') sage: G[4] (1,5)(2,4)(3,6) sage: G[4](v) 1/18*y^4 sage: G[4](G[4](v)) -1/36*y^4 - 1/2*y sage: G[4](G[4](G[4](v))) 1/18*y^4\"\"\"\n\n def as_hom(self):\n \"\"\"Return the homomorphism L -> L corresponding to self, where L is the Galois closure of the ambient number field. EXAMPLE:: sage: G = QuadraticField(-7,'w').galois_group() sage: G[1].as_hom() Ring endomorphism of Number Field in w with defining polynomial x^2 + 7 Defn: w |--> -w TESTS: Number fields defined by non-monic and non-integral polynomials are supported (:trac:`252`):: sage: R. = QQ[] sage: f = 7/9*x^3 + 7/3*x^2 - 56*x + 123 sage: K. = NumberField(f) sage: G = K.galois_group() sage: G[1].as_hom() Ring endomorphism of Number Field in a with defining polynomial 7/9*x^3 + 7/3*x^2 - 56*x + 123 Defn: a |--> -7/15*a^2 - 18/5*a + 96/5 sage: prod(x - sigma(a) for sigma in G) == f.monic\"\"\"\n <|body_0|>\n\n def __call__(self, x):\n \"\"\"Return the action of self on an element x in the number field of self (or its Galois closure). EXAMPLE:: sage: K. = QuadraticField(-7) sage: f = K.galois_group()[1] sage: f(w) -w\"\"\"\n <|body_1|>\n\n def ramification_degree(self, P):\n \"\"\"Return the greatest value of v such that s acts trivially modulo P^v. Should only be used if P is prime and s is in the decomposition group of P. EXAMPLE:: sage: K. 
= NumberField(x^3 - 3, 'a').galois_closure() sage: G = K.galois_group() sage: P = K.primes_above(3)[0] sage: s = hom(K, K, 1/18*b^4 - 1/2*b) sage: G(s).ramification_degree(P) 4\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n G = self.parent()\n L = G.splitting_field()\n a = G._pari_data.galoispermtopol(pari(self.domain()).Vecsmall())\n P = L._pari_absolute_structure()[1].lift()\n a = L(P(a.Mod(L.pari_polynomial('y'))))\n return L.hom(a, L)\n<|end_body_0|>\n\n<|body_start_1|>\n if x.parent() == self.parent().splitting_field():\n return self.as_hom()(x)\n else:\n return self.as_hom()(self.parent()._gc_map(x))\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.parent().is_galois():\n raise TypeError('Ramification degree only defined for Galois extensions')\n gens = self.parent().number_field().ring_of_integers().ring_generators()\n w = [(self(g) - g).valuation(P) for g in gens]\n return min(w)\n<|end_body_2|>\n", "revision_id": "0d9eacbf74e2acffefde93e39f8bcbec745cdaba", "skeleton": "<|skeleton|>\nclass GaloisGroupElement:\n \"\"\"An element of a Galois group. This is stored as a permutation, but may also be made to act on elements of the field (generally returning elements of its Galois closure). EXAMPLE:: sage: K. = QuadraticField(-7); G = K.galois_group() sage: G[1] (1,2) sage: G[1](w + 2) -w + 2 sage: L. = NumberField(x^3 - 2); G = L.galois_group(names='y') sage: G[4] (1,5)(2,4)(3,6) sage: G[4](v) 1/18*y^4 sage: G[4](G[4](v)) -1/36*y^4 - 1/2*y sage: G[4](G[4](G[4](v))) 1/18*y^4\"\"\"\n\n def as_hom(self):\n \"\"\"Return the homomorphism L -> L corresponding to self, where L is the Galois closure of the ambient number field. EXAMPLE:: sage: G = QuadraticField(-7,'w').galois_group() sage: G[1].as_hom() Ring endomorphism of Number Field in w with defining polynomial x^2 + 7 Defn: w |--> -w TESTS: Number fields defined by non-monic and non-integral polynomials are supported (:trac:`252`):: sage: R. = QQ[] sage: f = 7/9*x^3 + 7/3*x^2 - 56*x + 123 sage: K. = NumberField(f) sage: G = K.galois_group() sage: G[1].as_hom() Ring endomorphism of Number Field in a with defining polynomial 7/9*x^3 + 7/3*x^2 - 56*x + 123 Defn: a |--> -7/15*a^2 - 18/5*a + 96/5 sage: prod(x - sigma(a) for sigma in G) == f.monic\"\"\"\n <|body_0|>\n\n def __call__(self, x):\n \"\"\"Return the action of self on an element x in the number field of self (or its Galois closure). EXAMPLE:: sage: K. = QuadraticField(-7) sage: f = K.galois_group()[1] sage: f(w) -w\"\"\"\n <|body_1|>\n\n def ramification_degree(self, P):\n \"\"\"Return the greatest value of v such that s acts trivially modulo P^v. Should only be used if P is prime and s is in the decomposition group of P. EXAMPLE:: sage: K. = NumberField(x^3 - 3, 'a').galois_closure() sage: G = K.galois_group() sage: P = K.primes_above(3)[0] sage: s = hom(K, K, 1/18*b^4 - 1/2*b) sage: G(s).ramification_degree(P) 4\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GaloisGroupElement:\n \"\"\"An element of a Galois group. This is stored as a permutation, but may also be made to act on elements of the field (generally returning elements of its Galois closure). EXAMPLE:: sage: K. = QuadraticField(-7); G = K.galois_group() sage: G[1] (1,2) sage: G[1](w + 2) -w + 2 sage: L. 
= NumberField(x^3 - 2); G = L.galois_group(names='y') sage: G[4] (1,5)(2,4)(3,6) sage: G[4](v) 1/18*y^4 sage: G[4](G[4](v)) -1/36*y^4 - 1/2*y sage: G[4](G[4](G[4](v))) 1/18*y^4\"\"\"\n\n def as_hom(self):\n \"\"\"Return the homomorphism L -> L corresponding to self, where L is the Galois closure of the ambient number field. EXAMPLE:: sage: G = QuadraticField(-7,'w').galois_group() sage: G[1].as_hom() Ring endomorphism of Number Field in w with defining polynomial x^2 + 7 Defn: w |--> -w TESTS: Number fields defined by non-monic and non-integral polynomials are supported (:trac:`252`):: sage: R. = QQ[] sage: f = 7/9*x^3 + 7/3*x^2 - 56*x + 123 sage: K. = NumberField(f) sage: G = K.galois_group() sage: G[1].as_hom() Ring endomorphism of Number Field in a with defining polynomial 7/9*x^3 + 7/3*x^2 - 56*x + 123 Defn: a |--> -7/15*a^2 - 18/5*a + 96/5 sage: prod(x - sigma(a) for sigma in G) == f.monic\"\"\"\n G = self.parent()\n L = G.splitting_field()\n a = G._pari_data.galoispermtopol(pari(self.domain()).Vecsmall())\n P = L._pari_absolute_structure()[1].lift()\n a = L(P(a.Mod(L.pari_polynomial('y'))))\n return L.hom(a, L)\n\n def __call__(self, x):\n \"\"\"Return the action of self on an element x in the number field of self (or its Galois closure). EXAMPLE:: sage: K. = QuadraticField(-7) sage: f = K.galois_group()[1] sage: f(w) -w\"\"\"\n if x.parent() == self.parent().splitting_field():\n return self.as_hom()(x)\n else:\n return self.as_hom()(self.parent()._gc_map(x))\n\n def ramification_degree(self, P):\n \"\"\"Return the greatest value of v such that s acts trivially modulo P^v. Should only be used if P is prime and s is in the decomposition group of P. EXAMPLE:: sage: K. = NumberField(x^3 - 3, 'a').galois_closure() sage: G = K.galois_group() sage: P = K.primes_above(3)[0] sage: s = hom(K, K, 1/18*b^4 - 1/2*b) sage: G(s).ramification_degree(P) 4\"\"\"\n if not self.parent().is_galois():\n raise TypeError('Ramification degree only defined for Galois extensions')\n gens = self.parent().number_field().ring_of_integers().ring_generators()\n w = [(self(g) - g).valuation(P) for g in gens]\n return min(w)\n", "source": "the_stack_v2_python_sparse", "source_path": "sage/src/sage/rings/number_field/galois_group.py", "source_repo": "bopopescu/geosci", "split": "val", "star_events_count": 0} {"blob_id": "c0b05c2a0f8bcf0a678f65b56cf671df51ac7ed0", "bodies": ["value_lines = []\ntry:\n file = open(file_name + '\\\\' + file_path, 'r')\n try:\n print('Reading file: %s' % (file_name + '\\\\' + file_path))\n raw_lines = file.readlines()\n for line in raw_lines:\n text_line = line.split('\\n')\n value_lines.append(text_line[0])\n finally:\n file.close()\nexcept IOError as err:\n print('IOError:{0}'.format(err))\nreturn value_lines", "try:\n file = open(file_name, 'a')\n try:\n file.writelines(text_value)\n finally:\n file.close()\nexcept IOError as err:\n print('IOError:{0}'.format(err))\nelse:\n print(u'Successfully wrote to %s: %s' % (file_name, text_value))"], "bodies_text": "<|body_start_0|>\n value_lines = []\n try:\n file = open(file_name + '\\\\' + file_path, 'r')\n try:\n print('Reading file: %s' % (file_name + '\\\\' + file_path))\n raw_lines = file.readlines()\n for line in raw_lines:\n text_line = line.split('\\n')\n value_lines.append(text_line[0])\n finally:\n file.close()\n except IOError as err:\n print('IOError:{0}'.format(err))\n return value_lines\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n file = open(file_name, 'a')\n try:\n file.writelines(text_value)\n finally:\n file.close()\n except IOError as err:\n print('IOError:{0}'.format(err))\n else:\n print(u'Successfully wrote to %s: %s' % (file_name, text_value))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "UseText", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UseText:\n\n def read_text(file_name, file_path):\n \"\"\"Read a text document. :param file_name: text document name :param file_path: text document path :return: the contents of the text file as a list\"\"\"\n <|body_0|>\n\n def write_text(file_name, text_value):\n \"\"\"Write to a text document. :param file_name: text document name (path) :param text_value: content (value) to write :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n value_lines = []\n try:\n file = open(file_name + '\\\\' + file_path, 'r')\n try:\n print('Reading file: %s' % (file_name + '\\\\' + file_path))\n raw_lines = file.readlines()\n for line in raw_lines:\n text_line = line.split('\\n')\n value_lines.append(text_line[0])\n finally:\n file.close()\n except IOError as err:\n print('IOError:{0}'.format(err))\n return value_lines\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n file = open(file_name, 'a')\n try:\n file.writelines(text_value)\n finally:\n file.close()\n except IOError as err:\n print('IOError:{0}'.format(err))\n else:\n print(u'Successfully wrote to %s: %s' % (file_name, text_value))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000197", "length_bytes": 1728, "license_type": "no_license", "methods": [{"docstring": "Read a text document. :param file_name: text document name :param file_path: text document path :return: the contents of the text file as a list", "name": "read_text", "signature": "def read_text(file_name, file_path)"}, {"docstring": "Write to a text document. :param file_name: text document name (path) :param text_value: content (value) to write :return:", "name": "write_text", "signature": "def write_text(file_name, text_value)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003590", "prompt": "Implement the Python class `UseText` described below.\n\nClass description:\nImplement the UseText class.\n\nMethod signatures and docstrings:\n- def read_text(file_name, file_path): Read a text document. :param file_name: text document name :param file_path: text document path :return: the contents of the text file as a list\n- def write_text(file_name, text_value): Write to a text document. :param file_name: text document name (path) :param text_value: content (value) to write :return:", "prompted_full_text": "Implement the Python class `UseText` described below.\n\nClass description:\nImplement the UseText class.\n\nMethod signatures and docstrings:\n- def read_text(file_name, file_path): Read a text document. :param file_name: text document name :param file_path: text document path :return: the contents of the text file as a list\n- def write_text(file_name, text_value): Write to a text document. :param file_name: text document name (path) :param text_value: content (value) to write :return:\n\n<|skeleton|>\nclass UseText:\n\n def read_text(file_name, file_path):\n \"\"\"Read a text document. :param file_name: text document name :param file_path: text document path :return: the contents of the text file as a list\"\"\"\n <|body_0|>\n\n def write_text(file_name, text_value):\n \"\"\"Write to a text document. :param file_name: text document name (path) :param text_value: content (value) to write :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n value_lines = []\n try:\n file = open(file_name + '\\\\' + file_path, 'r')\n try:\n print('Reading file: %s' % (file_name + '\\\\' + file_path))\n raw_lines = file.readlines()\n for line in raw_lines:\n text_line = line.split('\\n')\n value_lines.append(text_line[0])\n finally:\n file.close()\n except IOError as err:\n print('IOError:{0}'.format(err))\n return value_lines\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n file = open(file_name, 'a')\n try:\n file.writelines(text_value)\n finally:\n file.close()\n except IOError as err:\n print('IOError:{0}'.format(err))\n else:\n print(u'Successfully wrote to %s: %s' % (file_name, text_value))\n<|end_body_1|>\n", "revision_id": "e09df64a0b19ad128152a9fb6c9e73e6271207bb", "skeleton": "<|skeleton|>\nclass UseText:\n\n def read_text(file_name, file_path):\n \"\"\"Read a text document. :param file_name: text document name :param file_path: text document path :return: the contents of the text file as a list\"\"\"\n <|body_0|>\n\n def write_text(file_name, text_value):\n \"\"\"Write to a text document. :param file_name: text document name (path) :param text_value: content (value) to write :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UseText:\n def read_text(file_name, file_path):\n \"\"\"Read a text document. :param file_name: text document name :param file_path: text document path :return: the contents of the text file as a list\"\"\"\n value_lines = []\n try:\n file = open(file_name + '\\\\' + file_path, 'r')\n try:\n print('Reading file: %s' % (file_name + '\\\\' + file_path))\n raw_lines = file.readlines()\n for line in raw_lines:\n text_line = line.split('\\n')\n value_lines.append(text_line[0])\n finally:\n file.close()\n except IOError as err:\n print('IOError:{0}'.format(err))\n return value_lines\n\n def write_text(file_name, text_value):\n \"\"\"Write to a text document. :param file_name: text document name (path) :param text_value: content (value) to write :return:\"\"\"\n try:\n file = open(file_name, 'a')\n try:\n file.writelines(text_value)\n finally:\n file.close()\n except IOError as err:\n print('IOError:{0}'.format(err))\n else:\n print(u'Successfully wrote to %s: %s' % (file_name, text_value))\n", "source": "the_stack_v2_python_sparse", "source_path": "common/use_text.py", "source_repo": "wallaceok/GoldGarden", "split": "val", "star_events_count": 0} {"blob_id": "6ced3c0472633753126be4992303a1f4c315f026", "bodies": ["assert isinstance(target_config, NormalTrainingConfig)\nassert isinstance(attack_config, AttackConfig)\ntarget_config.validate()\nattack_config.validate()\nself.target_config = target_config\n' (NormalTrainingConfig) Config. '\nself.attack_config = attack_config\n' (AttackConfig) Config. '\nself.log_dir = None\n' (str) Log directory. '\nself.model_file = None\n' (str) Model file. '\nself.perturbations_file = None\n' (str) Perturbations file. '\nself.cuda = None\n' (bool) Whether to use CUDA. '\nself.writer = None\n' (common.summary.SummaryWriter or torch.utils.tensorboard.SumamryWriter) Summary writer. '\nself.testloader = None\n' (torch.utils.data.DataLoader) Test loader. '\nself.model = None\n' (torch.nn.Module) Model. 
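
For reference, a minimal round-trip through the UseText helpers recorded above. The file name is invented, the methods take no self and so are called straight off the class, and read_text joins its two arguments with a backslash, so the demo assumes a Windows-style working directory.

# Hypothetical usage of the UseText record above.
UseText.write_text('notes.txt', ['first line\n', 'second line\n'])
lines = UseText.read_text('.', 'notes.txt')   # opens '.\\notes.txt'
print(lines)                                  # ['first line', 'second line']
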
'", "self.log_dir = common.paths.log_dir('%s/%s' % (self.target_config.directory, self.attack_config.directory))\nself.perturbations_file = common.paths.experiment_file('%s/%s' % (self.target_config.directory, self.attack_config.directory), 'perturbations', common.paths.HDF5_EXT)\nself.model_file = common.paths.experiment_file(self.target_config.directory, 'classifier', common.paths.STATE_EXT)\nif self.attack_config.snapshot is not None:\n self.log_dir = common.paths.log_dir('%s/%s_%d' % (self.target_config.directory, self.attack_config.directory, self.attack_config.snapshot))\n self.perturbations_file = common.paths.experiment_file('%s/%s_%d' % (self.target_config.directory, self.attack_config.directory, self.attack_config.snapshot), 'perturbations', common.paths.HDF5_EXT)\n self.model_file += '.%d' % self.attack_config.snapshot\nassert os.path.exists(self.model_file), 'file %s not found' % self.model_file\nattempts = 0\nsamples = 0\nif os.path.exists(self.perturbations_file):\n errors = common.utils.read_hdf5(self.perturbations_file, key='errors')\n attempts = errors.shape[0]\n samples = errors.shape[1]\nif not os.path.exists(self.perturbations_file) or attempts < self.attack_config.attempts or samples < len(self.attack_config.testloader.dataset):\n self.cuda = self.target_config.cuda\n if callable(self.attack_config.get_writer):\n self.writer = common.utils.partial(self.attack_config.get_writer, self.log_dir)\n else:\n self.writer = self.attack_config.get_writer\n state = common.state.State.load(self.model_file)\n self.model = state.model\n if self.cuda:\n self.model = self.model.cuda()\n self.model.eval()\n perturbations, probabilities, errors = common.test.attack(self.model, self.attack_config.testloader, self.attack_config.attack, self.attack_config.objective, attempts=self.attack_config.attempts, writer=self.writer, cuda=self.cuda)\n common.utils.write_hdf5(self.perturbations_file, [perturbations, probabilities, errors], ['perturbations', 'probabilities', 'errors'])"], "bodies_text": "<|body_start_0|>\n assert isinstance(target_config, NormalTrainingConfig)\n assert isinstance(attack_config, AttackConfig)\n target_config.validate()\n attack_config.validate()\n self.target_config = target_config\n ' (NormalTrainingConfig) Config. '\n self.attack_config = attack_config\n ' (AttackConfig) Config. '\n self.log_dir = None\n ' (str) Log directory. '\n self.model_file = None\n ' (str) Model file. '\n self.perturbations_file = None\n ' (str) Perturbations file. '\n self.cuda = None\n ' (bool) Whether to use CUDA. '\n self.writer = None\n ' (common.summary.SummaryWriter or torch.utils.tensorboard.SumamryWriter) Summary writer. '\n self.testloader = None\n ' (torch.utils.data.DataLoader) Test loader. '\n self.model = None\n ' (torch.nn.Module) Model. 
'\n<|end_body_0|>\n\n<|body_start_1|>\n self.log_dir = common.paths.log_dir('%s/%s' % (self.target_config.directory, self.attack_config.directory))\n self.perturbations_file = common.paths.experiment_file('%s/%s' % (self.target_config.directory, self.attack_config.directory), 'perturbations', common.paths.HDF5_EXT)\n self.model_file = common.paths.experiment_file(self.target_config.directory, 'classifier', common.paths.STATE_EXT)\n if self.attack_config.snapshot is not None:\n self.log_dir = common.paths.log_dir('%s/%s_%d' % (self.target_config.directory, self.attack_config.directory, self.attack_config.snapshot))\n self.perturbations_file = common.paths.experiment_file('%s/%s_%d' % (self.target_config.directory, self.attack_config.directory, self.attack_config.snapshot), 'perturbations', common.paths.HDF5_EXT)\n self.model_file += '.%d' % self.attack_config.snapshot\n assert os.path.exists(self.model_file), 'file %s not found' % self.model_file\n attempts = 0\n samples = 0\n if os.path.exists(self.perturbations_file):\n errors = common.utils.read_hdf5(self.perturbations_file, key='errors')\n attempts = errors.shape[0]\n samples = errors.shape[1]\n if not os.path.exists(self.perturbations_file) or attempts < self.attack_config.attempts or samples < len(self.attack_config.testloader.dataset):\n self.cuda = self.target_config.cuda\n if callable(self.attack_config.get_writer):\n self.writer = common.utils.partial(self.attack_config.get_writer, self.log_dir)\n else:\n self.writer = self.attack_config.get_writer\n state = common.state.State.load(self.model_file)\n self.model = state.model\n if self.cuda:\n self.model = self.model.cuda()\n self.model.eval()\n perturbations, probabilities, errors = common.test.attack(self.model, self.attack_config.testloader, self.attack_config.attack, self.attack_config.objective, attempts=self.attack_config.attempts, writer=self.writer, cuda=self.cuda)\n common.utils.write_hdf5(self.perturbations_file, [perturbations, probabilities, errors], ['perturbations', 'probabilities', 'errors'])\n<|end_body_1|>\n", "class_docstring": "Regular attack interface.", "class_name": "AttackInterface", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AttackInterface:\n \"\"\"Regular attack interface.\"\"\"\n\n def __init__(self, target_config, attack_config):\n \"\"\"Initialize. :param target_config: configuration :type target_config: [str] :param attack_config: configuration :type attack_config: [str]\"\"\"\n <|body_0|>\n\n def main(self):\n \"\"\"Main.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert isinstance(target_config, NormalTrainingConfig)\n assert isinstance(attack_config, AttackConfig)\n target_config.validate()\n attack_config.validate()\n self.target_config = target_config\n ' (NormalTrainingConfig) Config. '\n self.attack_config = attack_config\n ' (AttackConfig) Config. '\n self.log_dir = None\n ' (str) Log directory. '\n self.model_file = None\n ' (str) Model file. '\n self.perturbations_file = None\n ' (str) Perturbations file. '\n self.cuda = None\n ' (bool) Whether to use CUDA. '\n self.writer = None\n ' (common.summary.SummaryWriter or torch.utils.tensorboard.SumamryWriter) Summary writer. '\n self.testloader = None\n ' (torch.utils.data.DataLoader) Test loader. '\n self.model = None\n ' (torch.nn.Module) Model. 
'\n<|end_body_0|>\n\n<|body_start_1|>\n self.log_dir = common.paths.log_dir('%s/%s' % (self.target_config.directory, self.attack_config.directory))\n self.perturbations_file = common.paths.experiment_file('%s/%s' % (self.target_config.directory, self.attack_config.directory), 'perturbations', common.paths.HDF5_EXT)\n self.model_file = common.paths.experiment_file(self.target_config.directory, 'classifier', common.paths.STATE_EXT)\n if self.attack_config.snapshot is not None:\n self.log_dir = common.paths.log_dir('%s/%s_%d' % (self.target_config.directory, self.attack_config.directory, self.attack_config.snapshot))\n self.perturbations_file = common.paths.experiment_file('%s/%s_%d' % (self.target_config.directory, self.attack_config.directory, self.attack_config.snapshot), 'perturbations', common.paths.HDF5_EXT)\n self.model_file += '.%d' % self.attack_config.snapshot\n assert os.path.exists(self.model_file), 'file %s not found' % self.model_file\n attempts = 0\n samples = 0\n if os.path.exists(self.perturbations_file):\n errors = common.utils.read_hdf5(self.perturbations_file, key='errors')\n attempts = errors.shape[0]\n samples = errors.shape[1]\n if not os.path.exists(self.perturbations_file) or attempts < self.attack_config.attempts or samples < len(self.attack_config.testloader.dataset):\n self.cuda = self.target_config.cuda\n if callable(self.attack_config.get_writer):\n self.writer = common.utils.partial(self.attack_config.get_writer, self.log_dir)\n else:\n self.writer = self.attack_config.get_writer\n state = common.state.State.load(self.model_file)\n self.model = state.model\n if self.cuda:\n self.model = self.model.cuda()\n self.model.eval()\n perturbations, probabilities, errors = common.test.attack(self.model, self.attack_config.testloader, self.attack_config.attack, self.attack_config.objective, attempts=self.attack_config.attempts, writer=self.writer, cuda=self.cuda)\n common.utils.write_hdf5(self.perturbations_file, [perturbations, probabilities, errors], ['perturbations', 'probabilities', 'errors'])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000198", "length_bytes": 16771, "license_type": "no_license", "methods": [{"docstring": "Initialize. :param target_config: configuration :type target_config: [str] :param attack_config: configuration :type attack_config: [str]", "name": "__init__", "signature": "def __init__(self, target_config, attack_config)"}, {"docstring": "Main.", "name": "main", "signature": "def main(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003618", "prompt": "Implement the Python class `AttackInterface` described below.\n\nClass description:\nRegular attack interface.\n\nMethod signatures and docstrings:\n- def __init__(self, target_config, attack_config): Initialize. :param target_config: configuration :type target_config: [str] :param attack_config: configuration :type attack_config: [str]\n- def main(self): Main.", "prompted_full_text": "Implement the Python class `AttackInterface` described below.\n\nClass description:\nRegular attack interface.\n\nMethod signatures and docstrings:\n- def __init__(self, target_config, attack_config): Initialize. :param target_config: configuration :type target_config: [str] :param attack_config: configuration :type attack_config: [str]\n- def main(self): Main.\n\n<|skeleton|>\nclass AttackInterface:\n \"\"\"Regular attack interface.\"\"\"\n\n def __init__(self, target_config, attack_config):\n \"\"\"Initialize. 
:param target_config: configuration :type target_config: [str] :param attack_config: configuration :type attack_config: [str]\"\"\"\n <|body_0|>\n\n def main(self):\n \"\"\"Main.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert isinstance(target_config, NormalTrainingConfig)\n assert isinstance(attack_config, AttackConfig)\n target_config.validate()\n attack_config.validate()\n self.target_config = target_config\n ' (NormalTrainingConfig) Config. '\n self.attack_config = attack_config\n ' (AttackConfig) Config. '\n self.log_dir = None\n ' (str) Log directory. '\n self.model_file = None\n ' (str) Model file. '\n self.perturbations_file = None\n ' (str) Perturbations file. '\n self.cuda = None\n ' (bool) Whether to use CUDA. '\n self.writer = None\n ' (common.summary.SummaryWriter or torch.utils.tensorboard.SumamryWriter) Summary writer. '\n self.testloader = None\n ' (torch.utils.data.DataLoader) Test loader. '\n self.model = None\n ' (torch.nn.Module) Model. '\n<|end_body_0|>\n\n<|body_start_1|>\n self.log_dir = common.paths.log_dir('%s/%s' % (self.target_config.directory, self.attack_config.directory))\n self.perturbations_file = common.paths.experiment_file('%s/%s' % (self.target_config.directory, self.attack_config.directory), 'perturbations', common.paths.HDF5_EXT)\n self.model_file = common.paths.experiment_file(self.target_config.directory, 'classifier', common.paths.STATE_EXT)\n if self.attack_config.snapshot is not None:\n self.log_dir = common.paths.log_dir('%s/%s_%d' % (self.target_config.directory, self.attack_config.directory, self.attack_config.snapshot))\n self.perturbations_file = common.paths.experiment_file('%s/%s_%d' % (self.target_config.directory, self.attack_config.directory, self.attack_config.snapshot), 'perturbations', common.paths.HDF5_EXT)\n self.model_file += '.%d' % self.attack_config.snapshot\n assert os.path.exists(self.model_file), 'file %s not found' % self.model_file\n attempts = 0\n samples = 0\n if os.path.exists(self.perturbations_file):\n errors = common.utils.read_hdf5(self.perturbations_file, key='errors')\n attempts = errors.shape[0]\n samples = errors.shape[1]\n if not os.path.exists(self.perturbations_file) or attempts < self.attack_config.attempts or samples < len(self.attack_config.testloader.dataset):\n self.cuda = self.target_config.cuda\n if callable(self.attack_config.get_writer):\n self.writer = common.utils.partial(self.attack_config.get_writer, self.log_dir)\n else:\n self.writer = self.attack_config.get_writer\n state = common.state.State.load(self.model_file)\n self.model = state.model\n if self.cuda:\n self.model = self.model.cuda()\n self.model.eval()\n perturbations, probabilities, errors = common.test.attack(self.model, self.attack_config.testloader, self.attack_config.attack, self.attack_config.objective, attempts=self.attack_config.attempts, writer=self.writer, cuda=self.cuda)\n common.utils.write_hdf5(self.perturbations_file, [perturbations, probabilities, errors], ['perturbations', 'probabilities', 'errors'])\n<|end_body_1|>\n", "revision_id": "736c99b55a77d0c650eae5ced2d8312d13af0baf", "skeleton": "<|skeleton|>\nclass AttackInterface:\n \"\"\"Regular attack interface.\"\"\"\n\n def __init__(self, target_config, attack_config):\n \"\"\"Initialize. 
:param target_config: configuration :type target_config: [str] :param attack_config: configuration :type attack_config: [str]\"\"\"\n <|body_0|>\n\n def main(self):\n \"\"\"Main.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AttackInterface:\n \"\"\"Regular attack interface.\"\"\"\n\n def __init__(self, target_config, attack_config):\n \"\"\"Initialize. :param target_config: configuration :type target_config: [str] :param attack_config: configuration :type attack_config: [str]\"\"\"\n assert isinstance(target_config, NormalTrainingConfig)\n assert isinstance(attack_config, AttackConfig)\n target_config.validate()\n attack_config.validate()\n self.target_config = target_config\n ' (NormalTrainingConfig) Config. '\n self.attack_config = attack_config\n ' (AttackConfig) Config. '\n self.log_dir = None\n ' (str) Log directory. '\n self.model_file = None\n ' (str) Model file. '\n self.perturbations_file = None\n ' (str) Perturbations file. '\n self.cuda = None\n ' (bool) Whether to use CUDA. '\n self.writer = None\n ' (common.summary.SummaryWriter or torch.utils.tensorboard.SumamryWriter) Summary writer. '\n self.testloader = None\n ' (torch.utils.data.DataLoader) Test loader. '\n self.model = None\n ' (torch.nn.Module) Model. '\n\n def main(self):\n \"\"\"Main.\"\"\"\n self.log_dir = common.paths.log_dir('%s/%s' % (self.target_config.directory, self.attack_config.directory))\n self.perturbations_file = common.paths.experiment_file('%s/%s' % (self.target_config.directory, self.attack_config.directory), 'perturbations', common.paths.HDF5_EXT)\n self.model_file = common.paths.experiment_file(self.target_config.directory, 'classifier', common.paths.STATE_EXT)\n if self.attack_config.snapshot is not None:\n self.log_dir = common.paths.log_dir('%s/%s_%d' % (self.target_config.directory, self.attack_config.directory, self.attack_config.snapshot))\n self.perturbations_file = common.paths.experiment_file('%s/%s_%d' % (self.target_config.directory, self.attack_config.directory, self.attack_config.snapshot), 'perturbations', common.paths.HDF5_EXT)\n self.model_file += '.%d' % self.attack_config.snapshot\n assert os.path.exists(self.model_file), 'file %s not found' % self.model_file\n attempts = 0\n samples = 0\n if os.path.exists(self.perturbations_file):\n errors = common.utils.read_hdf5(self.perturbations_file, key='errors')\n attempts = errors.shape[0]\n samples = errors.shape[1]\n if not os.path.exists(self.perturbations_file) or attempts < self.attack_config.attempts or samples < len(self.attack_config.testloader.dataset):\n self.cuda = self.target_config.cuda\n if callable(self.attack_config.get_writer):\n self.writer = common.utils.partial(self.attack_config.get_writer, self.log_dir)\n else:\n self.writer = self.attack_config.get_writer\n state = common.state.State.load(self.model_file)\n self.model = state.model\n if self.cuda:\n self.model = self.model.cuda()\n self.model.eval()\n perturbations, probabilities, errors = common.test.attack(self.model, self.attack_config.testloader, self.attack_config.attack, self.attack_config.objective, attempts=self.attack_config.attempts, writer=self.writer, cuda=self.cuda)\n common.utils.write_hdf5(self.perturbations_file, [perturbations, probabilities, errors], ['perturbations', 'probabilities', 'errors'])\n", "source": "the_stack_v2_python_sparse", "source_path": "common/experiments.py", "source_repo": 
"Adversarial-Intelligence-Group/color-adversarial-training", "split": "val", "star_events_count": 0} {"blob_id": "8ce267f8fa803237e168c068c01fb1538a10e3ca", "bodies": ["result = []\n\ndef dfs(root):\n if root == None:\n result.append('null')\n return\n result.append(str(root.val))\n dfs(root.left)\n dfs(root.right)\ndfs(root)\nreturn ','.join(result)", "array = data.split(',')\n\ndef dfs(array):\n if len(array) == 0:\n return None\n first = array.pop(0)\n if first == 'null':\n return None\n node = TreeNode(int(first))\n node.left = dfs(array)\n node.right = dfs(array)\n return node\nreturn dfs(array)"], "bodies_text": "<|body_start_0|>\n result = []\n\n def dfs(root):\n if root == None:\n result.append('null')\n return\n result.append(str(root.val))\n dfs(root.left)\n dfs(root.right)\n dfs(root)\n return ','.join(result)\n<|end_body_0|>\n\n<|body_start_1|>\n array = data.split(',')\n\n def dfs(array):\n if len(array) == 0:\n return None\n first = array.pop(0)\n if first == 'null':\n return None\n node = TreeNode(int(first))\n node.left = dfs(array)\n node.right = dfs(array)\n return node\n return dfs(array)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = []\n\n def dfs(root):\n if root == None:\n result.append('null')\n return\n result.append(str(root.val))\n dfs(root.left)\n dfs(root.right)\n dfs(root)\n return ','.join(result)\n<|end_body_0|>\n\n<|body_start_1|>\n array = data.split(',')\n\n def dfs(array):\n if len(array) == 0:\n return None\n first = array.pop(0)\n if first == 'null':\n return None\n node = TreeNode(int(first))\n node.left = dfs(array)\n node.right = dfs(array)\n return node\n return dfs(array)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000199", "length_bytes": 1061, "license_type": "permissive", "methods": [{"docstring": "Encodes a tree to a single string. :type root: TreeNode :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006021", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = []\n\n def dfs(root):\n if root == None:\n result.append('null')\n return\n result.append(str(root.val))\n dfs(root.left)\n dfs(root.right)\n dfs(root)\n return ','.join(result)\n<|end_body_0|>\n\n<|body_start_1|>\n array = data.split(',')\n\n def dfs(array):\n if len(array) == 0:\n return None\n first = array.pop(0)\n if first == 'null':\n return None\n node = TreeNode(int(first))\n node.left = dfs(array)\n node.right = dfs(array)\n return node\n return dfs(array)\n<|end_body_1|>\n", "revision_id": "2faa46323df991a12014021b49d568387a882233", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n result = []\n\n def dfs(root):\n if root == None:\n result.append('null')\n return\n result.append(str(root.val))\n dfs(root.left)\n dfs(root.right)\n dfs(root)\n return ','.join(result)\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n array = data.split(',')\n\n def dfs(array):\n if len(array) == 0:\n return None\n first = array.pop(0)\n if first == 'null':\n return None\n node = TreeNode(int(first))\n node.left = dfs(array)\n node.right = dfs(array)\n return node\n return dfs(array)\n", "source": "the_stack_v2_python_sparse", "source_path": "python-leetcode/297.py", "source_repo": "MDGSF/JustCoding", "split": "val", "star_events_count": 15} {"blob_id": "58223a5755289cb41d090f31f16a1f492c9f3b46", "bodies": ["Amino.__init__(self, atoms, ref)\nself.reference = ref\nself.SSbonded = 0\nself.SSbondedpartner = None", "if 'CYX' in self.patches or self.name == 'CYX':\n self.ffname = 'CYX'\nelif self.SSbonded:\n self.ffname = 'CYX'\nelif 'CYM' in self.patches or self.name == 'CYM':\n self.ffname = 'CYM'\nelif not self.hasAtom('HG'):\n self.ffname = 'CYX'\nAmino.setState(self)"], "bodies_text": "<|body_start_0|>\n Amino.__init__(self, atoms, ref)\n self.reference = ref\n self.SSbonded = 0\n self.SSbondedpartner = None\n<|end_body_0|>\n\n<|body_start_1|>\n if 'CYX' in self.patches or self.name == 'CYX':\n self.ffname = 'CYX'\n elif self.SSbonded:\n self.ffname = 'CYX'\n elif 'CYM' in self.patches or self.name == 'CYM':\n self.ffname = 'CYM'\n elif not self.hasAtom('HG'):\n self.ffname = 'CYX'\n Amino.setState(self)\n<|end_body_1|>\n", "class_docstring": "Cysteine class This class gives data about the Cysteine object, and inherits off the base residue class.", "class_name": "CYS", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CYS:\n \"\"\"Cysteine class This class gives data about the Cysteine object, and inherits off the base residue class.\"\"\"\n\n def __init__(self, atoms, ref):\n \"\"\"Initialize the class Parameters atoms: A list of Atom objects to be stored in this class (list)\"\"\"\n <|body_0|>\n\n def setState(self):\n \"\"\"Set the state of the CYS object. If SS-bonded, use CYX. If negatively charged, use CYM. 
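
A round-trip check for the Codec above; the TreeNode definition is the standard LeetCode one, which the record assumes but does not include.

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

root = TreeNode(1, TreeNode(2), TreeNode(3, None, TreeNode(4)))
codec = Codec()
data = codec.serialize(root)       # '1,2,null,null,3,null,4,null,null'
clone = codec.deserialize(data)
assert codec.serialize(clone) == data   # preorder encoding is reversible
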
If HG is not present, use CYX.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Amino.__init__(self, atoms, ref)\n self.reference = ref\n self.SSbonded = 0\n self.SSbondedpartner = None\n<|end_body_0|>\n\n<|body_start_1|>\n if 'CYX' in self.patches or self.name == 'CYX':\n self.ffname = 'CYX'\n elif self.SSbonded:\n self.ffname = 'CYX'\n elif 'CYM' in self.patches or self.name == 'CYM':\n self.ffname = 'CYM'\n elif not self.hasAtom('HG'):\n self.ffname = 'CYX'\n Amino.setState(self)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000200", "length_bytes": 22508, "license_type": "permissive", "methods": [{"docstring": "Initialize the class Parameters atoms: A list of Atom objects to be stored in this class (list)", "name": "__init__", "signature": "def __init__(self, atoms, ref)"}, {"docstring": "Set the state of the CYS object. If SS-bonded, use CYX. If negatively charged, use CYM. If HG is not present, use CYX.", "name": "setState", "signature": "def setState(self)"}], "n_methods": 2, "prompt": "Implement the Python class `CYS` described below.\n\nClass description:\nCysteine class This class gives data about the Cysteine object, and inherits off the base residue class.\n\nMethod signatures and docstrings:\n- def __init__(self, atoms, ref): Initialize the class Parameters atoms: A list of Atom objects to be stored in this class (list)\n- def setState(self): Set the state of the CYS object. If SS-bonded, use CYX. If negatively charged, use CYM. If HG is not present, use CYX.", "prompted_full_text": "Implement the Python class `CYS` described below.\n\nClass description:\nCysteine class This class gives data about the Cysteine object, and inherits off the base residue class.\n\nMethod signatures and docstrings:\n- def __init__(self, atoms, ref): Initialize the class Parameters atoms: A list of Atom objects to be stored in this class (list)\n- def setState(self): Set the state of the CYS object. If SS-bonded, use CYX. If negatively charged, use CYM. If HG is not present, use CYX.\n\n<|skeleton|>\nclass CYS:\n \"\"\"Cysteine class This class gives data about the Cysteine object, and inherits off the base residue class.\"\"\"\n\n def __init__(self, atoms, ref):\n \"\"\"Initialize the class Parameters atoms: A list of Atom objects to be stored in this class (list)\"\"\"\n <|body_0|>\n\n def setState(self):\n \"\"\"Set the state of the CYS object. If SS-bonded, use CYX. If negatively charged, use CYM. If HG is not present, use CYX.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Amino.__init__(self, atoms, ref)\n self.reference = ref\n self.SSbonded = 0\n self.SSbondedpartner = None\n<|end_body_0|>\n\n<|body_start_1|>\n if 'CYX' in self.patches or self.name == 'CYX':\n self.ffname = 'CYX'\n elif self.SSbonded:\n self.ffname = 'CYX'\n elif 'CYM' in self.patches or self.name == 'CYM':\n self.ffname = 'CYM'\n elif not self.hasAtom('HG'):\n self.ffname = 'CYX'\n Amino.setState(self)\n<|end_body_1|>\n", "revision_id": "a50f0b2f7104007c730baa51b4ec65c891008c47", "skeleton": "<|skeleton|>\nclass CYS:\n \"\"\"Cysteine class This class gives data about the Cysteine object, and inherits off the base residue class.\"\"\"\n\n def __init__(self, atoms, ref):\n \"\"\"Initialize the class Parameters atoms: A list of Atom objects to be stored in this class (list)\"\"\"\n <|body_0|>\n\n def setState(self):\n \"\"\"Set the state of the CYS object. If SS-bonded, use CYX. If negatively charged, use CYM. 
If HG is not present, use CYX.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CYS:\n \"\"\"Cysteine class This class gives data about the Cysteine object, and inherits off the base residue class.\"\"\"\n\n def __init__(self, atoms, ref):\n \"\"\"Initialize the class Parameters atoms: A list of Atom objects to be stored in this class (list)\"\"\"\n Amino.__init__(self, atoms, ref)\n self.reference = ref\n self.SSbonded = 0\n self.SSbondedpartner = None\n\n def setState(self):\n \"\"\"Set the state of the CYS object. If SS-bonded, use CYX. If negatively charged, use CYM. If HG is not present, use CYX.\"\"\"\n if 'CYX' in self.patches or self.name == 'CYX':\n self.ffname = 'CYX'\n elif self.SSbonded:\n self.ffname = 'CYX'\n elif 'CYM' in self.patches or self.name == 'CYM':\n self.ffname = 'CYM'\n elif not self.hasAtom('HG'):\n self.ffname = 'CYX'\n Amino.setState(self)\n", "source": "the_stack_v2_python_sparse", "source_path": "mscreen/autodocktools_prepare_py3k/MolKit/pdb2pqr/src/aa.py", "source_repo": "e-mayo/mscreen", "split": "val", "star_events_count": 10} {"blob_id": "c11272ce6271903bba6c7ec0e3d90bb09cb6c505", "bodies": ["self.user = user\nself.password = password\nif host:\n self.host = host", "if not from_addr:\n from_addr = self.user\ndata = 'From: %s\\nTo: %s\\nSubject: %s\\n\\n%s' % (from_addr, to_addrs, subject, message)\ntry:\n server = smtplib.SMTP(self.host)\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(self.user, self.password)\n server.sendmail(from_addr, to_addrs, data)\nexcept:\n raise\ntry:\n server.quit()\nexcept:\n pass"], "bodies_text": "<|body_start_0|>\n self.user = user\n self.password = password\n if host:\n self.host = host\n<|end_body_0|>\n\n<|body_start_1|>\n if not from_addr:\n from_addr = self.user\n data = 'From: %s\\nTo: %s\\nSubject: %s\\n\\n%s' % (from_addr, to_addrs, subject, message)\n try:\n server = smtplib.SMTP(self.host)\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(self.user, self.password)\n server.sendmail(from_addr, to_addrs, data)\n except:\n raise\n try:\n server.quit()\n except:\n pass\n<|end_body_1|>\n", "class_docstring": "Send email through Gmail. use: Gmailer(user, password[, host]) use: send(to_addrs, subject, message[, from_addrs])", "class_name": "Gmailer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Gmailer:\n \"\"\"Send email through Gmail. use: Gmailer(user, password[, host]) use: send(to_addrs, subject, message[, from_addrs])\"\"\"\n\n def __init__(self, user, password, host=None):\n \"\"\"Set Google username and passsword. 
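
The branching in CYS.setState above reduces to the standalone table below. CYS/CYX/CYM are the usual protonation-state residue names (neutral, disulfide-bonded, deprotonated); the final 'CYS' default is an assumption, since the record leaves the unmatched case for Amino.setState to resolve.

def cys_ffname(patches, name, ss_bonded, has_hg):
    # Disulfide-bonded or explicitly patched cysteines become CYX.
    if 'CYX' in patches or name == 'CYX' or ss_bonded:
        return 'CYX'
    # Deprotonated (negatively charged) cysteine.
    if 'CYM' in patches or name == 'CYM':
        return 'CYM'
    # Missing HG hydrogen: treated as disulfide-bonded.
    if not has_hg:
        return 'CYX'
    return 'CYS'  # assumed default; the record defers to Amino.setState here

assert cys_ffname([], 'CYS', ss_bonded=True, has_hg=True) == 'CYX'
assert cys_ffname([], 'CYM', ss_bonded=False, has_hg=True) == 'CYM'
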
use: Gmailer(user, password[, host])\"\"\"\n <|body_0|>\n\n def send(self, to_addrs, subject, message, from_addr=None):\n \"\"\"Set username and passsword use: send(to_addrs, subject, message[, from_addrs])\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.user = user\n self.password = password\n if host:\n self.host = host\n<|end_body_0|>\n\n<|body_start_1|>\n if not from_addr:\n from_addr = self.user\n data = 'From: %s\\nTo: %s\\nSubject: %s\\n\\n%s' % (from_addr, to_addrs, subject, message)\n try:\n server = smtplib.SMTP(self.host)\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(self.user, self.password)\n server.sendmail(from_addr, to_addrs, data)\n except:\n raise\n try:\n server.quit()\n except:\n pass\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000201", "length_bytes": 1781, "license_type": "no_license", "methods": [{"docstring": "Set Google username and passsword. use: Gmailer(user, password[, host])", "name": "__init__", "signature": "def __init__(self, user, password, host=None)"}, {"docstring": "Set username and passsword use: send(to_addrs, subject, message[, from_addrs])", "name": "send", "signature": "def send(self, to_addrs, subject, message, from_addr=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001923", "prompt": "Implement the Python class `Gmailer` described below.\n\nClass description:\nSend email through Gmail. use: Gmailer(user, password[, host]) use: send(to_addrs, subject, message[, from_addrs])\n\nMethod signatures and docstrings:\n- def __init__(self, user, password, host=None): Set Google username and passsword. use: Gmailer(user, password[, host])\n- def send(self, to_addrs, subject, message, from_addr=None): Set username and passsword use: send(to_addrs, subject, message[, from_addrs])", "prompted_full_text": "Implement the Python class `Gmailer` described below.\n\nClass description:\nSend email through Gmail. use: Gmailer(user, password[, host]) use: send(to_addrs, subject, message[, from_addrs])\n\nMethod signatures and docstrings:\n- def __init__(self, user, password, host=None): Set Google username and passsword. use: Gmailer(user, password[, host])\n- def send(self, to_addrs, subject, message, from_addr=None): Set username and passsword use: send(to_addrs, subject, message[, from_addrs])\n\n<|skeleton|>\nclass Gmailer:\n \"\"\"Send email through Gmail. use: Gmailer(user, password[, host]) use: send(to_addrs, subject, message[, from_addrs])\"\"\"\n\n def __init__(self, user, password, host=None):\n \"\"\"Set Google username and passsword. use: Gmailer(user, password[, host])\"\"\"\n <|body_0|>\n\n def send(self, to_addrs, subject, message, from_addr=None):\n \"\"\"Set username and passsword use: send(to_addrs, subject, message[, from_addrs])\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.user = user\n self.password = password\n if host:\n self.host = host\n<|end_body_0|>\n\n<|body_start_1|>\n if not from_addr:\n from_addr = self.user\n data = 'From: %s\\nTo: %s\\nSubject: %s\\n\\n%s' % (from_addr, to_addrs, subject, message)\n try:\n server = smtplib.SMTP(self.host)\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(self.user, self.password)\n server.sendmail(from_addr, to_addrs, data)\n except:\n raise\n try:\n server.quit()\n except:\n pass\n<|end_body_1|>\n", "revision_id": "b02b9025add538a927538122558778c505a6c37b", "skeleton": "<|skeleton|>\nclass Gmailer:\n \"\"\"Send email through Gmail. 
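
The send() flow recorded above, as a standalone sketch using only the standard library; host and credentials are placeholders, and a context manager replaces the manual quit() the record wraps in try/except.

import smtplib

def send_mail(host, user, password, to_addrs, subject, message):
    data = 'From: %s\nTo: %s\nSubject: %s\n\n%s' % (user, to_addrs, subject, message)
    with smtplib.SMTP(host) as server:   # closes the connection on exit
        server.ehlo()
        server.starttls()                # upgrade to TLS before logging in
        server.ehlo()
        server.login(user, password)
        server.sendmail(user, to_addrs, data)
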
use: Gmailer(user, password[, host]) use: send(to_addrs, subject, message[, from_addrs])\"\"\"\n\n def __init__(self, user, password, host=None):\n \"\"\"Set Google username and passsword. use: Gmailer(user, password[, host])\"\"\"\n <|body_0|>\n\n def send(self, to_addrs, subject, message, from_addr=None):\n \"\"\"Set username and passsword use: send(to_addrs, subject, message[, from_addrs])\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Gmailer:\n \"\"\"Send email through Gmail. use: Gmailer(user, password[, host]) use: send(to_addrs, subject, message[, from_addrs])\"\"\"\n\n def __init__(self, user, password, host=None):\n \"\"\"Set Google username and passsword. use: Gmailer(user, password[, host])\"\"\"\n self.user = user\n self.password = password\n if host:\n self.host = host\n\n def send(self, to_addrs, subject, message, from_addr=None):\n \"\"\"Set username and passsword use: send(to_addrs, subject, message[, from_addrs])\"\"\"\n if not from_addr:\n from_addr = self.user\n data = 'From: %s\\nTo: %s\\nSubject: %s\\n\\n%s' % (from_addr, to_addrs, subject, message)\n try:\n server = smtplib.SMTP(self.host)\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(self.user, self.password)\n server.sendmail(from_addr, to_addrs, data)\n except:\n raise\n try:\n server.quit()\n except:\n pass\n", "source": "the_stack_v2_python_sparse", "source_path": "libs/gmailer.py", "source_repo": "yezooz/24goals", "split": "val", "star_events_count": 0} {"blob_id": "9877e8e2fa65092ca98d590584bc5f0f7b15644d", "bodies": ["if sandbox_id in sandboxes:\n return (sandboxes[sandbox_id].to_dict(), 200)\nelse:\n return ('', 200)", "if sandbox_id not in sandboxes:\n return (None, 404)\nelse:\n sandbox = sandboxes[sandbox_id]\n sandbox.stop()\n return ({}, 204)"], "bodies_text": "<|body_start_0|>\n if sandbox_id in sandboxes:\n return (sandboxes[sandbox_id].to_dict(), 200)\n else:\n return ('', 200)\n<|end_body_0|>\n\n<|body_start_1|>\n if sandbox_id not in sandboxes:\n return (None, 404)\n else:\n sandbox = sandboxes[sandbox_id]\n sandbox.stop()\n return ({}, 204)\n<|end_body_1|>\n", "class_docstring": "The sandbox REST resource.", "class_name": "Sandbox", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Sandbox:\n \"\"\"The sandbox REST resource.\"\"\"\n\n def get(self, sandbox_id):\n \"\"\"Get the current instance of the sandbox.\"\"\"\n <|body_0|>\n\n def delete(self, sandbox_id):\n \"\"\"Delete the current sandbox instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if sandbox_id in sandboxes:\n return (sandboxes[sandbox_id].to_dict(), 200)\n else:\n return ('', 200)\n<|end_body_0|>\n\n<|body_start_1|>\n if sandbox_id not in sandboxes:\n return (None, 404)\n else:\n sandbox = sandboxes[sandbox_id]\n sandbox.stop()\n return ({}, 204)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000202", "length_bytes": 11386, "license_type": "permissive", "methods": [{"docstring": "Get the current instance of the sandbox.", "name": "get", "signature": "def get(self, sandbox_id)"}, {"docstring": "Delete the current sandbox instance.", "name": "delete", "signature": "def delete(self, sandbox_id)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005095", "prompt": "Implement the Python class `Sandbox` described below.\n\nClass description:\nThe sandbox REST 
resource.\n\nMethod signatures and docstrings:\n- def get(self, sandbox_id): Get the current instance of the sandbox.\n- def delete(self, sandbox_id): Delete the current sandbox instance.", "prompted_full_text": "Implement the Python class `Sandbox` described below.\n\nClass description:\nThe sandbox REST resource.\n\nMethod signatures and docstrings:\n- def get(self, sandbox_id): Get the current instance of the sandbox.\n- def delete(self, sandbox_id): Delete the current sandbox instance.\n\n<|skeleton|>\nclass Sandbox:\n \"\"\"The sandbox REST resource.\"\"\"\n\n def get(self, sandbox_id):\n \"\"\"Get the current instance of the sandbox.\"\"\"\n <|body_0|>\n\n def delete(self, sandbox_id):\n \"\"\"Delete the current sandbox instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if sandbox_id in sandboxes:\n return (sandboxes[sandbox_id].to_dict(), 200)\n else:\n return ('', 200)\n<|end_body_0|>\n\n<|body_start_1|>\n if sandbox_id not in sandboxes:\n return (None, 404)\n else:\n sandbox = sandboxes[sandbox_id]\n sandbox.stop()\n return ({}, 204)\n<|end_body_1|>\n", "revision_id": "33c4aa24ca8daf26f2c8f2d2fa38d7f4bf750cfa", "skeleton": "<|skeleton|>\nclass Sandbox:\n \"\"\"The sandbox REST resource.\"\"\"\n\n def get(self, sandbox_id):\n \"\"\"Get the current instance of the sandbox.\"\"\"\n <|body_0|>\n\n def delete(self, sandbox_id):\n \"\"\"Delete the current sandbox instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Sandbox:\n \"\"\"The sandbox REST resource.\"\"\"\n\n def get(self, sandbox_id):\n \"\"\"Get the current instance of the sandbox.\"\"\"\n if sandbox_id in sandboxes:\n return (sandboxes[sandbox_id].to_dict(), 200)\n else:\n return ('', 200)\n\n def delete(self, sandbox_id):\n \"\"\"Delete the current sandbox instance.\"\"\"\n if sandbox_id not in sandboxes:\n return (None, 404)\n else:\n sandbox = sandboxes[sandbox_id]\n sandbox.stop()\n return ({}, 204)\n", "source": "the_stack_v2_python_sparse", "source_path": "tac/gui/launcher/api/resources/sandboxes.py", "source_repo": "fetchai/agents-tac", "split": "val", "star_events_count": 30} {"blob_id": "eef1dec8dce7332730845825281d3197282ebc01", "bodies": ["super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=timedelta(seconds=UPDATE_INTERVAL))\nsession = async_get_clientsession(hass)\nself.airq = AirQ(entry.data[CONF_IP_ADDRESS], entry.data[CONF_PASSWORD], session)\nself.device_id = entry.unique_id\nassert self.device_id is not None\nself.device_info = DeviceInfo(manufacturer=MANUFACTURER, identifiers={(DOMAIN, self.device_id)})", "if 'name' not in self.device_info:\n info = await self.airq.fetch_device_info()\n self.device_info.update(DeviceInfo(name=info['name'], model=info['model'], sw_version=info['sw_version'], hw_version=info['hw_version']))\ndata = await self.airq.get(TARGET_ROUTE)\nreturn self.airq.drop_uncertainties_from_data(data)"], "bodies_text": "<|body_start_0|>\n super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=timedelta(seconds=UPDATE_INTERVAL))\n session = async_get_clientsession(hass)\n self.airq = AirQ(entry.data[CONF_IP_ADDRESS], entry.data[CONF_PASSWORD], session)\n self.device_id = entry.unique_id\n assert self.device_id is not None\n self.device_info = DeviceInfo(manufacturer=MANUFACTURER, identifiers={(DOMAIN, self.device_id)})\n<|end_body_0|>\n\n<|body_start_1|>\n if 'name' not in self.device_info:\n info = await self.airq.fetch_device_info()\n 
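
To exercise the Sandbox resource above in isolation: its handlers only consult a module-level sandboxes dict, so a toy registry suffices. FakeSandbox is invented for the demo; a real deployment would sit behind a REST framework with proper process management.

class FakeSandbox:
    def __init__(self):
        self.running = True
    def stop(self):
        self.running = False
    def to_dict(self):
        return {'running': self.running}

sandboxes = {'s1': FakeSandbox()}          # the registry Sandbox consults
resource = Sandbox()
assert resource.get('s1') == ({'running': True}, 200)
assert resource.delete('s1') == ({}, 204)  # stops the sandbox, keeps the entry
assert resource.delete('missing') == (None, 404)
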
self.device_info.update(DeviceInfo(name=info['name'], model=info['model'], sw_version=info['sw_version'], hw_version=info['hw_version']))\n data = await self.airq.get(TARGET_ROUTE)\n return self.airq.drop_uncertainties_from_data(data)\n<|end_body_1|>\n", "class_docstring": "Coordinator is responsible for querying the device at a specified route.", "class_name": "AirQCoordinator", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AirQCoordinator:\n \"\"\"Coordinator is responsible for querying the device at a specified route.\"\"\"\n\n def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None:\n \"\"\"Initialise a custom coordinator.\"\"\"\n <|body_0|>\n\n async def _async_update_data(self) -> dict:\n \"\"\"Fetch the data from the device.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=timedelta(seconds=UPDATE_INTERVAL))\n session = async_get_clientsession(hass)\n self.airq = AirQ(entry.data[CONF_IP_ADDRESS], entry.data[CONF_PASSWORD], session)\n self.device_id = entry.unique_id\n assert self.device_id is not None\n self.device_info = DeviceInfo(manufacturer=MANUFACTURER, identifiers={(DOMAIN, self.device_id)})\n<|end_body_0|>\n\n<|body_start_1|>\n if 'name' not in self.device_info:\n info = await self.airq.fetch_device_info()\n self.device_info.update(DeviceInfo(name=info['name'], model=info['model'], sw_version=info['sw_version'], hw_version=info['hw_version']))\n data = await self.airq.get(TARGET_ROUTE)\n return self.airq.drop_uncertainties_from_data(data)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000203", "length_bytes": 2031, "license_type": "permissive", "methods": [{"docstring": "Initialise a custom coordinator.", "name": "__init__", "signature": "def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None"}, {"docstring": "Fetch the data from the device.", "name": "_async_update_data", "signature": "async def _async_update_data(self) -> dict"}], "n_methods": 2, "prompt": "Implement the Python class `AirQCoordinator` described below.\n\nClass description:\nCoordinator is responsible for querying the device at a specified route.\n\nMethod signatures and docstrings:\n- def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None: Initialise a custom coordinator.\n- async def _async_update_data(self) -> dict: Fetch the data from the device.", "prompted_full_text": "Implement the Python class `AirQCoordinator` described below.\n\nClass description:\nCoordinator is responsible for querying the device at a specified route.\n\nMethod signatures and docstrings:\n- def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None: Initialise a custom coordinator.\n- async def _async_update_data(self) -> dict: Fetch the data from the device.\n\n<|skeleton|>\nclass AirQCoordinator:\n \"\"\"Coordinator is responsible for querying the device at a specified route.\"\"\"\n\n def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None:\n \"\"\"Initialise a custom coordinator.\"\"\"\n <|body_0|>\n\n async def _async_update_data(self) -> dict:\n \"\"\"Fetch the data from the device.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=timedelta(seconds=UPDATE_INTERVAL))\n session = async_get_clientsession(hass)\n self.airq = AirQ(entry.data[CONF_IP_ADDRESS], entry.data[CONF_PASSWORD], session)\n self.device_id = entry.unique_id\n 
assert self.device_id is not None\n self.device_info = DeviceInfo(manufacturer=MANUFACTURER, identifiers={(DOMAIN, self.device_id)})\n<|end_body_0|>\n\n<|body_start_1|>\n if 'name' not in self.device_info:\n info = await self.airq.fetch_device_info()\n self.device_info.update(DeviceInfo(name=info['name'], model=info['model'], sw_version=info['sw_version'], hw_version=info['hw_version']))\n data = await self.airq.get(TARGET_ROUTE)\n return self.airq.drop_uncertainties_from_data(data)\n<|end_body_1|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass AirQCoordinator:\n \"\"\"Coordinator is responsible for querying the device at a specified route.\"\"\"\n\n def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None:\n \"\"\"Initialise a custom coordinator.\"\"\"\n <|body_0|>\n\n async def _async_update_data(self) -> dict:\n \"\"\"Fetch the data from the device.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AirQCoordinator:\n \"\"\"Coordinator is responsible for querying the device at a specified route.\"\"\"\n\n def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None:\n \"\"\"Initialise a custom coordinator.\"\"\"\n super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=timedelta(seconds=UPDATE_INTERVAL))\n session = async_get_clientsession(hass)\n self.airq = AirQ(entry.data[CONF_IP_ADDRESS], entry.data[CONF_PASSWORD], session)\n self.device_id = entry.unique_id\n assert self.device_id is not None\n self.device_info = DeviceInfo(manufacturer=MANUFACTURER, identifiers={(DOMAIN, self.device_id)})\n\n async def _async_update_data(self) -> dict:\n \"\"\"Fetch the data from the device.\"\"\"\n if 'name' not in self.device_info:\n info = await self.airq.fetch_device_info()\n self.device_info.update(DeviceInfo(name=info['name'], model=info['model'], sw_version=info['sw_version'], hw_version=info['hw_version']))\n data = await self.airq.get(TARGET_ROUTE)\n return self.airq.drop_uncertainties_from_data(data)\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/airq/coordinator.py", "source_repo": "home-assistant/core", "split": "val", "star_events_count": 35501} {"blob_id": "06770868690021bfa5ea42778e21ff7285bb20b1", "bodies": ["if A.shape[0] != A.shape[1]:\n print('Matrix must be square')\nself.dim = A.shape[0]\nif view is None:\n view = self.dim\nelse:\n assert view <= self.dim\nself._A_buf = A[:, :]\nself.n = view\nself.A = self._A_buf[:view, :view]", "assert view <= self.dim\nself.n = view\nself.A = self._A_buf[:view, :view]", "T = hessenberg(self.A)\ndiag = np.diag(T)[:cutoff]\noffdiag = np.diag(T, k=-1)[:cutoff - 1]\nreturn (diag, offdiag)", "T, Q = hessenberg(self.A, calc_q=True)\ndiag = np.diag(T)[:cutoff]\noffdiag = np.diag(T, k=-1)[:cutoff - 1]\nreturn (diag, offdiag, Q[:, :cutoff])", "if trafo is None:\n return (diag, np.abs(offdiag))\nelse:\n buffer = offdiag.copy()\n index = 0\n while index < len(buffer):\n if buffer[index] < 0:\n trafo[:, index + 1] = -trafo[:, index + 1]\n A_pos = trafo.T @ self.A @ trafo\n buffer = np.diag(A_pos, k=-1)\n index += 1\n assert np.all(buffer >= 0)\n return (diag, np.abs(offdiag), trafo)", "if cutoff is None:\n cutoff = self.n\nelse:\n assert 0 < cutoff <= self.n\ninfo = dict()\ninfo['trafo'] = None\ninfo['res'] = None\nif residual or get_trafo:\n diag, offdiag, Q = self._construct_hessenberg_with_trafo(cutoff)\n if positive and 
get_trafo:\n diag, offdiag, Q = self._make_positive(diag, offdiag, trafo=Q)\n else:\n diag, offdiag = self._make_positive(diag, offdiag)\n if get_trafo:\n info['trafo'] = Q\n if residual:\n info['res'] = orth_residual(Q)\nelse:\n diag, offdiag = self._construct_hessenberg(cutoff)\n if positive:\n diag, offdiag = self._make_positive(diag, offdiag)\nreturn (diag, offdiag, info)"], "bodies_text": "<|body_start_0|>\n if A.shape[0] != A.shape[1]:\n print('Matrix must be square')\n self.dim = A.shape[0]\n if view is None:\n view = self.dim\n else:\n assert view <= self.dim\n self._A_buf = A[:, :]\n self.n = view\n self.A = self._A_buf[:view, :view]\n<|end_body_0|>\n\n<|body_start_1|>\n assert view <= self.dim\n self.n = view\n self.A = self._A_buf[:view, :view]\n<|end_body_1|>\n\n<|body_start_2|>\n T = hessenberg(self.A)\n diag = np.diag(T)[:cutoff]\n offdiag = np.diag(T, k=-1)[:cutoff - 1]\n return (diag, offdiag)\n<|end_body_2|>\n\n<|body_start_3|>\n T, Q = hessenberg(self.A, calc_q=True)\n diag = np.diag(T)[:cutoff]\n offdiag = np.diag(T, k=-1)[:cutoff - 1]\n return (diag, offdiag, Q[:, :cutoff])\n<|end_body_3|>\n\n<|body_start_4|>\n if trafo is None:\n return (diag, np.abs(offdiag))\n else:\n buffer = offdiag.copy()\n index = 0\n while index < len(buffer):\n if buffer[index] < 0:\n trafo[:, index + 1] = -trafo[:, index + 1]\n A_pos = trafo.T @ self.A @ trafo\n buffer = np.diag(A_pos, k=-1)\n index += 1\n assert np.all(buffer >= 0)\n return (diag, np.abs(offdiag), trafo)\n<|end_body_4|>\n\n<|body_start_5|>\n if cutoff is None:\n cutoff = self.n\n else:\n assert 0 < cutoff <= self.n\n info = dict()\n info['trafo'] = None\n info['res'] = None\n if residual or get_trafo:\n diag, offdiag, Q = self._construct_hessenberg_with_trafo(cutoff)\n if positive and get_trafo:\n diag, offdiag, Q = self._make_positive(diag, offdiag, trafo=Q)\n else:\n diag, offdiag = self._make_positive(diag, offdiag)\n if get_trafo:\n info['trafo'] = Q\n if residual:\n info['res'] = orth_residual(Q)\n else:\n diag, offdiag = self._construct_hessenberg(cutoff)\n if positive:\n diag, offdiag = self._make_positive(diag, offdiag)\n return (diag, offdiag, info)\n<|end_body_5|>\n", "class_docstring": "", "class_name": "ScipyHessenberg", "detected_licenses": ["BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ScipyHessenberg:\n\n def __init__(self, A, view=None):\n \"\"\"Constructor :param A: Full storage real symmetric matrix to be tridiagonalized (is not overwritten) :param view: Integer, if present selects a sub-matrix of A starting from the top-left element, up until the row/column n = view. This sub-matrix is then tridiagonalized instead of the full A\"\"\"\n <|body_0|>\n\n def update_view(self, view):\n \"\"\"Updates the matrix view of A, which is tridiagonalized\"\"\"\n <|body_1|>\n\n def _construct_hessenberg(self, cutoff):\n \"\"\"Performs the tridiagonalization up to 'cutoff' diagonal elements without computing the transformation matrix\"\"\"\n <|body_2|>\n\n def _construct_hessenberg_with_trafo(self, cutoff):\n \"\"\"Performs the tridiagonalization up to 'cutoff' diagonal elements, while also computing the transformation matrix\"\"\"\n <|body_3|>\n\n def _make_positive(self, diag, offdiag, trafo=None):\n \"\"\"Makes the offdiagonal elements of the tridiagonal positive. 
If no trafo matrix is passed, the absolute value of the offdiagonal elements is take, as there always exists a transformation that would perform the correct transformation :param diag: Diagonal elements of the triiagonal matrix :param offdiag: Offdiagonal elements of the tridiagonal matrix :param trafo: Transformation matrix (if not present see function description). If passed, the signs of the orthonormal column vectors are flipped to make the corresponding offdiagonal elements positive :return: diagonal elements of the triiagonal matrix, strictly positive offdiagonals, the corresponding transformation matrix (if trafo was passed a\"\"\"\n <|body_4|>\n\n def get_tridiagonal(self, cutoff=None, residual=False, get_trafo=False, positive=True):\n \"\"\"Main interface. Computes the tridiagonal elements of the matrix A and returns them :param cutoff: How many diagonal elements for the resulting tridiagonal elements should be computed If left unchanged the entire matrix is tridiagonalized :param residual: If set True computes a residual. See utils.residual.orth_residual for details :param get_trafo: If set True computes and returns the transformaion matrix :param positive: If set True ensures that the offdiagonal elements are chosen to be positive :return: diag (diagonal elements of the computed tridiagonal matrix), offdiag (offdiagonal elements of the computed tridiagonal matrix), info dict with keys 'trafo' and 'res', which contain the corr\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if A.shape[0] != A.shape[1]:\n print('Matrix must be square')\n self.dim = A.shape[0]\n if view is None:\n view = self.dim\n else:\n assert view <= self.dim\n self._A_buf = A[:, :]\n self.n = view\n self.A = self._A_buf[:view, :view]\n<|end_body_0|>\n\n<|body_start_1|>\n assert view <= self.dim\n self.n = view\n self.A = self._A_buf[:view, :view]\n<|end_body_1|>\n\n<|body_start_2|>\n T = hessenberg(self.A)\n diag = np.diag(T)[:cutoff]\n offdiag = np.diag(T, k=-1)[:cutoff - 1]\n return (diag, offdiag)\n<|end_body_2|>\n\n<|body_start_3|>\n T, Q = hessenberg(self.A, calc_q=True)\n diag = np.diag(T)[:cutoff]\n offdiag = np.diag(T, k=-1)[:cutoff - 1]\n return (diag, offdiag, Q[:, :cutoff])\n<|end_body_3|>\n\n<|body_start_4|>\n if trafo is None:\n return (diag, np.abs(offdiag))\n else:\n buffer = offdiag.copy()\n index = 0\n while index < len(buffer):\n if buffer[index] < 0:\n trafo[:, index + 1] = -trafo[:, index + 1]\n A_pos = trafo.T @ self.A @ trafo\n buffer = np.diag(A_pos, k=-1)\n index += 1\n assert np.all(buffer >= 0)\n return (diag, np.abs(offdiag), trafo)\n<|end_body_4|>\n\n<|body_start_5|>\n if cutoff is None:\n cutoff = self.n\n else:\n assert 0 < cutoff <= self.n\n info = dict()\n info['trafo'] = None\n info['res'] = None\n if residual or get_trafo:\n diag, offdiag, Q = self._construct_hessenberg_with_trafo(cutoff)\n if positive and get_trafo:\n diag, offdiag, Q = self._make_positive(diag, offdiag, trafo=Q)\n else:\n diag, offdiag = self._make_positive(diag, offdiag)\n if get_trafo:\n info['trafo'] = Q\n if residual:\n info['res'] = orth_residual(Q)\n else:\n diag, offdiag = self._construct_hessenberg(cutoff)\n if positive:\n diag, offdiag = self._make_positive(diag, offdiag)\n return (diag, offdiag, info)\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_val_000204", "length_bytes": 5175, "license_type": "permissive", "methods": [{"docstring": "Constructor :param A: Full storage real symmetric matrix to be tridiagonalized (is not overwritten) :param view: Integer, if present selects a 
sub-matrix of A starting from the top-left element, up until the row/column n = view. This sub-matrix is then tridiagonalized instead of the full A", "name": "__init__", "signature": "def __init__(self, A, view=None)"}, {"docstring": "Updates the matrix view of A, which is tridiagonalized", "name": "update_view", "signature": "def update_view(self, view)"}, {"docstring": "Performs the tridiagonalization up to 'cutoff' diagonal elements without computing the transformation matrix", "name": "_construct_hessenberg", "signature": "def _construct_hessenberg(self, cutoff)"}, {"docstring": "Performs the tridiagonalization up to 'cutoff' diagonal elements, while also computing the transformation matrix", "name": "_construct_hessenberg_with_trafo", "signature": "def _construct_hessenberg_with_trafo(self, cutoff)"}, {"docstring": "Makes the offdiagonal elements of the tridiagonal positive. If no trafo matrix is passed, the absolute value of the offdiagonal elements is take, as there always exists a transformation that would perform the correct transformation :param diag: Diagonal elements of the triiagonal matrix :param offdiag: Offdiagonal elements of the tridiagonal matrix :param trafo: Transformation matrix (if not present see function description). If passed, the signs of the orthonormal column vectors are flipped to make the corresponding offdiagonal elements positive :return: diagonal elements of the triiagonal matrix, strictly positive offdiagonals, the corresponding transformation matrix (if trafo was passed a", "name": "_make_positive", "signature": "def _make_positive(self, diag, offdiag, trafo=None)"}, {"docstring": "Main interface. Computes the tridiagonal elements of the matrix A and returns them :param cutoff: How many diagonal elements for the resulting tridiagonal elements should be computed If left unchanged the entire matrix is tridiagonalized :param residual: If set True computes a residual. See utils.residual.orth_residual for details :param get_trafo: If set True computes and returns the transformaion matrix :param positive: If set True ensures that the offdiagonal elements are chosen to be positive :return: diag (diagonal elements of the computed tridiagonal matrix), offdiag (offdiagonal elements of the computed tridiagonal matrix), info dict with keys 'trafo' and 'res', which contain the corr", "name": "get_tridiagonal", "signature": "def get_tridiagonal(self, cutoff=None, residual=False, get_trafo=False, positive=True)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_006642", "prompt": "Implement the Python class `ScipyHessenberg` described below.\n\nClass description:\nImplement the ScipyHessenberg class.\n\nMethod signatures and docstrings:\n- def __init__(self, A, view=None): Constructor :param A: Full storage real symmetric matrix to be tridiagonalized (is not overwritten) :param view: Integer, if present selects a sub-matrix of A starting from the top-left element, up until the row/column n = view. 
This sub-matrix is then tridiagonalized instead of the full A\n- def update_view(self, view): Updates the matrix view of A, which is tridiagonalized\n- def _construct_hessenberg(self, cutoff): Performs the tridiagonalization up to 'cutoff' diagonal elements without computing the transformation matrix\n- def _construct_hessenberg_with_trafo(self, cutoff): Performs the tridiagonalization up to 'cutoff' diagonal elements, while also computing the transformation matrix\n- def _make_positive(self, diag, offdiag, trafo=None): Makes the offdiagonal elements of the tridiagonal positive. If no trafo matrix is passed, the absolute value of the offdiagonal elements is take, as there always exists a transformation that would perform the correct transformation :param diag: Diagonal elements of the triiagonal matrix :param offdiag: Offdiagonal elements of the tridiagonal matrix :param trafo: Transformation matrix (if not present see function description). If passed, the signs of the orthonormal column vectors are flipped to make the corresponding offdiagonal elements positive :return: diagonal elements of the triiagonal matrix, strictly positive offdiagonals, the corresponding transformation matrix (if trafo was passed a\n- def get_tridiagonal(self, cutoff=None, residual=False, get_trafo=False, positive=True): Main interface. Computes the tridiagonal elements of the matrix A and returns them :param cutoff: How many diagonal elements for the resulting tridiagonal elements should be computed If left unchanged the entire matrix is tridiagonalized :param residual: If set True computes a residual. See utils.residual.orth_residual for details :param get_trafo: If set True computes and returns the transformaion matrix :param positive: If set True ensures that the offdiagonal elements are chosen to be positive :return: diag (diagonal elements of the computed tridiagonal matrix), offdiag (offdiagonal elements of the computed tridiagonal matrix), info dict with keys 'trafo' and 'res', which contain the corr", "prompted_full_text": "Implement the Python class `ScipyHessenberg` described below.\n\nClass description:\nImplement the ScipyHessenberg class.\n\nMethod signatures and docstrings:\n- def __init__(self, A, view=None): Constructor :param A: Full storage real symmetric matrix to be tridiagonalized (is not overwritten) :param view: Integer, if present selects a sub-matrix of A starting from the top-left element, up until the row/column n = view. This sub-matrix is then tridiagonalized instead of the full A\n- def update_view(self, view): Updates the matrix view of A, which is tridiagonalized\n- def _construct_hessenberg(self, cutoff): Performs the tridiagonalization up to 'cutoff' diagonal elements without computing the transformation matrix\n- def _construct_hessenberg_with_trafo(self, cutoff): Performs the tridiagonalization up to 'cutoff' diagonal elements, while also computing the transformation matrix\n- def _make_positive(self, diag, offdiag, trafo=None): Makes the offdiagonal elements of the tridiagonal positive. If no trafo matrix is passed, the absolute value of the offdiagonal elements is take, as there always exists a transformation that would perform the correct transformation :param diag: Diagonal elements of the triiagonal matrix :param offdiag: Offdiagonal elements of the tridiagonal matrix :param trafo: Transformation matrix (if not present see function description). 
If passed, the signs of the orthonormal column vectors are flipped to make the corresponding offdiagonal elements positive :return: diagonal elements of the triiagonal matrix, strictly positive offdiagonals, the corresponding transformation matrix (if trafo was passed a\n- def get_tridiagonal(self, cutoff=None, residual=False, get_trafo=False, positive=True): Main interface. Computes the tridiagonal elements of the matrix A and returns them :param cutoff: How many diagonal elements for the resulting tridiagonal elements should be computed If left unchanged the entire matrix is tridiagonalized :param residual: If set True computes a residual. See utils.residual.orth_residual for details :param get_trafo: If set True computes and returns the transformaion matrix :param positive: If set True ensures that the offdiagonal elements are chosen to be positive :return: diag (diagonal elements of the computed tridiagonal matrix), offdiag (offdiagonal elements of the computed tridiagonal matrix), info dict with keys 'trafo' and 'res', which contain the corr\n\n<|skeleton|>\nclass ScipyHessenberg:\n\n def __init__(self, A, view=None):\n \"\"\"Constructor :param A: Full storage real symmetric matrix to be tridiagonalized (is not overwritten) :param view: Integer, if present selects a sub-matrix of A starting from the top-left element, up until the row/column n = view. This sub-matrix is then tridiagonalized instead of the full A\"\"\"\n <|body_0|>\n\n def update_view(self, view):\n \"\"\"Updates the matrix view of A, which is tridiagonalized\"\"\"\n <|body_1|>\n\n def _construct_hessenberg(self, cutoff):\n \"\"\"Performs the tridiagonalization up to 'cutoff' diagonal elements without computing the transformation matrix\"\"\"\n <|body_2|>\n\n def _construct_hessenberg_with_trafo(self, cutoff):\n \"\"\"Performs the tridiagonalization up to 'cutoff' diagonal elements, while also computing the transformation matrix\"\"\"\n <|body_3|>\n\n def _make_positive(self, diag, offdiag, trafo=None):\n \"\"\"Makes the offdiagonal elements of the tridiagonal positive. If no trafo matrix is passed, the absolute value of the offdiagonal elements is take, as there always exists a transformation that would perform the correct transformation :param diag: Diagonal elements of the triiagonal matrix :param offdiag: Offdiagonal elements of the tridiagonal matrix :param trafo: Transformation matrix (if not present see function description). If passed, the signs of the orthonormal column vectors are flipped to make the corresponding offdiagonal elements positive :return: diagonal elements of the triiagonal matrix, strictly positive offdiagonals, the corresponding transformation matrix (if trafo was passed a\"\"\"\n <|body_4|>\n\n def get_tridiagonal(self, cutoff=None, residual=False, get_trafo=False, positive=True):\n \"\"\"Main interface. Computes the tridiagonal elements of the matrix A and returns them :param cutoff: How many diagonal elements for the resulting tridiagonal elements should be computed If left unchanged the entire matrix is tridiagonalized :param residual: If set True computes a residual. 
See utils.residual.orth_residual for details :param get_trafo: If set True computes and returns the transformaion matrix :param positive: If set True ensures that the offdiagonal elements are chosen to be positive :return: diag (diagonal elements of the computed tridiagonal matrix), offdiag (offdiagonal elements of the computed tridiagonal matrix), info dict with keys 'trafo' and 'res', which contain the corr\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if A.shape[0] != A.shape[1]:\n print('Matrix must be square')\n self.dim = A.shape[0]\n if view is None:\n view = self.dim\n else:\n assert view <= self.dim\n self._A_buf = A[:, :]\n self.n = view\n self.A = self._A_buf[:view, :view]\n<|end_body_0|>\n\n<|body_start_1|>\n assert view <= self.dim\n self.n = view\n self.A = self._A_buf[:view, :view]\n<|end_body_1|>\n\n<|body_start_2|>\n T = hessenberg(self.A)\n diag = np.diag(T)[:cutoff]\n offdiag = np.diag(T, k=-1)[:cutoff - 1]\n return (diag, offdiag)\n<|end_body_2|>\n\n<|body_start_3|>\n T, Q = hessenberg(self.A, calc_q=True)\n diag = np.diag(T)[:cutoff]\n offdiag = np.diag(T, k=-1)[:cutoff - 1]\n return (diag, offdiag, Q[:, :cutoff])\n<|end_body_3|>\n\n<|body_start_4|>\n if trafo is None:\n return (diag, np.abs(offdiag))\n else:\n buffer = offdiag.copy()\n index = 0\n while index < len(buffer):\n if buffer[index] < 0:\n trafo[:, index + 1] = -trafo[:, index + 1]\n A_pos = trafo.T @ self.A @ trafo\n buffer = np.diag(A_pos, k=-1)\n index += 1\n assert np.all(buffer >= 0)\n return (diag, np.abs(offdiag), trafo)\n<|end_body_4|>\n\n<|body_start_5|>\n if cutoff is None:\n cutoff = self.n\n else:\n assert 0 < cutoff <= self.n\n info = dict()\n info['trafo'] = None\n info['res'] = None\n if residual or get_trafo:\n diag, offdiag, Q = self._construct_hessenberg_with_trafo(cutoff)\n if positive and get_trafo:\n diag, offdiag, Q = self._make_positive(diag, offdiag, trafo=Q)\n else:\n diag, offdiag = self._make_positive(diag, offdiag)\n if get_trafo:\n info['trafo'] = Q\n if residual:\n info['res'] = orth_residual(Q)\n else:\n diag, offdiag = self._construct_hessenberg(cutoff)\n if positive:\n diag, offdiag = self._make_positive(diag, offdiag)\n return (diag, offdiag, info)\n<|end_body_5|>\n", "revision_id": "daf37f522f8acb6af2285d44f39cab31f34b01a4", "skeleton": "<|skeleton|>\nclass ScipyHessenberg:\n\n def __init__(self, A, view=None):\n \"\"\"Constructor :param A: Full storage real symmetric matrix to be tridiagonalized (is not overwritten) :param view: Integer, if present selects a sub-matrix of A starting from the top-left element, up until the row/column n = view. This sub-matrix is then tridiagonalized instead of the full A\"\"\"\n <|body_0|>\n\n def update_view(self, view):\n \"\"\"Updates the matrix view of A, which is tridiagonalized\"\"\"\n <|body_1|>\n\n def _construct_hessenberg(self, cutoff):\n \"\"\"Performs the tridiagonalization up to 'cutoff' diagonal elements without computing the transformation matrix\"\"\"\n <|body_2|>\n\n def _construct_hessenberg_with_trafo(self, cutoff):\n \"\"\"Performs the tridiagonalization up to 'cutoff' diagonal elements, while also computing the transformation matrix\"\"\"\n <|body_3|>\n\n def _make_positive(self, diag, offdiag, trafo=None):\n \"\"\"Makes the offdiagonal elements of the tridiagonal positive. 
If no trafo matrix is passed, the absolute value of the offdiagonal elements is take, as there always exists a transformation that would perform the correct transformation :param diag: Diagonal elements of the triiagonal matrix :param offdiag: Offdiagonal elements of the tridiagonal matrix :param trafo: Transformation matrix (if not present see function description). If passed, the signs of the orthonormal column vectors are flipped to make the corresponding offdiagonal elements positive :return: diagonal elements of the triiagonal matrix, strictly positive offdiagonals, the corresponding transformation matrix (if trafo was passed a\"\"\"\n <|body_4|>\n\n def get_tridiagonal(self, cutoff=None, residual=False, get_trafo=False, positive=True):\n \"\"\"Main interface. Computes the tridiagonal elements of the matrix A and returns them :param cutoff: How many diagonal elements for the resulting tridiagonal elements should be computed If left unchanged the entire matrix is tridiagonalized :param residual: If set True computes a residual. See utils.residual.orth_residual for details :param get_trafo: If set True computes and returns the transformaion matrix :param positive: If set True ensures that the offdiagonal elements are chosen to be positive :return: diag (diagonal elements of the computed tridiagonal matrix), offdiag (offdiagonal elements of the computed tridiagonal matrix), info dict with keys 'trafo' and 'res', which contain the corr\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ScipyHessenberg:\n def __init__(self, A, view=None):\n \"\"\"Constructor :param A: Full storage real symmetric matrix to be tridiagonalized (is not overwritten) :param view: Integer, if present selects a sub-matrix of A starting from the top-left element, up until the row/column n = view. This sub-matrix is then tridiagonalized instead of the full A\"\"\"\n if A.shape[0] != A.shape[1]:\n print('Matrix must be square')\n self.dim = A.shape[0]\n if view is None:\n view = self.dim\n else:\n assert view <= self.dim\n self._A_buf = A[:, :]\n self.n = view\n self.A = self._A_buf[:view, :view]\n\n def update_view(self, view):\n \"\"\"Updates the matrix view of A, which is tridiagonalized\"\"\"\n assert view <= self.dim\n self.n = view\n self.A = self._A_buf[:view, :view]\n\n def _construct_hessenberg(self, cutoff):\n \"\"\"Performs the tridiagonalization up to 'cutoff' diagonal elements without computing the transformation matrix\"\"\"\n T = hessenberg(self.A)\n diag = np.diag(T)[:cutoff]\n offdiag = np.diag(T, k=-1)[:cutoff - 1]\n return (diag, offdiag)\n\n def _construct_hessenberg_with_trafo(self, cutoff):\n \"\"\"Performs the tridiagonalization up to 'cutoff' diagonal elements, while also computing the transformation matrix\"\"\"\n T, Q = hessenberg(self.A, calc_q=True)\n diag = np.diag(T)[:cutoff]\n offdiag = np.diag(T, k=-1)[:cutoff - 1]\n return (diag, offdiag, Q[:, :cutoff])\n\n def _make_positive(self, diag, offdiag, trafo=None):\n \"\"\"Makes the offdiagonal elements of the tridiagonal positive. If no trafo matrix is passed, the absolute value of the offdiagonal elements is take, as there always exists a transformation that would perform the correct transformation :param diag: Diagonal elements of the triiagonal matrix :param offdiag: Offdiagonal elements of the tridiagonal matrix :param trafo: Transformation matrix (if not present see function description). 
If passed, the signs of the orthonormal column vectors are flipped to make the corresponding offdiagonal elements positive :return: diagonal elements of the triiagonal matrix, strictly positive offdiagonals, the corresponding transformation matrix (if trafo was passed a\"\"\"\n if trafo is None:\n return (diag, np.abs(offdiag))\n else:\n buffer = offdiag.copy()\n index = 0\n while index < len(buffer):\n if buffer[index] < 0:\n trafo[:, index + 1] = -trafo[:, index + 1]\n A_pos = trafo.T @ self.A @ trafo\n buffer = np.diag(A_pos, k=-1)\n index += 1\n assert np.all(buffer >= 0)\n return (diag, np.abs(offdiag), trafo)\n\n def get_tridiagonal(self, cutoff=None, residual=False, get_trafo=False, positive=True):\n \"\"\"Main interface. Computes the tridiagonal elements of the matrix A and returns them :param cutoff: How many diagonal elements for the resulting tridiagonal elements should be computed If left unchanged the entire matrix is tridiagonalized :param residual: If set True computes a residual. See utils.residual.orth_residual for details :param get_trafo: If set True computes and returns the transformaion matrix :param positive: If set True ensures that the offdiagonal elements are chosen to be positive :return: diag (diagonal elements of the computed tridiagonal matrix), offdiag (offdiagonal elements of the computed tridiagonal matrix), info dict with keys 'trafo' and 'res', which contain the corr\"\"\"\n if cutoff is None:\n cutoff = self.n\n else:\n assert 0 < cutoff <= self.n\n info = dict()\n info['trafo'] = None\n info['res'] = None\n if residual or get_trafo:\n diag, offdiag, Q = self._construct_hessenberg_with_trafo(cutoff)\n if positive and get_trafo:\n diag, offdiag, Q = self._make_positive(diag, offdiag, trafo=Q)\n else:\n diag, offdiag = self._make_positive(diag, offdiag)\n if get_trafo:\n info['trafo'] = Q\n if residual:\n info['res'] = orth_residual(Q)\n else:\n diag, offdiag = self._construct_hessenberg(cutoff)\n if positive:\n diag, offdiag = self._make_positive(diag, offdiag)\n return (diag, offdiag, info)\n", "source": "the_stack_v2_python_sparse", "source_path": "mapping/tridiag/scipy_hessenberg/full.py", "source_repo": "fhoeb/py-mapping", "split": "val", "star_events_count": 2} {"blob_id": "80bb998c8202fde111c8654035f600e86b966492", "bodies": ["iso8601_string = self._GetJSONValue(json_dict, name)\nif not iso8601_string:\n return None\nif name == 'FinishedAt' and iso8601_string == '0001-01-01T00:00:00Z':\n return None\ntry:\n date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()\n date_time.CopyFromStringISO8601(iso8601_string)\nexcept ValueError as exception:\n parser_mediator.ProduceExtractionWarning('Unable to parse value: {0:s} ISO8601 string: {1:s} with error: {2!s}'.format(name, iso8601_string, exception))\n return None\nreturn date_time", "json_state = self._GetJSONValue(json_dict, 'State', default_value={})\nconfiguration = self._GetJSONValue(json_dict, 'Config', default_value={})\nevent_data = DockerContainerConfigurationEventData()\nevent_data.container_identifier = self._GetJSONValue(json_dict, 'ID')\nevent_data.container_name = self._GetJSONValue(configuration, 'Hostname', default_value='Unknown container name')\nevent_data.creation_time = self._ParseISO8601DateTimeString(parser_mediator, json_dict, 'Created')\nevent_data.end_time = self._ParseISO8601DateTimeString(parser_mediator, json_state, 'FinishedAt')\nevent_data.start_time = self._ParseISO8601DateTimeString(parser_mediator, json_state, 
'StartedAt')\nparser_mediator.ProduceEventData(event_data)", "configuration = self._GetJSONValue(json_dict, 'Config')\ndriver = self._GetJSONValue(json_dict, 'Driver')\nidentifier = self._GetJSONValue(json_dict, 'ID')\nif None in (configuration, driver, identifier):\n return False\nreturn True"], "bodies_text": "<|body_start_0|>\n iso8601_string = self._GetJSONValue(json_dict, name)\n if not iso8601_string:\n return None\n if name == 'FinishedAt' and iso8601_string == '0001-01-01T00:00:00Z':\n return None\n try:\n date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()\n date_time.CopyFromStringISO8601(iso8601_string)\n except ValueError as exception:\n parser_mediator.ProduceExtractionWarning('Unable to parse value: {0:s} ISO8601 string: {1:s} with error: {2!s}'.format(name, iso8601_string, exception))\n return None\n return date_time\n<|end_body_0|>\n\n<|body_start_1|>\n json_state = self._GetJSONValue(json_dict, 'State', default_value={})\n configuration = self._GetJSONValue(json_dict, 'Config', default_value={})\n event_data = DockerContainerConfigurationEventData()\n event_data.container_identifier = self._GetJSONValue(json_dict, 'ID')\n event_data.container_name = self._GetJSONValue(configuration, 'Hostname', default_value='Unknown container name')\n event_data.creation_time = self._ParseISO8601DateTimeString(parser_mediator, json_dict, 'Created')\n event_data.end_time = self._ParseISO8601DateTimeString(parser_mediator, json_state, 'FinishedAt')\n event_data.start_time = self._ParseISO8601DateTimeString(parser_mediator, json_state, 'StartedAt')\n parser_mediator.ProduceEventData(event_data)\n<|end_body_1|>\n\n<|body_start_2|>\n configuration = self._GetJSONValue(json_dict, 'Config')\n driver = self._GetJSONValue(json_dict, 'Driver')\n identifier = self._GetJSONValue(json_dict, 'ID')\n if None in (configuration, driver, identifier):\n return False\n return True\n<|end_body_2|>\n", "class_docstring": "JSON-L parser plugin for Docker container configuration files. This parser handles per Docker container configuration files stored in: DOCKER_DIR/containers//config.json", "class_name": "DockerContainerConfigurationJSONLPlugin", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DockerContainerConfigurationJSONLPlugin:\n \"\"\"JSON-L parser plugin for Docker container configuration files. This parser handles per Docker container configuration files stored in: DOCKER_DIR/containers//config.json\"\"\"\n\n def _ParseISO8601DateTimeString(self, parser_mediator, json_dict, name):\n \"\"\"Parses an ISO8601 date and time string. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. json_dict (dict): JSON dictionary. name (str): name of the value to retrieve. Returns: dfdatetime.TimeElementsInMicroseconds: date and time value or None if not available.\"\"\"\n <|body_0|>\n\n def _ParseRecord(self, parser_mediator, json_dict):\n \"\"\"Parses a Docker container configuration record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. json_dict (dict): JSON dictionary of the configuration record.\"\"\"\n <|body_1|>\n\n def CheckRequiredFormat(self, json_dict):\n \"\"\"Check if the record has the minimal structure required by the plugin. Args: json_dict (dict): JSON dictionary of the configuration record. 
Returns: bool: True if this is the correct parser, False otherwise.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n iso8601_string = self._GetJSONValue(json_dict, name)\n if not iso8601_string:\n return None\n if name == 'FinishedAt' and iso8601_string == '0001-01-01T00:00:00Z':\n return None\n try:\n date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()\n date_time.CopyFromStringISO8601(iso8601_string)\n except ValueError as exception:\n parser_mediator.ProduceExtractionWarning('Unable to parse value: {0:s} ISO8601 string: {1:s} with error: {2!s}'.format(name, iso8601_string, exception))\n return None\n return date_time\n<|end_body_0|>\n\n<|body_start_1|>\n json_state = self._GetJSONValue(json_dict, 'State', default_value={})\n configuration = self._GetJSONValue(json_dict, 'Config', default_value={})\n event_data = DockerContainerConfigurationEventData()\n event_data.container_identifier = self._GetJSONValue(json_dict, 'ID')\n event_data.container_name = self._GetJSONValue(configuration, 'Hostname', default_value='Unknown container name')\n event_data.creation_time = self._ParseISO8601DateTimeString(parser_mediator, json_dict, 'Created')\n event_data.end_time = self._ParseISO8601DateTimeString(parser_mediator, json_state, 'FinishedAt')\n event_data.start_time = self._ParseISO8601DateTimeString(parser_mediator, json_state, 'StartedAt')\n parser_mediator.ProduceEventData(event_data)\n<|end_body_1|>\n\n<|body_start_2|>\n configuration = self._GetJSONValue(json_dict, 'Config')\n driver = self._GetJSONValue(json_dict, 'Driver')\n identifier = self._GetJSONValue(json_dict, 'ID')\n if None in (configuration, driver, identifier):\n return False\n return True\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000205", "length_bytes": 4653, "license_type": "permissive", "methods": [{"docstring": "Parses an ISO8601 date and time string. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. json_dict (dict): JSON dictionary. name (str): name of the value to retrieve. Returns: dfdatetime.TimeElementsInMicroseconds: date and time value or None if not available.", "name": "_ParseISO8601DateTimeString", "signature": "def _ParseISO8601DateTimeString(self, parser_mediator, json_dict, name)"}, {"docstring": "Parses a Docker container configuration record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. json_dict (dict): JSON dictionary of the configuration record.", "name": "_ParseRecord", "signature": "def _ParseRecord(self, parser_mediator, json_dict)"}, {"docstring": "Check if the record has the minimal structure required by the plugin. Args: json_dict (dict): JSON dictionary of the configuration record. Returns: bool: True if this is the correct parser, False otherwise.", "name": "CheckRequiredFormat", "signature": "def CheckRequiredFormat(self, json_dict)"}], "n_methods": 3, "prompt": "Implement the Python class `DockerContainerConfigurationJSONLPlugin` described below.\n\nClass description:\nJSON-L parser plugin for Docker container configuration files. This parser handles per Docker container configuration files stored in: DOCKER_DIR/containers//config.json\n\nMethod signatures and docstrings:\n- def _ParseISO8601DateTimeString(self, parser_mediator, json_dict, name): Parses an ISO8601 date and time string. 
Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. json_dict (dict): JSON dictionary. name (str): name of the value to retrieve. Returns: dfdatetime.TimeElementsInMicroseconds: date and time value or None if not available.\n- def _ParseRecord(self, parser_mediator, json_dict): Parses a Docker container configuration record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. json_dict (dict): JSON dictionary of the configuration record.\n- def CheckRequiredFormat(self, json_dict): Check if the record has the minimal structure required by the plugin. Args: json_dict (dict): JSON dictionary of the configuration record. Returns: bool: True if this is the correct parser, False otherwise.", "prompted_full_text": "Implement the Python class `DockerContainerConfigurationJSONLPlugin` described below.\n\nClass description:\nJSON-L parser plugin for Docker container configuration files. This parser handles per Docker container configuration files stored in: DOCKER_DIR/containers//config.json\n\nMethod signatures and docstrings:\n- def _ParseISO8601DateTimeString(self, parser_mediator, json_dict, name): Parses an ISO8601 date and time string. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. json_dict (dict): JSON dictionary. name (str): name of the value to retrieve. Returns: dfdatetime.TimeElementsInMicroseconds: date and time value or None if not available.\n- def _ParseRecord(self, parser_mediator, json_dict): Parses a Docker container configuration record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. json_dict (dict): JSON dictionary of the configuration record.\n- def CheckRequiredFormat(self, json_dict): Check if the record has the minimal structure required by the plugin. Args: json_dict (dict): JSON dictionary of the configuration record. Returns: bool: True if this is the correct parser, False otherwise.\n\n<|skeleton|>\nclass DockerContainerConfigurationJSONLPlugin:\n \"\"\"JSON-L parser plugin for Docker container configuration files. This parser handles per Docker container configuration files stored in: DOCKER_DIR/containers//config.json\"\"\"\n\n def _ParseISO8601DateTimeString(self, parser_mediator, json_dict, name):\n \"\"\"Parses an ISO8601 date and time string. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. json_dict (dict): JSON dictionary. name (str): name of the value to retrieve. Returns: dfdatetime.TimeElementsInMicroseconds: date and time value or None if not available.\"\"\"\n <|body_0|>\n\n def _ParseRecord(self, parser_mediator, json_dict):\n \"\"\"Parses a Docker container configuration record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. json_dict (dict): JSON dictionary of the configuration record.\"\"\"\n <|body_1|>\n\n def CheckRequiredFormat(self, json_dict):\n \"\"\"Check if the record has the minimal structure required by the plugin. Args: json_dict (dict): JSON dictionary of the configuration record. 
Returns: bool: True if this is the correct parser, False otherwise.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n iso8601_string = self._GetJSONValue(json_dict, name)\n if not iso8601_string:\n return None\n if name == 'FinishedAt' and iso8601_string == '0001-01-01T00:00:00Z':\n return None\n try:\n date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()\n date_time.CopyFromStringISO8601(iso8601_string)\n except ValueError as exception:\n parser_mediator.ProduceExtractionWarning('Unable to parse value: {0:s} ISO8601 string: {1:s} with error: {2!s}'.format(name, iso8601_string, exception))\n return None\n return date_time\n<|end_body_0|>\n\n<|body_start_1|>\n json_state = self._GetJSONValue(json_dict, 'State', default_value={})\n configuration = self._GetJSONValue(json_dict, 'Config', default_value={})\n event_data = DockerContainerConfigurationEventData()\n event_data.container_identifier = self._GetJSONValue(json_dict, 'ID')\n event_data.container_name = self._GetJSONValue(configuration, 'Hostname', default_value='Unknown container name')\n event_data.creation_time = self._ParseISO8601DateTimeString(parser_mediator, json_dict, 'Created')\n event_data.end_time = self._ParseISO8601DateTimeString(parser_mediator, json_state, 'FinishedAt')\n event_data.start_time = self._ParseISO8601DateTimeString(parser_mediator, json_state, 'StartedAt')\n parser_mediator.ProduceEventData(event_data)\n<|end_body_1|>\n\n<|body_start_2|>\n configuration = self._GetJSONValue(json_dict, 'Config')\n driver = self._GetJSONValue(json_dict, 'Driver')\n identifier = self._GetJSONValue(json_dict, 'ID')\n if None in (configuration, driver, identifier):\n return False\n return True\n<|end_body_2|>\n", "revision_id": "d6022f8cfebfddf2d08ab2d300a41b61f3349933", "skeleton": "<|skeleton|>\nclass DockerContainerConfigurationJSONLPlugin:\n \"\"\"JSON-L parser plugin for Docker container configuration files. This parser handles per Docker container configuration files stored in: DOCKER_DIR/containers//config.json\"\"\"\n\n def _ParseISO8601DateTimeString(self, parser_mediator, json_dict, name):\n \"\"\"Parses an ISO8601 date and time string. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. json_dict (dict): JSON dictionary. name (str): name of the value to retrieve. Returns: dfdatetime.TimeElementsInMicroseconds: date and time value or None if not available.\"\"\"\n <|body_0|>\n\n def _ParseRecord(self, parser_mediator, json_dict):\n \"\"\"Parses a Docker container configuration record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. json_dict (dict): JSON dictionary of the configuration record.\"\"\"\n <|body_1|>\n\n def CheckRequiredFormat(self, json_dict):\n \"\"\"Check if the record has the minimal structure required by the plugin. Args: json_dict (dict): JSON dictionary of the configuration record. Returns: bool: True if this is the correct parser, False otherwise.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DockerContainerConfigurationJSONLPlugin:\n \"\"\"JSON-L parser plugin for Docker container configuration files. 
This parser handles per Docker container configuration files stored in: DOCKER_DIR/containers//config.json\"\"\"\n\n def _ParseISO8601DateTimeString(self, parser_mediator, json_dict, name):\n \"\"\"Parses an ISO8601 date and time string. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. json_dict (dict): JSON dictionary. name (str): name of the value to retrieve. Returns: dfdatetime.TimeElementsInMicroseconds: date and time value or None if not available.\"\"\"\n iso8601_string = self._GetJSONValue(json_dict, name)\n if not iso8601_string:\n return None\n if name == 'FinishedAt' and iso8601_string == '0001-01-01T00:00:00Z':\n return None\n try:\n date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()\n date_time.CopyFromStringISO8601(iso8601_string)\n except ValueError as exception:\n parser_mediator.ProduceExtractionWarning('Unable to parse value: {0:s} ISO8601 string: {1:s} with error: {2!s}'.format(name, iso8601_string, exception))\n return None\n return date_time\n\n def _ParseRecord(self, parser_mediator, json_dict):\n \"\"\"Parses a Docker container configuration record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. json_dict (dict): JSON dictionary of the configuration record.\"\"\"\n json_state = self._GetJSONValue(json_dict, 'State', default_value={})\n configuration = self._GetJSONValue(json_dict, 'Config', default_value={})\n event_data = DockerContainerConfigurationEventData()\n event_data.container_identifier = self._GetJSONValue(json_dict, 'ID')\n event_data.container_name = self._GetJSONValue(configuration, 'Hostname', default_value='Unknown container name')\n event_data.creation_time = self._ParseISO8601DateTimeString(parser_mediator, json_dict, 'Created')\n event_data.end_time = self._ParseISO8601DateTimeString(parser_mediator, json_state, 'FinishedAt')\n event_data.start_time = self._ParseISO8601DateTimeString(parser_mediator, json_state, 'StartedAt')\n parser_mediator.ProduceEventData(event_data)\n\n def CheckRequiredFormat(self, json_dict):\n \"\"\"Check if the record has the minimal structure required by the plugin. Args: json_dict (dict): JSON dictionary of the configuration record. 
Returns: bool: True if this is the correct parser, False otherwise.\"\"\"\n configuration = self._GetJSONValue(json_dict, 'Config')\n driver = self._GetJSONValue(json_dict, 'Driver')\n identifier = self._GetJSONValue(json_dict, 'ID')\n if None in (configuration, driver, identifier):\n return False\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "plaso/parsers/jsonl_plugins/docker_container_config.py", "source_repo": "log2timeline/plaso", "split": "val", "star_events_count": 1506} {"blob_id": "edb1fdf69cc56d077585116e38f053c168f82b0c", "bodies": ["super(DMCM, self).__init__()\nself.conv_net = cfg.get_image_net(mode)\nself.sparse_net = cfg.get_genes_net(mode)\nself.conv_net.apply(_init_weights_xavier)", "x1, x2 = x\ny1 = self.conv_net.forward(x1)\ny2 = self.sparse_net.forward(x2)\nreturn (y1, y2)"], "bodies_text": "<|body_start_0|>\n super(DMCM, self).__init__()\n self.conv_net = cfg.get_image_net(mode)\n self.sparse_net = cfg.get_genes_net(mode)\n self.conv_net.apply(_init_weights_xavier)\n<|end_body_0|>\n\n<|body_start_1|>\n x1, x2 = x\n y1 = self.conv_net.forward(x1)\n y2 = self.sparse_net.forward(x2)\n return (y1, y2)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DMCM", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DMCM:\n\n def __init__(self, mode, cfg):\n \"\"\"Initialize model for Deep Multimodal Correlation Maximization.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Perform forward pass of images and associated signal through model. Output embeddings y1, y2.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(DMCM, self).__init__()\n self.conv_net = cfg.get_image_net(mode)\n self.sparse_net = cfg.get_genes_net(mode)\n self.conv_net.apply(_init_weights_xavier)\n<|end_body_0|>\n\n<|body_start_1|>\n x1, x2 = x\n y1 = self.conv_net.forward(x1)\n y2 = self.sparse_net.forward(x2)\n return (y1, y2)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000206", "length_bytes": 1803, "license_type": "no_license", "methods": [{"docstring": "Initialize model for Deep Multimodal Correlation Maximization.", "name": "__init__", "signature": "def __init__(self, mode, cfg)"}, {"docstring": "Perform forward pass of images and associated signal through model. Output embeddings y1, y2.", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005026", "prompt": "Implement the Python class `DMCM` described below.\n\nClass description:\nImplement the DMCM class.\n\nMethod signatures and docstrings:\n- def __init__(self, mode, cfg): Initialize model for Deep Multimodal Correlation Maximization.\n- def forward(self, x): Perform forward pass of images and associated signal through model. Output embeddings y1, y2.", "prompted_full_text": "Implement the Python class `DMCM` described below.\n\nClass description:\nImplement the DMCM class.\n\nMethod signatures and docstrings:\n- def __init__(self, mode, cfg): Initialize model for Deep Multimodal Correlation Maximization.\n- def forward(self, x): Perform forward pass of images and associated signal through model. Output embeddings y1, y2.\n\n<|skeleton|>\nclass DMCM:\n\n def __init__(self, mode, cfg):\n \"\"\"Initialize model for Deep Multimodal Correlation Maximization.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Perform forward pass of images and associated signal through model. 
Output embeddings y1, y2.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(DMCM, self).__init__()\n self.conv_net = cfg.get_image_net(mode)\n self.sparse_net = cfg.get_genes_net(mode)\n self.conv_net.apply(_init_weights_xavier)\n<|end_body_0|>\n\n<|body_start_1|>\n x1, x2 = x\n y1 = self.conv_net.forward(x1)\n y2 = self.sparse_net.forward(x2)\n return (y1, y2)\n<|end_body_1|>\n", "revision_id": "1b65fc0c3ec6b182907ba070e859c1d92fc98942", "skeleton": "<|skeleton|>\nclass DMCM:\n\n def __init__(self, mode, cfg):\n \"\"\"Initialize model for Deep Multimodal Correlation Maximization.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Perform forward pass of images and associated signal through model. Output embeddings y1, y2.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DMCM:\n def __init__(self, mode, cfg):\n \"\"\"Initialize model for Deep Multimodal Correlation Maximization.\"\"\"\n super(DMCM, self).__init__()\n self.conv_net = cfg.get_image_net(mode)\n self.sparse_net = cfg.get_genes_net(mode)\n self.conv_net.apply(_init_weights_xavier)\n\n def forward(self, x):\n \"\"\"Perform forward pass of images and associated signal through model. Output embeddings y1, y2.\"\"\"\n x1, x2 = x\n y1 = self.conv_net.forward(x1)\n y2 = self.sparse_net.forward(x2)\n return (y1, y2)\n", "source": "the_stack_v2_python_sparse", "source_path": "models/dmcm.py", "source_repo": "KaiqianZhang/dpcca_v8", "split": "val", "star_events_count": 1} {"blob_id": "7652bc8c9b4d40ab67a82c9f5a74fb24a749d89e", "bodies": ["mock_zhifu.return_value = {'result': 'success', 'reason': 'null'}\nstatues = temple.zhifu_statues()\nprint(statues)\nself.assertEqual(statues, '支付成功')", "mock_zhifu.return_value = {'result': 'fail', 'reason': '余额不足'}\nstatues = temple.zhifu_statues()\nself.assertEqual(statues, '支付失败')"], "bodies_text": "<|body_start_0|>\n mock_zhifu.return_value = {'result': 'success', 'reason': 'null'}\n statues = temple.zhifu_statues()\n print(statues)\n self.assertEqual(statues, '支付成功')\n<|end_body_0|>\n\n<|body_start_1|>\n mock_zhifu.return_value = {'result': 'fail', 'reason': '余额不足'}\n statues = temple.zhifu_statues()\n self.assertEqual(statues, '支付失败')\n<|end_body_1|>\n", "class_docstring": "单元测试用例", "class_name": "Test_zhifu_statues_withpatch", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Test_zhifu_statues_withpatch:\n \"\"\"单元测试用例\"\"\"\n\n def test_01(self, mock_zhifu):\n \"\"\"测试支付成功场景\"\"\"\n <|body_0|>\n\n def test_02(self, mock_zhifu):\n \"\"\"测试支付失败场景\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mock_zhifu.return_value = {'result': 'success', 'reason': 'null'}\n statues = temple.zhifu_statues()\n print(statues)\n self.assertEqual(statues, '支付成功')\n<|end_body_0|>\n\n<|body_start_1|>\n mock_zhifu.return_value = {'result': 'fail', 'reason': '余额不足'}\n statues = temple.zhifu_statues()\n self.assertEqual(statues, '支付失败')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000207", "length_bytes": 4862, "license_type": "no_license", "methods": [{"docstring": "测试支付成功场景", "name": "test_01", "signature": "def test_01(self, mock_zhifu)"}, {"docstring": "测试支付失败场景", "name": "test_02", "signature": "def test_02(self, mock_zhifu)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000251", "prompt": "Implement the Python class `Test_zhifu_statues_withpatch` described 
below.\n\nClass description:\n单元测试用例\n\nMethod signatures and docstrings:\n- def test_01(self, mock_zhifu): 测试支付成功场景\n- def test_02(self, mock_zhifu): 测试支付失败场景", "prompted_full_text": "Implement the Python class `Test_zhifu_statues_withpatch` described below.\n\nClass description:\n单元测试用例\n\nMethod signatures and docstrings:\n- def test_01(self, mock_zhifu): 测试支付成功场景\n- def test_02(self, mock_zhifu): 测试支付失败场景\n\n<|skeleton|>\nclass Test_zhifu_statues_withpatch:\n \"\"\"单元测试用例\"\"\"\n\n def test_01(self, mock_zhifu):\n \"\"\"测试支付成功场景\"\"\"\n <|body_0|>\n\n def test_02(self, mock_zhifu):\n \"\"\"测试支付失败场景\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mock_zhifu.return_value = {'result': 'success', 'reason': 'null'}\n statues = temple.zhifu_statues()\n print(statues)\n self.assertEqual(statues, '支付成功')\n<|end_body_0|>\n\n<|body_start_1|>\n mock_zhifu.return_value = {'result': 'fail', 'reason': '余额不足'}\n statues = temple.zhifu_statues()\n self.assertEqual(statues, '支付失败')\n<|end_body_1|>\n", "revision_id": "a58fdcc3eb0b52c94e50a110b4f1a053c6fa0ab2", "skeleton": "<|skeleton|>\nclass Test_zhifu_statues_withpatch:\n \"\"\"单元测试用例\"\"\"\n\n def test_01(self, mock_zhifu):\n \"\"\"测试支付成功场景\"\"\"\n <|body_0|>\n\n def test_02(self, mock_zhifu):\n \"\"\"测试支付失败场景\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Test_zhifu_statues_withpatch:\n \"\"\"单元测试用例\"\"\"\n\n def test_01(self, mock_zhifu):\n \"\"\"测试支付成功场景\"\"\"\n mock_zhifu.return_value = {'result': 'success', 'reason': 'null'}\n statues = temple.zhifu_statues()\n print(statues)\n self.assertEqual(statues, '支付成功')\n\n def test_02(self, mock_zhifu):\n \"\"\"测试支付失败场景\"\"\"\n mock_zhifu.return_value = {'result': 'fail', 'reason': '余额不足'}\n statues = temple.zhifu_statues()\n self.assertEqual(statues, '支付失败')\n", "source": "the_stack_v2_python_sparse", "source_path": "testcase/test_temple.py", "source_repo": "yangyilin182/IotInterFace", "split": "val", "star_events_count": 0} {"blob_id": "d492e81a2db18bc848c6b184fa503cf514a7d001", "bodies": ["self.time = 0\nself.time_old = 0\nif initial_current < 0:\n initial_current = 0\nif initial_voltage < 0:\n initial_voltage = 0\nself.current_old = initial_current\nself.voltage_old = initial_voltage\nself.current = initial_current\nself.voltage = initial_voltage", "self.time_old = self.time\nif time_new < 0:\n self.time = self.time\nelse:\n self.time = time_new\nself.current_old = self.current\nif new_current < 0:\n self.current = self.current\nelse:\n self.current = new_current\nself.voltage_old = self.voltage\nif new_voltage < 0:\n self.voltage = self.voltage\nelse:\n self.voltage = new_voltage\nenergy_consumed = (self.current + self.current_old) / 2 * ((self.voltage + self.voltage_old) / 2) * (self.time - self.time_old)\nreturn energy_consumed"], "bodies_text": "<|body_start_0|>\n self.time = 0\n self.time_old = 0\n if initial_current < 0:\n initial_current = 0\n if initial_voltage < 0:\n initial_voltage = 0\n self.current_old = initial_current\n self.voltage_old = initial_voltage\n self.current = initial_current\n self.voltage = initial_voltage\n<|end_body_0|>\n\n<|body_start_1|>\n self.time_old = self.time\n if time_new < 0:\n self.time = self.time\n else:\n self.time = time_new\n self.current_old = self.current\n if new_current < 0:\n self.current = self.current\n else:\n self.current = new_current\n self.voltage_old = self.voltage\n if new_voltage < 0:\n self.voltage = 
self.voltage\n else:\n self.voltage = new_voltage\n energy_consumed = (self.current + self.current_old) / 2 * ((self.voltage + self.voltage_old) / 2) * (self.time - self.time_old)\n return energy_consumed\n<|end_body_1|>\n", "class_docstring": "", "class_name": "AuxSystem", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AuxSystem:\n\n def __init__(self, initial_current, initial_voltage):\n \"\"\"Initialize Auxiliary System object\"\"\"\n <|body_0|>\n\n def energy_consumed(self, time_new, new_current, new_voltage):\n \"\"\"Calculate energy consumed by Auxiliary system. Power (W) = Current (A) x Voltage (V) Energy (J) = Power (W) x Time (s) Energy = Current (A) x Voltage (V) x Time (s)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.time = 0\n self.time_old = 0\n if initial_current < 0:\n initial_current = 0\n if initial_voltage < 0:\n initial_voltage = 0\n self.current_old = initial_current\n self.voltage_old = initial_voltage\n self.current = initial_current\n self.voltage = initial_voltage\n<|end_body_0|>\n\n<|body_start_1|>\n self.time_old = self.time\n if time_new < 0:\n self.time = self.time\n else:\n self.time = time_new\n self.current_old = self.current\n if new_current < 0:\n self.current = self.current\n else:\n self.current = new_current\n self.voltage_old = self.voltage\n if new_voltage < 0:\n self.voltage = self.voltage\n else:\n self.voltage = new_voltage\n energy_consumed = (self.current + self.current_old) / 2 * ((self.voltage + self.voltage_old) / 2) * (self.time - self.time_old)\n return energy_consumed\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000208", "length_bytes": 1950, "license_type": "no_license", "methods": [{"docstring": "Initialize Auxiliary System object", "name": "__init__", "signature": "def __init__(self, initial_current, initial_voltage)"}, {"docstring": "Calculate energy consumed by Auxiliary system. Power (W) = Current (A) x Voltage (V) Energy (J) = Power (W) x Time (s) Energy = Current (A) x Voltage (V) x Time (s)", "name": "energy_consumed", "signature": "def energy_consumed(self, time_new, new_current, new_voltage)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001936", "prompt": "Implement the Python class `AuxSystem` described below.\n\nClass description:\nImplement the AuxSystem class.\n\nMethod signatures and docstrings:\n- def __init__(self, initial_current, initial_voltage): Initialize Auxiliary System object\n- def energy_consumed(self, time_new, new_current, new_voltage): Calculate energy consumed by Auxiliary system. Power (W) = Current (A) x Voltage (V) Energy (J) = Power (W) x Time (s) Energy = Current (A) x Voltage (V) x Time (s)", "prompted_full_text": "Implement the Python class `AuxSystem` described below.\n\nClass description:\nImplement the AuxSystem class.\n\nMethod signatures and docstrings:\n- def __init__(self, initial_current, initial_voltage): Initialize Auxiliary System object\n- def energy_consumed(self, time_new, new_current, new_voltage): Calculate energy consumed by Auxiliary system. Power (W) = Current (A) x Voltage (V) Energy (J) = Power (W) x Time (s) Energy = Current (A) x Voltage (V) x Time (s)\n\n<|skeleton|>\nclass AuxSystem:\n\n def __init__(self, initial_current, initial_voltage):\n \"\"\"Initialize Auxiliary System object\"\"\"\n <|body_0|>\n\n def energy_consumed(self, time_new, new_current, new_voltage):\n \"\"\"Calculate energy consumed by Auxiliary system. 
Power (W) = Current (A) x Voltage (V) Energy (J) = Power (W) x Time (s) Energy = Current (A) x Voltage (V) x Time (s)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.time = 0\n self.time_old = 0\n if initial_current < 0:\n initial_current = 0\n if initial_voltage < 0:\n initial_voltage = 0\n self.current_old = initial_current\n self.voltage_old = initial_voltage\n self.current = initial_current\n self.voltage = initial_voltage\n<|end_body_0|>\n\n<|body_start_1|>\n self.time_old = self.time\n if time_new < 0:\n self.time = self.time\n else:\n self.time = time_new\n self.current_old = self.current\n if new_current < 0:\n self.current = self.current\n else:\n self.current = new_current\n self.voltage_old = self.voltage\n if new_voltage < 0:\n self.voltage = self.voltage\n else:\n self.voltage = new_voltage\n energy_consumed = (self.current + self.current_old) / 2 * ((self.voltage + self.voltage_old) / 2) * (self.time - self.time_old)\n return energy_consumed\n<|end_body_1|>\n", "revision_id": "73cb4b6e42d8b0b83ccde98affb6bc8abd13549b", "skeleton": "<|skeleton|>\nclass AuxSystem:\n\n def __init__(self, initial_current, initial_voltage):\n \"\"\"Initialize Auxiliary System object\"\"\"\n <|body_0|>\n\n def energy_consumed(self, time_new, new_current, new_voltage):\n \"\"\"Calculate energy consumed by Auxiliary system. Power (W) = Current (A) x Voltage (V) Energy (J) = Power (W) x Time (s) Energy = Current (A) x Voltage (V) x Time (s)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AuxSystem:\n def __init__(self, initial_current, initial_voltage):\n \"\"\"Initialize Auxiliary System object\"\"\"\n self.time = 0\n self.time_old = 0\n if initial_current < 0:\n initial_current = 0\n if initial_voltage < 0:\n initial_voltage = 0\n self.current_old = initial_current\n self.voltage_old = initial_voltage\n self.current = initial_current\n self.voltage = initial_voltage\n\n def energy_consumed(self, time_new, new_current, new_voltage):\n \"\"\"Calculate energy consumed by Auxiliary system. 
Power (W) = Current (A) x Voltage (V) Energy (J) = Power (W) x Time (s) Energy = Current (A) x Voltage (V) x Time (s)\"\"\"\n self.time_old = self.time\n if time_new < 0:\n self.time = self.time\n else:\n self.time = time_new\n self.current_old = self.current\n if new_current < 0:\n self.current = self.current\n else:\n self.current = new_current\n self.voltage_old = self.voltage\n if new_voltage < 0:\n self.voltage = self.voltage\n else:\n self.voltage = new_voltage\n energy_consumed = (self.current + self.current_old) / 2 * ((self.voltage + self.voltage_old) / 2) * (self.time - self.time_old)\n return energy_consumed\n", "source": "the_stack_v2_python_sparse", "source_path": "auxloss/auxsystem.py", "source_repo": "uw-midsun/strategy", "split": "val", "star_events_count": 19} {"blob_id": "7695c16a2268e1a0c635dcb9e2d72cad3ce82e00", "bodies": ["cnt = 0\nk = 0\nwhile True:\n k += 1\n x0k = N - k * (k - 1) // 2\n if x0k <= 0:\n break\n if x0k % k == 0:\n cnt += 1\nreturn cnt", "cnt = 0\nfor k in range(1, int(N ** 0.5)):\n x0k = N - k * (k - 1) // 2\n if x0k % k == 0:\n cnt += 1\nreturn cnt", "if N == 1:\n return 1\ncnt = 0\nfor i in range(1, N):\n d = N // i\n r = N % i\n if r == 0 and d - i // 2 > 0:\n cnt += 1\n elif r == 1 and N == (d + d + 1) * i // 2:\n cnt += 1\nreturn cnt"], "bodies_text": "<|body_start_0|>\n cnt = 0\n k = 0\n while True:\n k += 1\n x0k = N - k * (k - 1) // 2\n if x0k <= 0:\n break\n if x0k % k == 0:\n cnt += 1\n return cnt\n<|end_body_0|>\n\n<|body_start_1|>\n cnt = 0\n for k in range(1, int(N ** 0.5)):\n x0k = N - k * (k - 1) // 2\n if x0k % k == 0:\n cnt += 1\n return cnt\n<|end_body_1|>\n\n<|body_start_2|>\n if N == 1:\n return 1\n cnt = 0\n for i in range(1, N):\n d = N // i\n r = N % i\n if r == 0 and d - i // 2 > 0:\n cnt += 1\n elif r == 1 and N == (d + d + 1) * i // 2:\n cnt += 1\n return cnt\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def consecutiveNumbersSum(self, N: int) -> int:\n \"\"\"Arithmetic Array math (x0 + xn) * (xn - x0 + 1) / 2 = N xn = x0 + k - 1 (2x0 + k - 1) * k / 2 = N 2x0 = 2N / k - k + 1 x0 * k = N - k * (k - 1) / 2 # assure for divisibility\"\"\"\n <|body_0|>\n\n def consecutiveNumbersSum_error(self, N: int) -> int:\n \"\"\"Arithmetic Array math (x0 + xn) * (xn - x0 + 1) / 2 = N xn = x0 + k - 1 (2x0 + k - 1) * k / 2 = N 2x0 = 2N / k - k + 1 x0 * k = N - k * (k - 1) / 2 # assure for divisibility\"\"\"\n <|body_1|>\n\n def consecutiveNumbersSum_error(self, N: int) -> int:\n \"\"\"factor related 9 / 3 = 3\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cnt = 0\n k = 0\n while True:\n k += 1\n x0k = N - k * (k - 1) // 2\n if x0k <= 0:\n break\n if x0k % k == 0:\n cnt += 1\n return cnt\n<|end_body_0|>\n\n<|body_start_1|>\n cnt = 0\n for k in range(1, int(N ** 0.5)):\n x0k = N - k * (k - 1) // 2\n if x0k % k == 0:\n cnt += 1\n return cnt\n<|end_body_1|>\n\n<|body_start_2|>\n if N == 1:\n return 1\n cnt = 0\n for i in range(1, N):\n d = N // i\n r = N % i\n if r == 0 and d - i // 2 > 0:\n cnt += 1\n elif r == 1 and N == (d + d + 1) * i // 2:\n cnt += 1\n return cnt\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000209", "length_bytes": 1863, "license_type": "no_license", "methods": [{"docstring": "Arithmetic Array math (x0 + xn) * (xn - x0 + 1) / 2 = N xn = x0 + k - 1 (2x0 + k - 1) * k / 2 = N 2x0 = 2N / k - k + 1 x0 * k = N - k * (k - 1) / 2 # assure for divisibility", 
"name": "consecutiveNumbersSum", "signature": "def consecutiveNumbersSum(self, N: int) -> int"}, {"docstring": "Arithmetic Array math (x0 + xn) * (xn - x0 + 1) / 2 = N xn = x0 + k - 1 (2x0 + k - 1) * k / 2 = N 2x0 = 2N / k - k + 1 x0 * k = N - k * (k - 1) / 2 # assure for divisibility", "name": "consecutiveNumbersSum_error", "signature": "def consecutiveNumbersSum_error(self, N: int) -> int"}, {"docstring": "factor related 9 / 3 = 3", "name": "consecutiveNumbersSum_error", "signature": "def consecutiveNumbersSum_error(self, N: int) -> int"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001017", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def consecutiveNumbersSum(self, N: int) -> int: Arithmetic Array math (x0 + xn) * (xn - x0 + 1) / 2 = N xn = x0 + k - 1 (2x0 + k - 1) * k / 2 = N 2x0 = 2N / k - k + 1 x0 * k = N - k * (k - 1) / 2 # assure for divisibility\n- def consecutiveNumbersSum_error(self, N: int) -> int: Arithmetic Array math (x0 + xn) * (xn - x0 + 1) / 2 = N xn = x0 + k - 1 (2x0 + k - 1) * k / 2 = N 2x0 = 2N / k - k + 1 x0 * k = N - k * (k - 1) / 2 # assure for divisibility\n- def consecutiveNumbersSum_error(self, N: int) -> int: factor related 9 / 3 = 3", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def consecutiveNumbersSum(self, N: int) -> int: Arithmetic Array math (x0 + xn) * (xn - x0 + 1) / 2 = N xn = x0 + k - 1 (2x0 + k - 1) * k / 2 = N 2x0 = 2N / k - k + 1 x0 * k = N - k * (k - 1) / 2 # assure for divisibility\n- def consecutiveNumbersSum_error(self, N: int) -> int: Arithmetic Array math (x0 + xn) * (xn - x0 + 1) / 2 = N xn = x0 + k - 1 (2x0 + k - 1) * k / 2 = N 2x0 = 2N / k - k + 1 x0 * k = N - k * (k - 1) / 2 # assure for divisibility\n- def consecutiveNumbersSum_error(self, N: int) -> int: factor related 9 / 3 = 3\n\n<|skeleton|>\nclass Solution:\n\n def consecutiveNumbersSum(self, N: int) -> int:\n \"\"\"Arithmetic Array math (x0 + xn) * (xn - x0 + 1) / 2 = N xn = x0 + k - 1 (2x0 + k - 1) * k / 2 = N 2x0 = 2N / k - k + 1 x0 * k = N - k * (k - 1) / 2 # assure for divisibility\"\"\"\n <|body_0|>\n\n def consecutiveNumbersSum_error(self, N: int) -> int:\n \"\"\"Arithmetic Array math (x0 + xn) * (xn - x0 + 1) / 2 = N xn = x0 + k - 1 (2x0 + k - 1) * k / 2 = N 2x0 = 2N / k - k + 1 x0 * k = N - k * (k - 1) / 2 # assure for divisibility\"\"\"\n <|body_1|>\n\n def consecutiveNumbersSum_error(self, N: int) -> int:\n \"\"\"factor related 9 / 3 = 3\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cnt = 0\n k = 0\n while True:\n k += 1\n x0k = N - k * (k - 1) // 2\n if x0k <= 0:\n break\n if x0k % k == 0:\n cnt += 1\n return cnt\n<|end_body_0|>\n\n<|body_start_1|>\n cnt = 0\n for k in range(1, int(N ** 0.5)):\n x0k = N - k * (k - 1) // 2\n if x0k % k == 0:\n cnt += 1\n return cnt\n<|end_body_1|>\n\n<|body_start_2|>\n if N == 1:\n return 1\n cnt = 0\n for i in range(1, N):\n d = N // i\n r = N % i\n if r == 0 and d - i // 2 > 0:\n cnt += 1\n elif r == 1 and N == (d + d + 1) * i // 2:\n cnt += 1\n return cnt\n<|end_body_2|>\n", "revision_id": "929dde1723fb2f54870c8a9badc80fc23e8400d3", "skeleton": "<|skeleton|>\nclass Solution:\n\n def consecutiveNumbersSum(self, N: int) -> int:\n \"\"\"Arithmetic Array math (x0 + xn) * (xn - x0 + 1) / 2 = N xn = x0 + k - 1 (2x0 + k - 1) * k / 2 = N 2x0 = 2N / k - k + 1 x0 * k = N 
- k * (k - 1) / 2 # assure for divisibility\"\"\"\n <|body_0|>\n\n def consecutiveNumbersSum_error(self, N: int) -> int:\n \"\"\"Arithmetic Array math (x0 + xn) * (xn - x0 + 1) / 2 = N xn = x0 + k - 1 (2x0 + k - 1) * k / 2 = N 2x0 = 2N / k - k + 1 x0 * k = N - k * (k - 1) / 2 # assure for divisibility\"\"\"\n <|body_1|>\n\n def consecutiveNumbersSum_error(self, N: int) -> int:\n \"\"\"factor related 9 / 3 = 3\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def consecutiveNumbersSum(self, N: int) -> int:\n \"\"\"Arithmetic Array math (x0 + xn) * (xn - x0 + 1) / 2 = N xn = x0 + k - 1 (2x0 + k - 1) * k / 2 = N 2x0 = 2N / k - k + 1 x0 * k = N - k * (k - 1) / 2 # assure for divisibility\"\"\"\n cnt = 0\n k = 0\n while True:\n k += 1\n x0k = N - k * (k - 1) // 2\n if x0k <= 0:\n break\n if x0k % k == 0:\n cnt += 1\n return cnt\n\n def consecutiveNumbersSum_error(self, N: int) -> int:\n \"\"\"Arithmetic Array math (x0 + xn) * (xn - x0 + 1) / 2 = N xn = x0 + k - 1 (2x0 + k - 1) * k / 2 = N 2x0 = 2N / k - k + 1 x0 * k = N - k * (k - 1) / 2 # assure for divisibility\"\"\"\n cnt = 0\n for k in range(1, int(N ** 0.5)):\n x0k = N - k * (k - 1) // 2\n if x0k % k == 0:\n cnt += 1\n return cnt\n\n def consecutiveNumbersSum_error(self, N: int) -> int:\n \"\"\"factor related 9 / 3 = 3\"\"\"\n if N == 1:\n return 1\n cnt = 0\n for i in range(1, N):\n d = N // i\n r = N % i\n if r == 0 and d - i // 2 > 0:\n cnt += 1\n elif r == 1 and N == (d + d + 1) * i // 2:\n cnt += 1\n return cnt\n", "source": "the_stack_v2_python_sparse", "source_path": "_algorithms_challenges/leetcode/LeetCode/829 Consecutive Numbers Sum.py", "source_repo": "syurskyi/Algorithms_and_Data_Structure", "split": "val", "star_events_count": 4} {"blob_id": "4c63fbd7c64251ffc4ab2e8b269460ee951a99fd", "bodies": ["queryset = model_admin.get_queryset(request)\nresults = queryset.values_list('country').order_by('country').distinct()\ndata = ((code[0] or 'none', dict(COUNTRIES).get(code[0], _('None'))) for code in results if code[0] not in ['None', ''])\nreturn data", "value = self.value()\nif value == 'none':\n return queryset.filter(country='')\nelif value:\n return queryset.filter(country=value)"], "bodies_text": "<|body_start_0|>\n queryset = model_admin.get_queryset(request)\n results = queryset.values_list('country').order_by('country').distinct()\n data = ((code[0] or 'none', dict(COUNTRIES).get(code[0], _('None'))) for code in results if code[0] not in ['None', ''])\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n value = self.value()\n if value == 'none':\n return queryset.filter(country='')\n elif value:\n return queryset.filter(country=value)\n<|end_body_1|>\n", "class_docstring": "Admin filter for IP countries", "class_name": "IPCountryFilter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IPCountryFilter:\n \"\"\"Admin filter for IP countries\"\"\"\n\n def lookups(self, request, model_admin):\n \"\"\"Return the country options\"\"\"\n <|body_0|>\n\n def queryset(self, request, queryset):\n \"\"\"Filter the queryset by the selected country\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n queryset = model_admin.get_queryset(request)\n results = queryset.values_list('country').order_by('country').distinct()\n data = ((code[0] or 'none', dict(COUNTRIES).get(code[0], _('None'))) for code in results if code[0] not in ['None', 
''])\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n value = self.value()\n if value == 'none':\n return queryset.filter(country='')\n elif value:\n return queryset.filter(country=value)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000210", "length_bytes": 1802, "license_type": "no_license", "methods": [{"docstring": "Return the country options", "name": "lookups", "signature": "def lookups(self, request, model_admin)"}, {"docstring": "Filter the queryset by the selected country", "name": "queryset", "signature": "def queryset(self, request, queryset)"}], "n_methods": 2, "prompt": "Implement the Python class `IPCountryFilter` described below.\n\nClass description:\nAdmin filter for IP countries\n\nMethod signatures and docstrings:\n- def lookups(self, request, model_admin): Return the country options\n- def queryset(self, request, queryset): Filter the queryset by the selected country", "prompted_full_text": "Implement the Python class `IPCountryFilter` described below.\n\nClass description:\nAdmin filter for IP countries\n\nMethod signatures and docstrings:\n- def lookups(self, request, model_admin): Return the country options\n- def queryset(self, request, queryset): Filter the queryset by the selected country\n\n<|skeleton|>\nclass IPCountryFilter:\n \"\"\"Admin filter for IP countries\"\"\"\n\n def lookups(self, request, model_admin):\n \"\"\"Return the country options\"\"\"\n <|body_0|>\n\n def queryset(self, request, queryset):\n \"\"\"Filter the queryset by the selected country\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n queryset = model_admin.get_queryset(request)\n results = queryset.values_list('country').order_by('country').distinct()\n data = ((code[0] or 'none', dict(COUNTRIES).get(code[0], _('None'))) for code in results if code[0] not in ['None', ''])\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n value = self.value()\n if value == 'none':\n return queryset.filter(country='')\n elif value:\n return queryset.filter(country=value)\n<|end_body_1|>\n", "revision_id": "8cef6f6e89c1990e2b25f83e54e0c3481d83b6d7", "skeleton": "<|skeleton|>\nclass IPCountryFilter:\n \"\"\"Admin filter for IP countries\"\"\"\n\n def lookups(self, request, model_admin):\n \"\"\"Return the country options\"\"\"\n <|body_0|>\n\n def queryset(self, request, queryset):\n \"\"\"Filter the queryset by the selected country\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class IPCountryFilter:\n \"\"\"Admin filter for IP countries\"\"\"\n\n def lookups(self, request, model_admin):\n \"\"\"Return the country options\"\"\"\n queryset = model_admin.get_queryset(request)\n results = queryset.values_list('country').order_by('country').distinct()\n data = ((code[0] or 'none', dict(COUNTRIES).get(code[0], _('None'))) for code in results if code[0] not in ['None', ''])\n return data\n\n def queryset(self, request, queryset):\n \"\"\"Filter the queryset by the selected country\"\"\"\n value = self.value()\n if value == 'none':\n return queryset.filter(country='')\n elif value:\n return queryset.filter(country=value)\n", "source": "the_stack_v2_python_sparse", "source_path": "scoop/user/access/admin/filters.py", "source_repo": "artscoop/scoop", "split": "val", "star_events_count": 0} {"blob_id": "0128e38667ef8026c1aa51c0d2c8516097af9435", "bodies": ["if request.user.is_authenticated():\n appointment = Appointment.objects.get(id=appointment_id)\n user = 
UserFactory.get_user(request)\n if user != None and appointment.can_view(user):\n appointment_update_form = self.update_form_class(None)\n form_data = {'description': appointment.description, 'location': appointment.location, 'date': appointment.date, 'time': appointment.time, 'endTime': appointment.endTime}\n appointment_update_form = self.update_form_class(initial=form_data)\n return render(request, self.template_name, {'appointment_update_form': appointment_update_form})\n else:\n return HttpResponse('You tried editing an appointment that is not yours')\nreturn redirect('/userprofile/login')", "if request.user.is_authenticated():\n appointment = Appointment.objects.get(id=appointment_id)\n appointment_update_form = self.update_form_class(request.POST, instance=appointment)\n if appointment_update_form.is_valid():\n appointment.save()\n logEntry = Entry()\n logEntry.user = request.user.username\n logEntry.trigger = 'appointment.views.AppointmentEdit'\n logEntry.activity = 'Updated Appointment'\n logEntry.save()\n return redirect('/employee/appointment/view')\n return render(request, self.template_name, {'appointment_update_form': appointment_update_form})\nreturn redirect('/userprofile/login')"], "bodies_text": "<|body_start_0|>\n if request.user.is_authenticated():\n appointment = Appointment.objects.get(id=appointment_id)\n user = UserFactory.get_user(request)\n if user != None and appointment.can_view(user):\n appointment_update_form = self.update_form_class(None)\n form_data = {'description': appointment.description, 'location': appointment.location, 'date': appointment.date, 'time': appointment.time, 'endTime': appointment.endTime}\n appointment_update_form = self.update_form_class(initial=form_data)\n return render(request, self.template_name, {'appointment_update_form': appointment_update_form})\n else:\n return HttpResponse('You tried editing an appointment that is not yours')\n return redirect('/userprofile/login')\n<|end_body_0|>\n\n<|body_start_1|>\n if request.user.is_authenticated():\n appointment = Appointment.objects.get(id=appointment_id)\n appointment_update_form = self.update_form_class(request.POST, instance=appointment)\n if appointment_update_form.is_valid():\n appointment.save()\n logEntry = Entry()\n logEntry.user = request.user.username\n logEntry.trigger = 'appointment.views.AppointmentEdit'\n logEntry.activity = 'Updated Appointment'\n logEntry.save()\n return redirect('/employee/appointment/view')\n return render(request, self.template_name, {'appointment_update_form': appointment_update_form})\n return redirect('/userprofile/login')\n<|end_body_1|>\n", "class_docstring": "Edit existing appointments using the UpdateAppointmentsForm", "class_name": "DoctorAppointmentEdit", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DoctorAppointmentEdit:\n \"\"\"Edit existing appointments using the UpdateAppointmentsForm\"\"\"\n\n def get(self, request, appointment_id):\n \"\"\"When loading the page, the fields should be filled with their existing values :param request: The user who is requesting the page :param appointment_id: The unique id of the appointment to be edited :return: If the request is valid, a page that has a form loaded with the current appointment information. 
Otherwise, an HttpResponse with an error message\"\"\"\n <|body_0|>\n\n def post(self, request, appointment_id):\n \"\"\"Submitting the form will update the current appointment :param request: The user who is requesting the page :param appointment_id: The unique id of the Appointment to be updated :return: If the form is valid, redirect the user to the views page. If the form is not valid, re-render the page\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if request.user.is_authenticated():\n appointment = Appointment.objects.get(id=appointment_id)\n user = UserFactory.get_user(request)\n if user != None and appointment.can_view(user):\n appointment_update_form = self.update_form_class(None)\n form_data = {'description': appointment.description, 'location': appointment.location, 'date': appointment.date, 'time': appointment.time, 'endTime': appointment.endTime}\n appointment_update_form = self.update_form_class(initial=form_data)\n return render(request, self.template_name, {'appointment_update_form': appointment_update_form})\n else:\n return HttpResponse('You tried editing an appointment that is not yours')\n return redirect('/userprofile/login')\n<|end_body_0|>\n\n<|body_start_1|>\n if request.user.is_authenticated():\n appointment = Appointment.objects.get(id=appointment_id)\n appointment_update_form = self.update_form_class(request.POST, instance=appointment)\n if appointment_update_form.is_valid():\n appointment.save()\n logEntry = Entry()\n logEntry.user = request.user.username\n logEntry.trigger = 'appointment.views.AppointmentEdit'\n logEntry.activity = 'Updated Appointment'\n logEntry.save()\n return redirect('/employee/appointment/view')\n return render(request, self.template_name, {'appointment_update_form': appointment_update_form})\n return redirect('/userprofile/login')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000211", "length_bytes": 21300, "license_type": "no_license", "methods": [{"docstring": "When loading the page, the fields should be filled with their existing values :param request: The user who is requesting the page :param appointment_id: The unique id of the appointment to be edited :return: If the request is valid, a page that has a form loaded with the current appointment information. Otherwise, an HttpResponse with an error message", "name": "get", "signature": "def get(self, request, appointment_id)"}, {"docstring": "Submitting the form will update the current appointment :param request: The user who is requesting the page :param appointment_id: The unique id of the Appointment to be updated :return: If the form is valid, redirect the user to the views page. If the form is not valid, re-render the page", "name": "post", "signature": "def post(self, request, appointment_id)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005890", "prompt": "Implement the Python class `DoctorAppointmentEdit` described below.\n\nClass description:\nEdit existing appointments using the UpdateAppointmentsForm\n\nMethod signatures and docstrings:\n- def get(self, request, appointment_id): When loading the page, the fields should be filled with their existing values :param request: The user who is requesting the page :param appointment_id: The unique id of the appointment to be edited :return: If the request is valid, a page that has a form loaded with the current appointment information. 
Otherwise, an HttpResponse with an error message\n- def post(self, request, appointment_id): Submitting the form will update the current appointment :param request: The user who is requesting the page :param appointment_id: The unique id of the Appointment to be updated :return: If the form is valid, redirect the user to the views page. If the form is not valid, re-render the page", "prompted_full_text": "Implement the Python class `DoctorAppointmentEdit` described below.\n\nClass description:\nEdit existing appointments using the UpdateAppointmentsForm\n\nMethod signatures and docstrings:\n- def get(self, request, appointment_id): When loading the page, the fields should be filled with their existing values :param request: The user who is requesting the page :param appointment_id: The unique id of the appointment to be edited :return: If the request is valid, a page that has a form loaded with the current appointment information. Otherwise, an HttpResponse with an error message\n- def post(self, request, appointment_id): Submitting the form will update the current appointment :param request: The user who is requesting the page :param appointment_id: The unique id of the Appointment to be updated :return: If the form is valid, redirect the user to the views page. If the form is not valid, re-render the page\n\n<|skeleton|>\nclass DoctorAppointmentEdit:\n \"\"\"Edit existing appointments using the UpdateAppointmentsForm\"\"\"\n\n def get(self, request, appointment_id):\n \"\"\"When loading the page, the fields should be filled with their existing values :param request: The user who is requesting the page :param appointment_id: The unique id of the appointment to be edited :return: If the request is valid, a page that has a form loaded with the current appointment information. Otherwise, an HttpResponse with an error message\"\"\"\n <|body_0|>\n\n def post(self, request, appointment_id):\n \"\"\"Submitting the form will update the current appointment :param request: The user who is requesting the page :param appointment_id: The unique id of the Appointment to be updated :return: If the form is valid, redirect the user to the views page. 
If the form is not valid, re-render the page\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if request.user.is_authenticated():\n appointment = Appointment.objects.get(id=appointment_id)\n user = UserFactory.get_user(request)\n if user != None and appointment.can_view(user):\n appointment_update_form = self.update_form_class(None)\n form_data = {'description': appointment.description, 'location': appointment.location, 'date': appointment.date, 'time': appointment.time, 'endTime': appointment.endTime}\n appointment_update_form = self.update_form_class(initial=form_data)\n return render(request, self.template_name, {'appointment_update_form': appointment_update_form})\n else:\n return HttpResponse('You tried editing an appointment that is not yours')\n return redirect('/userprofile/login')\n<|end_body_0|>\n\n<|body_start_1|>\n if request.user.is_authenticated():\n appointment = Appointment.objects.get(id=appointment_id)\n appointment_update_form = self.update_form_class(request.POST, instance=appointment)\n if appointment_update_form.is_valid():\n appointment.save()\n logEntry = Entry()\n logEntry.user = request.user.username\n logEntry.trigger = 'appointment.views.AppointmentEdit'\n logEntry.activity = 'Updated Appointment'\n logEntry.save()\n return redirect('/employee/appointment/view')\n return render(request, self.template_name, {'appointment_update_form': appointment_update_form})\n return redirect('/userprofile/login')\n<|end_body_1|>\n", "revision_id": "75cddb44ee24e1ec9916379b80739525dcee721c", "skeleton": "<|skeleton|>\nclass DoctorAppointmentEdit:\n \"\"\"Edit existing appointments using the UpdateAppointmentsForm\"\"\"\n\n def get(self, request, appointment_id):\n \"\"\"When loading the page, the fields should be filled with their existing values :param request: The user who is requesting the page :param appointment_id: The unique id of the appointment to be edited :return: If the request is valid, a page that has a form loaded with the current appointment information. Otherwise, an HttpResponse with an error message\"\"\"\n <|body_0|>\n\n def post(self, request, appointment_id):\n \"\"\"Submitting the form will update the current appointment :param request: The user who is requesting the page :param appointment_id: The unique id of the Appointment to be updated :return: If the form is valid, redirect the user to the views page. If the form is not valid, re-render the page\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DoctorAppointmentEdit:\n \"\"\"Edit existing appointments using the UpdateAppointmentsForm\"\"\"\n\n def get(self, request, appointment_id):\n \"\"\"When loading the page, the fields should be filled with their existing values :param request: The user who is requesting the page :param appointment_id: The unique id of the appointment to be edited :return: If the request is valid, a page that has a form loaded with the current appointment information. 
Otherwise, an HttpResponse with an error message\"\"\"\n if request.user.is_authenticated():\n appointment = Appointment.objects.get(id=appointment_id)\n user = UserFactory.get_user(request)\n if user != None and appointment.can_view(user):\n appointment_update_form = self.update_form_class(None)\n form_data = {'description': appointment.description, 'location': appointment.location, 'date': appointment.date, 'time': appointment.time, 'endTime': appointment.endTime}\n appointment_update_form = self.update_form_class(initial=form_data)\n return render(request, self.template_name, {'appointment_update_form': appointment_update_form})\n else:\n return HttpResponse('You tried editing an appointment that is not yours')\n return redirect('/userprofile/login')\n\n def post(self, request, appointment_id):\n \"\"\"Submitting the form will update the current appointment :param request: The user who is requesting the page :param appointment_id: The unique id of the Appointment to be updated :return: If the form is valid, redirect the user to the views page. If the form is not valid, re-render the page\"\"\"\n if request.user.is_authenticated():\n appointment = Appointment.objects.get(id=appointment_id)\n appointment_update_form = self.update_form_class(request.POST, instance=appointment)\n if appointment_update_form.is_valid():\n appointment.save()\n logEntry = Entry()\n logEntry.user = request.user.username\n logEntry.trigger = 'appointment.views.AppointmentEdit'\n logEntry.activity = 'Updated Appointment'\n logEntry.save()\n return redirect('/employee/appointment/view')\n return render(request, self.template_name, {'appointment_update_form': appointment_update_form})\n return redirect('/userprofile/login')\n", "source": "the_stack_v2_python_sparse", "source_path": "employee/views.py", "source_repo": "kevuno/HealthNet", "split": "val", "star_events_count": 0} {"blob_id": "2a5d9ab8b81474b26ddf5d86e0407a644c3a480f", "bodies": ["self.n = model.n\nself.probs = probs = dict()\nself.sorted_probs = dict()\npre = [elem for elem in model.counts.keys() if not len(elem) == self.n]\nsuf = [elem for elem in model.counts.keys() if len(elem) == self.n]\nfor elem in suf:\n prfx = elem[:-1]\n sfx = elem[-1]\n if prfx in probs:\n aux = probs[prfx]\n probs[prfx] = {sfx: model.cond_prob(sfx, prfx)}\n probs[prfx].update(aux)\n else:\n probs[prfx] = {sfx: model.cond_prob(sfx, prfx)}\nsp = [list(probs[x].items()) for x in pre]\nself.sorted_probs = {pre[i]: sorted(sp[i], key=lambda x: (-x[1], x[0])) for i in range(len(sp))}", "n = self.n\nsent = ('',) * (n - 1)\nif n == 1:\n sent = ()\nwhile '' not in sent:\n sent += (self.generate_token(sent[-n + 1:]),)\nreturn sent[n - 1:-1]", "n = self.n\nif n == 1:\n prev_tokens = tuple()\np = random()\nres = ''\nchoices = self.sorted_probs[prev_tokens]\nacc = choices[0][1]\nfor i in range(0, len(choices)):\n if p < acc:\n res = choices[i][0]\n break\n else:\n acc += choices[i][1]\nreturn res"], "bodies_text": "<|body_start_0|>\n self.n = model.n\n self.probs = probs = dict()\n self.sorted_probs = dict()\n pre = [elem for elem in model.counts.keys() if not len(elem) == self.n]\n suf = [elem for elem in model.counts.keys() if len(elem) == self.n]\n for elem in suf:\n prfx = elem[:-1]\n sfx = elem[-1]\n if prfx in probs:\n aux = probs[prfx]\n probs[prfx] = {sfx: model.cond_prob(sfx, prfx)}\n probs[prfx].update(aux)\n else:\n probs[prfx] = {sfx: model.cond_prob(sfx, prfx)}\n sp = [list(probs[x].items()) for x in pre]\n self.sorted_probs = {pre[i]: sorted(sp[i], key=lambda x: (-x[1], 
x[0])) for i in range(len(sp))}\n<|end_body_0|>\n\n<|body_start_1|>\n n = self.n\n sent = ('',) * (n - 1)\n if n == 1:\n sent = ()\n while '' not in sent:\n sent += (self.generate_token(sent[-n + 1:]),)\n return sent[n - 1:-1]\n<|end_body_1|>\n\n<|body_start_2|>\n n = self.n\n if n == 1:\n prev_tokens = tuple()\n p = random()\n res = ''\n choices = self.sorted_probs[prev_tokens]\n acc = choices[0][1]\n for i in range(0, len(choices)):\n if p < acc:\n res = choices[i][0]\n break\n else:\n acc += choices[i][1]\n return res\n<|end_body_2|>\n", "class_docstring": "n-gram generator.", "class_name": "NGramGenerator", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NGramGenerator:\n \"\"\"n-gram generator.\"\"\"\n\n def __init__(self, model):\n \"\"\"model -- n-gram model.\"\"\"\n <|body_0|>\n\n def generate_sent(self):\n \"\"\"Randomly generate a sentence.\"\"\"\n <|body_1|>\n\n def generate_token(self, prev_tokens=None):\n \"\"\"Randomly generate a token, given prev_tokens. prev_tokens -- the previous n-1 tokens (optional only if n = 1).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.n = model.n\n self.probs = probs = dict()\n self.sorted_probs = dict()\n pre = [elem for elem in model.counts.keys() if not len(elem) == self.n]\n suf = [elem for elem in model.counts.keys() if len(elem) == self.n]\n for elem in suf:\n prfx = elem[:-1]\n sfx = elem[-1]\n if prfx in probs:\n aux = probs[prfx]\n probs[prfx] = {sfx: model.cond_prob(sfx, prfx)}\n probs[prfx].update(aux)\n else:\n probs[prfx] = {sfx: model.cond_prob(sfx, prfx)}\n sp = [list(probs[x].items()) for x in pre]\n self.sorted_probs = {pre[i]: sorted(sp[i], key=lambda x: (-x[1], x[0])) for i in range(len(sp))}\n<|end_body_0|>\n\n<|body_start_1|>\n n = self.n\n sent = ('',) * (n - 1)\n if n == 1:\n sent = ()\n while '' not in sent:\n sent += (self.generate_token(sent[-n + 1:]),)\n return sent[n - 1:-1]\n<|end_body_1|>\n\n<|body_start_2|>\n n = self.n\n if n == 1:\n prev_tokens = tuple()\n p = random()\n res = ''\n choices = self.sorted_probs[prev_tokens]\n acc = choices[0][1]\n for i in range(0, len(choices)):\n if p < acc:\n res = choices[i][0]\n break\n else:\n acc += choices[i][1]\n return res\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000212", "length_bytes": 2740, "license_type": "permissive", "methods": [{"docstring": "model -- n-gram model.", "name": "__init__", "signature": "def __init__(self, model)"}, {"docstring": "Randomly generate a sentence.", "name": "generate_sent", "signature": "def generate_sent(self)"}, {"docstring": "Randomly generate a token, given prev_tokens. prev_tokens -- the previous n-1 tokens (optional only if n = 1).", "name": "generate_token", "signature": "def generate_token(self, prev_tokens=None)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004285", "prompt": "Implement the Python class `NGramGenerator` described below.\n\nClass description:\nn-gram generator.\n\nMethod signatures and docstrings:\n- def __init__(self, model): model -- n-gram model.\n- def generate_sent(self): Randomly generate a sentence.\n- def generate_token(self, prev_tokens=None): Randomly generate a token, given prev_tokens. 
prev_tokens -- the previous n-1 tokens (optional only if n = 1).", "prompted_full_text": "Implement the Python class `NGramGenerator` described below.\n\nClass description:\nn-gram generator.\n\nMethod signatures and docstrings:\n- def __init__(self, model): model -- n-gram model.\n- def generate_sent(self): Randomly generate a sentence.\n- def generate_token(self, prev_tokens=None): Randomly generate a token, given prev_tokens. prev_tokens -- the previous n-1 tokens (optional only if n = 1).\n\n<|skeleton|>\nclass NGramGenerator:\n \"\"\"n-gram generator.\"\"\"\n\n def __init__(self, model):\n \"\"\"model -- n-gram model.\"\"\"\n <|body_0|>\n\n def generate_sent(self):\n \"\"\"Randomly generate a sentence.\"\"\"\n <|body_1|>\n\n def generate_token(self, prev_tokens=None):\n \"\"\"Randomly generate a token, given prev_tokens. prev_tokens -- the previous n-1 tokens (optional only if n = 1).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.n = model.n\n self.probs = probs = dict()\n self.sorted_probs = dict()\n pre = [elem for elem in model.counts.keys() if not len(elem) == self.n]\n suf = [elem for elem in model.counts.keys() if len(elem) == self.n]\n for elem in suf:\n prfx = elem[:-1]\n sfx = elem[-1]\n if prfx in probs:\n aux = probs[prfx]\n probs[prfx] = {sfx: model.cond_prob(sfx, prfx)}\n probs[prfx].update(aux)\n else:\n probs[prfx] = {sfx: model.cond_prob(sfx, prfx)}\n sp = [list(probs[x].items()) for x in pre]\n self.sorted_probs = {pre[i]: sorted(sp[i], key=lambda x: (-x[1], x[0])) for i in range(len(sp))}\n<|end_body_0|>\n\n<|body_start_1|>\n n = self.n\n sent = ('',) * (n - 1)\n if n == 1:\n sent = ()\n while '' not in sent:\n sent += (self.generate_token(sent[-n + 1:]),)\n return sent[n - 1:-1]\n<|end_body_1|>\n\n<|body_start_2|>\n n = self.n\n if n == 1:\n prev_tokens = tuple()\n p = random()\n res = ''\n choices = self.sorted_probs[prev_tokens]\n acc = choices[0][1]\n for i in range(0, len(choices)):\n if p < acc:\n res = choices[i][0]\n break\n else:\n acc += choices[i][1]\n return res\n<|end_body_2|>\n", "revision_id": "cb163f203ae3ce21d210d7751c457b18443e43d0", "skeleton": "<|skeleton|>\nclass NGramGenerator:\n \"\"\"n-gram generator.\"\"\"\n\n def __init__(self, model):\n \"\"\"model -- n-gram model.\"\"\"\n <|body_0|>\n\n def generate_sent(self):\n \"\"\"Randomly generate a sentence.\"\"\"\n <|body_1|>\n\n def generate_token(self, prev_tokens=None):\n \"\"\"Randomly generate a token, given prev_tokens. 
prev_tokens -- the previous n-1 tokens (optional only if n = 1).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NGramGenerator:\n \"\"\"n-gram generator.\"\"\"\n\n def __init__(self, model):\n \"\"\"model -- n-gram model.\"\"\"\n self.n = model.n\n self.probs = probs = dict()\n self.sorted_probs = dict()\n pre = [elem for elem in model.counts.keys() if not len(elem) == self.n]\n suf = [elem for elem in model.counts.keys() if len(elem) == self.n]\n for elem in suf:\n prfx = elem[:-1]\n sfx = elem[-1]\n if prfx in probs:\n aux = probs[prfx]\n probs[prfx] = {sfx: model.cond_prob(sfx, prfx)}\n probs[prfx].update(aux)\n else:\n probs[prfx] = {sfx: model.cond_prob(sfx, prfx)}\n sp = [list(probs[x].items()) for x in pre]\n self.sorted_probs = {pre[i]: sorted(sp[i], key=lambda x: (-x[1], x[0])) for i in range(len(sp))}\n\n def generate_sent(self):\n \"\"\"Randomly generate a sentence.\"\"\"\n n = self.n\n sent = ('',) * (n - 1)\n if n == 1:\n sent = ()\n while '' not in sent:\n sent += (self.generate_token(sent[-n + 1:]),)\n return sent[n - 1:-1]\n\n def generate_token(self, prev_tokens=None):\n \"\"\"Randomly generate a token, given prev_tokens. prev_tokens -- the previous n-1 tokens (optional only if n = 1).\"\"\"\n n = self.n\n if n == 1:\n prev_tokens = tuple()\n p = random()\n res = ''\n choices = self.sorted_probs[prev_tokens]\n acc = choices[0][1]\n for i in range(0, len(choices)):\n if p < acc:\n res = choices[i][0]\n break\n else:\n acc += choices[i][1]\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "pagi/utils/ngram/ngram_generator.py", "source_repo": "yoelm/pagi", "split": "val", "star_events_count": 0} {"blob_id": "80d599a72b2874f7c907b3cc362274669670f885", "bodies": ["super().__init__(input_key, output_key or input_key)\nself.dtype = dtype\nself.default_value = default_value\nself.one_hot_classes = one_hot_classes\nself.smoothing = smoothing\nif self.one_hot_classes is not None and self.smoothing is not None:\n assert 0.0 < smoothing < 1.0, 'If smoothing is specified it must be in (0; 1), ' + f'got {smoothing}'", "scalar = self.dtype(element.get(self.input_key, self.default_value))\nif self.one_hot_classes is not None:\n scalar = get_one_hot(scalar, self.one_hot_classes, smoothing=self.smoothing)\noutput = {self.output_key: scalar}\nreturn output"], "bodies_text": "<|body_start_0|>\n super().__init__(input_key, output_key or input_key)\n self.dtype = dtype\n self.default_value = default_value\n self.one_hot_classes = one_hot_classes\n self.smoothing = smoothing\n if self.one_hot_classes is not None and self.smoothing is not None:\n assert 0.0 < smoothing < 1.0, 'If smoothing is specified it must be in (0; 1), ' + f'got {smoothing}'\n<|end_body_0|>\n\n<|body_start_1|>\n scalar = self.dtype(element.get(self.input_key, self.default_value))\n if self.one_hot_classes is not None:\n scalar = get_one_hot(scalar, self.one_hot_classes, smoothing=self.smoothing)\n output = {self.output_key: scalar}\n return output\n<|end_body_1|>\n", "class_docstring": "Numeric data reader abstraction. Reads a single float, int, str or other from data", "class_name": "ScalarReader", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ScalarReader:\n \"\"\"Numeric data reader abstraction. 
Reads a single float, int, str or other from data\"\"\"\n\n def __init__(self, input_key: str, output_key: Optional[str]=None, dtype: Type=np.float32, default_value: float=None, one_hot_classes: int=None, smoothing: float=None):\n \"\"\"Args: input_key: input key to use from annotation dict output_key: output key to use to store the result, default: ``input_key`` dtype: datatype of scalar values to use default_value: default value to use if something goes wrong one_hot_classes: number of one-hot classes smoothing (float, optional): if specified applies label smoothing to one_hot classes\"\"\"\n <|body_0|>\n\n def __call__(self, element):\n \"\"\"Reads a row from your annotations dict and transfer it to a single value Args: element: elem in your dataset Returns: dtype: Scalar value\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(input_key, output_key or input_key)\n self.dtype = dtype\n self.default_value = default_value\n self.one_hot_classes = one_hot_classes\n self.smoothing = smoothing\n if self.one_hot_classes is not None and self.smoothing is not None:\n assert 0.0 < smoothing < 1.0, 'If smoothing is specified it must be in (0; 1), ' + f'got {smoothing}'\n<|end_body_0|>\n\n<|body_start_1|>\n scalar = self.dtype(element.get(self.input_key, self.default_value))\n if self.one_hot_classes is not None:\n scalar = get_one_hot(scalar, self.one_hot_classes, smoothing=self.smoothing)\n output = {self.output_key: scalar}\n return output\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000213", "length_bytes": 5229, "license_type": "permissive", "methods": [{"docstring": "Args: input_key: input key to use from annotation dict output_key: output key to use to store the result, default: ``input_key`` dtype: datatype of scalar values to use default_value: default value to use if something goes wrong one_hot_classes: number of one-hot classes smoothing (float, optional): if specified applies label smoothing to one_hot classes", "name": "__init__", "signature": "def __init__(self, input_key: str, output_key: Optional[str]=None, dtype: Type=np.float32, default_value: float=None, one_hot_classes: int=None, smoothing: float=None)"}, {"docstring": "Reads a row from your annotations dict and transfer it to a single value Args: element: elem in your dataset Returns: dtype: Scalar value", "name": "__call__", "signature": "def __call__(self, element)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000187", "prompt": "Implement the Python class `ScalarReader` described below.\n\nClass description:\nNumeric data reader abstraction. Reads a single float, int, str or other from data\n\nMethod signatures and docstrings:\n- def __init__(self, input_key: str, output_key: Optional[str]=None, dtype: Type=np.float32, default_value: float=None, one_hot_classes: int=None, smoothing: float=None): Args: input_key: input key to use from annotation dict output_key: output key to use to store the result, default: ``input_key`` dtype: datatype of scalar values to use default_value: default value to use if something goes wrong one_hot_classes: number of one-hot classes smoothing (float, optional): if specified applies label smoothing to one_hot classes\n- def __call__(self, element): Reads a row from your annotations dict and transfer it to a single value Args: element: elem in your dataset Returns: dtype: Scalar value", "prompted_full_text": "Implement the Python class `ScalarReader` described below.\n\nClass description:\nNumeric data reader abstraction. 
Reads a single float, int, str or other from data\n\nMethod signatures and docstrings:\n- def __init__(self, input_key: str, output_key: Optional[str]=None, dtype: Type=np.float32, default_value: float=None, one_hot_classes: int=None, smoothing: float=None): Args: input_key: input key to use from annotation dict output_key: output key to use to store the result, default: ``input_key`` dtype: datatype of scalar values to use default_value: default value to use if something goes wrong one_hot_classes: number of one-hot classes smoothing (float, optional): if specified applies label smoothing to one_hot classes\n- def __call__(self, element): Reads a row from your annotations dict and transfer it to a single value Args: element: elem in your dataset Returns: dtype: Scalar value\n\n<|skeleton|>\nclass ScalarReader:\n \"\"\"Numeric data reader abstraction. Reads a single float, int, str or other from data\"\"\"\n\n def __init__(self, input_key: str, output_key: Optional[str]=None, dtype: Type=np.float32, default_value: float=None, one_hot_classes: int=None, smoothing: float=None):\n \"\"\"Args: input_key: input key to use from annotation dict output_key: output key to use to store the result, default: ``input_key`` dtype: datatype of scalar values to use default_value: default value to use if something goes wrong one_hot_classes: number of one-hot classes smoothing (float, optional): if specified applies label smoothing to one_hot classes\"\"\"\n <|body_0|>\n\n def __call__(self, element):\n \"\"\"Reads a row from your annotations dict and transfer it to a single value Args: element: elem in your dataset Returns: dtype: Scalar value\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(input_key, output_key or input_key)\n self.dtype = dtype\n self.default_value = default_value\n self.one_hot_classes = one_hot_classes\n self.smoothing = smoothing\n if self.one_hot_classes is not None and self.smoothing is not None:\n assert 0.0 < smoothing < 1.0, 'If smoothing is specified it must be in (0; 1), ' + f'got {smoothing}'\n<|end_body_0|>\n\n<|body_start_1|>\n scalar = self.dtype(element.get(self.input_key, self.default_value))\n if self.one_hot_classes is not None:\n scalar = get_one_hot(scalar, self.one_hot_classes, smoothing=self.smoothing)\n output = {self.output_key: scalar}\n return output\n<|end_body_1|>\n", "revision_id": "e99f90655d0efcf22559a46e928f0f98c9807ebf", "skeleton": "<|skeleton|>\nclass ScalarReader:\n \"\"\"Numeric data reader abstraction. Reads a single float, int, str or other from data\"\"\"\n\n def __init__(self, input_key: str, output_key: Optional[str]=None, dtype: Type=np.float32, default_value: float=None, one_hot_classes: int=None, smoothing: float=None):\n \"\"\"Args: input_key: input key to use from annotation dict output_key: output key to use to store the result, default: ``input_key`` dtype: datatype of scalar values to use default_value: default value to use if something goes wrong one_hot_classes: number of one-hot classes smoothing (float, optional): if specified applies label smoothing to one_hot classes\"\"\"\n <|body_0|>\n\n def __call__(self, element):\n \"\"\"Reads a row from your annotations dict and transfer it to a single value Args: element: elem in your dataset Returns: dtype: Scalar value\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ScalarReader:\n \"\"\"Numeric data reader abstraction. 
Reads a single float, int, str or other from data\"\"\"\n\n def __init__(self, input_key: str, output_key: Optional[str]=None, dtype: Type=np.float32, default_value: float=None, one_hot_classes: int=None, smoothing: float=None):\n \"\"\"Args: input_key: input key to use from annotation dict output_key: output key to use to store the result, default: ``input_key`` dtype: datatype of scalar values to use default_value: default value to use if something goes wrong one_hot_classes: number of one-hot classes smoothing (float, optional): if specified applies label smoothing to one_hot classes\"\"\"\n super().__init__(input_key, output_key or input_key)\n self.dtype = dtype\n self.default_value = default_value\n self.one_hot_classes = one_hot_classes\n self.smoothing = smoothing\n if self.one_hot_classes is not None and self.smoothing is not None:\n assert 0.0 < smoothing < 1.0, 'If smoothing is specified it must be in (0; 1), ' + f'got {smoothing}'\n\n def __call__(self, element):\n \"\"\"Reads a row from your annotations dict and transfer it to a single value Args: element: elem in your dataset Returns: dtype: Scalar value\"\"\"\n scalar = self.dtype(element.get(self.input_key, self.default_value))\n if self.one_hot_classes is not None:\n scalar = get_one_hot(scalar, self.one_hot_classes, smoothing=self.smoothing)\n output = {self.output_key: scalar}\n return output\n", "source": "the_stack_v2_python_sparse", "source_path": "catalyst/contrib/data/reader.py", "source_repo": "catalyst-team/catalyst", "split": "val", "star_events_count": 3038} {"blob_id": "47ac04cd5de60bdf85c5d26d2c6df88e70645ab0", "bodies": ["fake_get_distribution.side_effect = [versions.DistributionNotFound()]\nv = versions.get_iiq_version()\nself.assertTrue(v is None)", "fake_get_distribution.side_effect = [versions.DistributionNotFound()]\nv = versions.get_iiqtools_version()\nself.assertTrue(v is None)", "fake_get_distribution.return_value = self.FakeDistVersion('3.3.4')\nv = versions.get_iiq_version()\nself.assertTrue(isinstance(v, versions.Version))", "fake_get_distribution.return_value = self.FakeDistVersion('1.2.3')\nv = versions.get_iiqtools_version()\nself.assertTrue(isinstance(v, versions.Version))"], "bodies_text": "<|body_start_0|>\n fake_get_distribution.side_effect = [versions.DistributionNotFound()]\n v = versions.get_iiq_version()\n self.assertTrue(v is None)\n<|end_body_0|>\n\n<|body_start_1|>\n fake_get_distribution.side_effect = [versions.DistributionNotFound()]\n v = versions.get_iiqtools_version()\n self.assertTrue(v is None)\n<|end_body_1|>\n\n<|body_start_2|>\n fake_get_distribution.return_value = self.FakeDistVersion('3.3.4')\n v = versions.get_iiq_version()\n self.assertTrue(isinstance(v, versions.Version))\n<|end_body_2|>\n\n<|body_start_3|>\n fake_get_distribution.return_value = self.FakeDistVersion('1.2.3')\n v = versions.get_iiqtools_version()\n self.assertTrue(isinstance(v, versions.Version))\n<|end_body_3|>\n", "class_docstring": "A suite of tests for the ``get_iiq_version`` and ``get_iiqtools_version`` functions", "class_name": "TestGetVersions", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestGetVersions:\n \"\"\"A suite of tests for the ``get_iiq_version`` and ``get_iiqtools_version`` functions\"\"\"\n\n def test_get_iiq_version(self, fake_get_distribution):\n \"\"\"None is returned if InsightIQ is not installed\"\"\"\n <|body_0|>\n\n def test_get_iiqtools_version(self, fake_get_distribution):\n \"\"\"None is 
returned if IIQTools is not installed\"\"\"\n <|body_1|>\n\n def test_get_iiq_version_ok(self, fake_get_distribution):\n \"\"\"Version is returned if InsightIQ is installed\"\"\"\n <|body_2|>\n\n def test_get_iiqtools_version_ok(self, fake_get_distribution):\n \"\"\"Version is returned if IIQTools is installed\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n fake_get_distribution.side_effect = [versions.DistributionNotFound()]\n v = versions.get_iiq_version()\n self.assertTrue(v is None)\n<|end_body_0|>\n\n<|body_start_1|>\n fake_get_distribution.side_effect = [versions.DistributionNotFound()]\n v = versions.get_iiqtools_version()\n self.assertTrue(v is None)\n<|end_body_1|>\n\n<|body_start_2|>\n fake_get_distribution.return_value = self.FakeDistVersion('3.3.4')\n v = versions.get_iiq_version()\n self.assertTrue(isinstance(v, versions.Version))\n<|end_body_2|>\n\n<|body_start_3|>\n fake_get_distribution.return_value = self.FakeDistVersion('1.2.3')\n v = versions.get_iiqtools_version()\n self.assertTrue(isinstance(v, versions.Version))\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000214", "length_bytes": 15840, "license_type": "permissive", "methods": [{"docstring": "None is returned if InsightIQ is not installed", "name": "test_get_iiq_version", "signature": "def test_get_iiq_version(self, fake_get_distribution)"}, {"docstring": "None is returned if IIQTools is not installed", "name": "test_get_iiqtools_version", "signature": "def test_get_iiqtools_version(self, fake_get_distribution)"}, {"docstring": "Version is returned if InsightIQ is installed", "name": "test_get_iiq_version_ok", "signature": "def test_get_iiq_version_ok(self, fake_get_distribution)"}, {"docstring": "Version is returned if IIQTools is installed", "name": "test_get_iiqtools_version_ok", "signature": "def test_get_iiqtools_version_ok(self, fake_get_distribution)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_004947", "prompt": "Implement the Python class `TestGetVersions` described below.\n\nClass description:\nA suite of tests for the ``get_iiq_version`` and ``get_iiqtools_version`` functions\n\nMethod signatures and docstrings:\n- def test_get_iiq_version(self, fake_get_distribution): None is returned if InsightIQ is not installed\n- def test_get_iiqtools_version(self, fake_get_distribution): None is returned if IIQTools is not installed\n- def test_get_iiq_version_ok(self, fake_get_distribution): Version is returned if InsightIQ is installed\n- def test_get_iiqtools_version_ok(self, fake_get_distribution): Version is returned if IIQTools is installed", "prompted_full_text": "Implement the Python class `TestGetVersions` described below.\n\nClass description:\nA suite of tests for the ``get_iiq_version`` and ``get_iiqtools_version`` functions\n\nMethod signatures and docstrings:\n- def test_get_iiq_version(self, fake_get_distribution): None is returned if InsightIQ is not installed\n- def test_get_iiqtools_version(self, fake_get_distribution): None is returned if IIQTools is not installed\n- def test_get_iiq_version_ok(self, fake_get_distribution): Version is returned if InsightIQ is installed\n- def test_get_iiqtools_version_ok(self, fake_get_distribution): Version is returned if IIQTools is installed\n\n<|skeleton|>\nclass TestGetVersions:\n \"\"\"A suite of tests for the ``get_iiq_version`` and ``get_iiqtools_version`` functions\"\"\"\n\n def test_get_iiq_version(self, fake_get_distribution):\n \"\"\"None is returned if InsightIQ is not installed\"\"\"\n <|body_0|>\n\n 
def test_get_iiqtools_version(self, fake_get_distribution):\n \"\"\"None is returned if IIQTools is not installed\"\"\"\n <|body_1|>\n\n def test_get_iiq_version_ok(self, fake_get_distribution):\n \"\"\"Version is returned if InsightIQ is installed\"\"\"\n <|body_2|>\n\n def test_get_iiqtools_version_ok(self, fake_get_distribution):\n \"\"\"Version is returned if IIQTools is installed\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n fake_get_distribution.side_effect = [versions.DistributionNotFound()]\n v = versions.get_iiq_version()\n self.assertTrue(v is None)\n<|end_body_0|>\n\n<|body_start_1|>\n fake_get_distribution.side_effect = [versions.DistributionNotFound()]\n v = versions.get_iiqtools_version()\n self.assertTrue(v is None)\n<|end_body_1|>\n\n<|body_start_2|>\n fake_get_distribution.return_value = self.FakeDistVersion('3.3.4')\n v = versions.get_iiq_version()\n self.assertTrue(isinstance(v, versions.Version))\n<|end_body_2|>\n\n<|body_start_3|>\n fake_get_distribution.return_value = self.FakeDistVersion('1.2.3')\n v = versions.get_iiqtools_version()\n self.assertTrue(isinstance(v, versions.Version))\n<|end_body_3|>\n", "revision_id": "a44a8ee9a299c7711b3abd69d21c24f55f2ae84e", "skeleton": "<|skeleton|>\nclass TestGetVersions:\n \"\"\"A suite of tests for the ``get_iiq_version`` and ``get_iiqtools_version`` functions\"\"\"\n\n def test_get_iiq_version(self, fake_get_distribution):\n \"\"\"None is returned if InsightIQ is not installed\"\"\"\n <|body_0|>\n\n def test_get_iiqtools_version(self, fake_get_distribution):\n \"\"\"None is returned if IIQTools is not installed\"\"\"\n <|body_1|>\n\n def test_get_iiq_version_ok(self, fake_get_distribution):\n \"\"\"Version is returned if InsightIQ is installed\"\"\"\n <|body_2|>\n\n def test_get_iiqtools_version_ok(self, fake_get_distribution):\n \"\"\"Version is returned if IIQTools is installed\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestGetVersions:\n \"\"\"A suite of tests for the ``get_iiq_version`` and ``get_iiqtools_version`` functions\"\"\"\n\n def test_get_iiq_version(self, fake_get_distribution):\n \"\"\"None is returned if InsightIQ is not installed\"\"\"\n fake_get_distribution.side_effect = [versions.DistributionNotFound()]\n v = versions.get_iiq_version()\n self.assertTrue(v is None)\n\n def test_get_iiqtools_version(self, fake_get_distribution):\n \"\"\"None is returned if IIQTools is not installed\"\"\"\n fake_get_distribution.side_effect = [versions.DistributionNotFound()]\n v = versions.get_iiqtools_version()\n self.assertTrue(v is None)\n\n def test_get_iiq_version_ok(self, fake_get_distribution):\n \"\"\"Version is returned if InsightIQ is installed\"\"\"\n fake_get_distribution.return_value = self.FakeDistVersion('3.3.4')\n v = versions.get_iiq_version()\n self.assertTrue(isinstance(v, versions.Version))\n\n def test_get_iiqtools_version_ok(self, fake_get_distribution):\n \"\"\"Version is returned if IIQTools is installed\"\"\"\n fake_get_distribution.return_value = self.FakeDistVersion('1.2.3')\n v = versions.get_iiqtools_version()\n self.assertTrue(isinstance(v, versions.Version))\n", "source": "the_stack_v2_python_sparse", "source_path": "iiqtools_tests/utils/test_versions.py", "source_repo": "willnx/iiqtools", "split": "val", "star_events_count": 5} {"blob_id": "311fd6ae560e67365300344f56e6f7c6b238c27a", "bodies": ["self.matrix = matrix\nself.preSumMatrix = []\nfor i in 
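The TestGetVersions record that closes above patches pkg_resources.get_distribution to exercise version-probing helpers. A minimal self-contained sketch of that pattern follows; get_pkg_version is a hypothetical stand-in for the record's versions.get_iiq_version and get_iiqtools_version, and 'insightiq' is a placeholder distribution name.

import unittest
from unittest.mock import patch

import pkg_resources


def get_pkg_version(name):
    """Return the installed version string, or None if the package is absent."""
    try:
        return pkg_resources.get_distribution(name).version
    except pkg_resources.DistributionNotFound:
        return None


class TestGetPkgVersion(unittest.TestCase):

    @patch('pkg_resources.get_distribution')
    def test_missing(self, fake_get_distribution):
        # Simulate an uninstalled package, as the record's tests do.
        fake_get_distribution.side_effect = pkg_resources.DistributionNotFound()
        self.assertIsNone(get_pkg_version('insightiq'))

    @patch('pkg_resources.get_distribution')
    def test_installed(self, fake_get_distribution):
        fake_get_distribution.return_value.version = '3.3.4'
        self.assertEqual(get_pkg_version('insightiq'), '3.3.4')


if __name__ == '__main__':
    unittest.main()

Patching the lookup rather than the helper keeps the except branch honest: the mock raises the real DistributionNotFound, so the test covers the same control flow a missing package would trigger.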
range(len(matrix)):\n cur = 0\n self.preSumMatrix.append([])\n for j in range(len(matrix[0])):\n cur += matrix[i][j]\n self.preSumMatrix[i].append(cur)", "diff = val - self.matrix[row][col]\nfor i in range(col, len(self.preSumMatrix[0])):\n self.preSumMatrix[row][i] = self.preSumMatrix[row][i] + diff\nself.matrix[row][col] = val", "ans = 0\nfor i in range(row1, row2 + 1):\n ans += self.preSumMatrix[i][col2]\n if col1 != 0:\n ans -= self.preSumMatrix[i][col1 - 1]\nreturn ans"], "bodies_text": "<|body_start_0|>\n self.matrix = matrix\n self.preSumMatrix = []\n for i in range(len(matrix)):\n cur = 0\n self.preSumMatrix.append([])\n for j in range(len(matrix[0])):\n cur += matrix[i][j]\n self.preSumMatrix[i].append(cur)\n<|end_body_0|>\n\n<|body_start_1|>\n diff = val - self.matrix[row][col]\n for i in range(col, len(self.preSumMatrix[0])):\n self.preSumMatrix[row][i] = self.preSumMatrix[row][i] + diff\n self.matrix[row][col] = val\n<|end_body_1|>\n\n<|body_start_2|>\n ans = 0\n for i in range(row1, row2 + 1):\n ans += self.preSumMatrix[i][col2]\n if col1 != 0:\n ans -= self.preSumMatrix[i][col1 - 1]\n return ans\n<|end_body_2|>\n", "class_docstring": "", "class_name": "NumMatrix", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NumMatrix:\n\n def __init__(self, matrix):\n \"\"\":type matrix: List[List[int]]\"\"\"\n <|body_0|>\n\n def update(self, row, col, val):\n \"\"\":type row: int :type col: int :type val: int :rtype: void\"\"\"\n <|body_1|>\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\":type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.matrix = matrix\n self.preSumMatrix = []\n for i in range(len(matrix)):\n cur = 0\n self.preSumMatrix.append([])\n for j in range(len(matrix[0])):\n cur += matrix[i][j]\n self.preSumMatrix[i].append(cur)\n<|end_body_0|>\n\n<|body_start_1|>\n diff = val - self.matrix[row][col]\n for i in range(col, len(self.preSumMatrix[0])):\n self.preSumMatrix[row][i] = self.preSumMatrix[row][i] + diff\n self.matrix[row][col] = val\n<|end_body_1|>\n\n<|body_start_2|>\n ans = 0\n for i in range(row1, row2 + 1):\n ans += self.preSumMatrix[i][col2]\n if col1 != 0:\n ans -= self.preSumMatrix[i][col1 - 1]\n return ans\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000215", "length_bytes": 5380, "license_type": "no_license", "methods": [{"docstring": ":type matrix: List[List[int]]", "name": "__init__", "signature": "def __init__(self, matrix)"}, {"docstring": ":type row: int :type col: int :type val: int :rtype: void", "name": "update", "signature": "def update(self, row, col, val)"}, {"docstring": ":type row1: int :type col1: int :type row2: int :type col2: int :rtype: int", "name": "sumRegion", "signature": "def sumRegion(self, row1, col1, row2, col2)"}], "n_methods": 3, "prompt": "Implement the Python class `NumMatrix` described below.\n\nClass description:\nImplement the NumMatrix class.\n\nMethod signatures and docstrings:\n- def __init__(self, matrix): :type matrix: List[List[int]]\n- def update(self, row, col, val): :type row: int :type col: int :type val: int :rtype: void\n- def sumRegion(self, row1, col1, row2, col2): :type row1: int :type col1: int :type row2: int :type col2: int :rtype: int", "prompted_full_text": "Implement the Python class `NumMatrix` described below.\n\nClass description:\nImplement the NumMatrix class.\n\nMethod signatures and docstrings:\n- def __init__(self, matrix): 
:type matrix: List[List[int]]\n- def update(self, row, col, val): :type row: int :type col: int :type val: int :rtype: void\n- def sumRegion(self, row1, col1, row2, col2): :type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\n\n<|skeleton|>\nclass NumMatrix:\n\n def __init__(self, matrix):\n \"\"\":type matrix: List[List[int]]\"\"\"\n <|body_0|>\n\n def update(self, row, col, val):\n \"\"\":type row: int :type col: int :type val: int :rtype: void\"\"\"\n <|body_1|>\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\":type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.matrix = matrix\n self.preSumMatrix = []\n for i in range(len(matrix)):\n cur = 0\n self.preSumMatrix.append([])\n for j in range(len(matrix[0])):\n cur += matrix[i][j]\n self.preSumMatrix[i].append(cur)\n<|end_body_0|>\n\n<|body_start_1|>\n diff = val - self.matrix[row][col]\n for i in range(col, len(self.preSumMatrix[0])):\n self.preSumMatrix[row][i] = self.preSumMatrix[row][i] + diff\n self.matrix[row][col] = val\n<|end_body_1|>\n\n<|body_start_2|>\n ans = 0\n for i in range(row1, row2 + 1):\n ans += self.preSumMatrix[i][col2]\n if col1 != 0:\n ans -= self.preSumMatrix[i][col1 - 1]\n return ans\n<|end_body_2|>\n", "revision_id": "fd310ec0a989e003242f1840230aaac150f006f0", "skeleton": "<|skeleton|>\nclass NumMatrix:\n\n def __init__(self, matrix):\n \"\"\":type matrix: List[List[int]]\"\"\"\n <|body_0|>\n\n def update(self, row, col, val):\n \"\"\":type row: int :type col: int :type val: int :rtype: void\"\"\"\n <|body_1|>\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\":type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NumMatrix:\n def __init__(self, matrix):\n \"\"\":type matrix: List[List[int]]\"\"\"\n self.matrix = matrix\n self.preSumMatrix = []\n for i in range(len(matrix)):\n cur = 0\n self.preSumMatrix.append([])\n for j in range(len(matrix[0])):\n cur += matrix[i][j]\n self.preSumMatrix[i].append(cur)\n\n def update(self, row, col, val):\n \"\"\":type row: int :type col: int :type val: int :rtype: void\"\"\"\n diff = val - self.matrix[row][col]\n for i in range(col, len(self.preSumMatrix[0])):\n self.preSumMatrix[row][i] = self.preSumMatrix[row][i] + diff\n self.matrix[row][col] = val\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\":type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\"\"\"\n ans = 0\n for i in range(row1, row2 + 1):\n ans += self.preSumMatrix[i][col2]\n if col1 != 0:\n ans -= self.preSumMatrix[i][col1 - 1]\n return ans\n", "source": "the_stack_v2_python_sparse", "source_path": "好咧,最后还是要搞google/hard/RangeSumQuery2DMutable308.py", "source_repo": "jing1988a/python_fb", "split": "val", "star_events_count": 0} {"blob_id": "422b2dea41a60c93dbdd4038c9d1b9c3e1e716c8", "bodies": ["dp, all = (collections.defaultdict(int), collections.defaultdict(int))\n\ndef dfs(node):\n if node in dp:\n return dp[node]\n if not node.left and (not node.right):\n dp[node], all[node] = (1, 0)\n return 1\n if node.left:\n all[node] += dfs(node.left)\n if node.right:\n all[node] += dfs(node.right)\n dp[node] = 1 + all[node.left] + all[node.right]\n all[node] = min(dp[node], dp[node.left] + dp[node.right])\n return dp[node]\ndfs(root)\nreturn all[root] if all[root] else dp[root]", "def 
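The NumMatrix record above (LeetCode 308, Range Sum Query 2D Mutable) keeps a prefix sum per row, so update costs O(columns) and sumRegion costs O(rows). A standalone rendering of that exact scheme, assembled from the record's own bodies, with a quick self-check:

class NumMatrix:
    """Mutable 2-D range sums via per-row prefix sums."""

    def __init__(self, matrix):
        self.matrix = matrix
        # pre[i][j] holds sum(matrix[i][0..j]).
        self.pre = [[0] * len(matrix[0]) for _ in matrix] if matrix else []
        for i, row in enumerate(matrix):
            cur = 0
            for j, v in enumerate(row):
                cur += v
                self.pre[i][j] = cur

    def update(self, row, col, val):
        diff = val - self.matrix[row][col]
        # Every prefix at or to the right of col shifts by the delta.
        for j in range(col, len(self.pre[row])):
            self.pre[row][j] += diff
        self.matrix[row][col] = val

    def sumRegion(self, row1, col1, row2, col2):
        total = 0
        for i in range(row1, row2 + 1):
            total += self.pre[i][col2]
            if col1:
                total -= self.pre[i][col1 - 1]
        return total


nm = NumMatrix([[3, 0, 1], [5, 6, 3], [1, 2, 0]])
assert nm.sumRegion(0, 0, 1, 1) == 14
nm.update(1, 1, 2)
assert nm.sumRegion(0, 0, 1, 1) == 10

Per-row prefixes are a middle ground: a full 2-D prefix table would make queries O(1) but updates O(rows x columns), while a 2-D Fenwick tree gets both operations to logarithmic time.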
dfs(node):\n if not node:\n return (0, 0, float('inf'))\n l0, l1, l2 = dfs(node.left)\n r0, r1, r2 = dfs(node.right)\n dp0 = l1 + r1\n dp1 = min(l2 + min(r1, r2), r2 + min(l1, l2))\n dp2 = 1 + min(l0, l1, l2) + min(r0, r1, r2)\n return (dp0, dp1, dp2)\na, b, c = dfs(root)\nreturn min(b, c)", "ans, monitored = ([0], {None})\n\ndef dfs(node, parent=None):\n if node:\n dfs(node.left, node)\n dfs(node.right, node)\n if parent is None and node not in monitored or node.left not in monitored or node.right not in monitored:\n ans[0] += 1\n monitored.update({node, parent, node.left, node.right})\ndfs(root)\nreturn ans[0]"], "bodies_text": "<|body_start_0|>\n dp, all = (collections.defaultdict(int), collections.defaultdict(int))\n\n def dfs(node):\n if node in dp:\n return dp[node]\n if not node.left and (not node.right):\n dp[node], all[node] = (1, 0)\n return 1\n if node.left:\n all[node] += dfs(node.left)\n if node.right:\n all[node] += dfs(node.right)\n dp[node] = 1 + all[node.left] + all[node.right]\n all[node] = min(dp[node], dp[node.left] + dp[node.right])\n return dp[node]\n dfs(root)\n return all[root] if all[root] else dp[root]\n<|end_body_0|>\n\n<|body_start_1|>\n def dfs(node):\n if not node:\n return (0, 0, float('inf'))\n l0, l1, l2 = dfs(node.left)\n r0, r1, r2 = dfs(node.right)\n dp0 = l1 + r1\n dp1 = min(l2 + min(r1, r2), r2 + min(l1, l2))\n dp2 = 1 + min(l0, l1, l2) + min(r0, r1, r2)\n return (dp0, dp1, dp2)\n a, b, c = dfs(root)\n return min(b, c)\n<|end_body_1|>\n\n<|body_start_2|>\n ans, monitored = ([0], {None})\n\n def dfs(node, parent=None):\n if node:\n dfs(node.left, node)\n dfs(node.right, node)\n if parent is None and node not in monitored or node.left not in monitored or node.right not in monitored:\n ans[0] += 1\n monitored.update({node, parent, node.left, node.right})\n dfs(root)\n return ans[0]\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def minCameraCover_wrong(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_0|>\n\n def minCameraCover(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_1|>\n\n def minCameraCover2(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dp, all = (collections.defaultdict(int), collections.defaultdict(int))\n\n def dfs(node):\n if node in dp:\n return dp[node]\n if not node.left and (not node.right):\n dp[node], all[node] = (1, 0)\n return 1\n if node.left:\n all[node] += dfs(node.left)\n if node.right:\n all[node] += dfs(node.right)\n dp[node] = 1 + all[node.left] + all[node.right]\n all[node] = min(dp[node], dp[node.left] + dp[node.right])\n return dp[node]\n dfs(root)\n return all[root] if all[root] else dp[root]\n<|end_body_0|>\n\n<|body_start_1|>\n def dfs(node):\n if not node:\n return (0, 0, float('inf'))\n l0, l1, l2 = dfs(node.left)\n r0, r1, r2 = dfs(node.right)\n dp0 = l1 + r1\n dp1 = min(l2 + min(r1, r2), r2 + min(l1, l2))\n dp2 = 1 + min(l0, l1, l2) + min(r0, r1, r2)\n return (dp0, dp1, dp2)\n a, b, c = dfs(root)\n return min(b, c)\n<|end_body_1|>\n\n<|body_start_2|>\n ans, monitored = ([0], {None})\n\n def dfs(node, parent=None):\n if node:\n dfs(node.left, node)\n dfs(node.right, node)\n if parent is None and node not in monitored or node.left not in monitored or node.right not in monitored:\n ans[0] += 1\n monitored.update({node, parent, node.left, node.right})\n dfs(root)\n 
return ans[0]\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000216", "length_bytes": 4473, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode :rtype: int", "name": "minCameraCover_wrong", "signature": "def minCameraCover_wrong(self, root)"}, {"docstring": ":type root: TreeNode :rtype: int", "name": "minCameraCover", "signature": "def minCameraCover(self, root)"}, {"docstring": ":type root: TreeNode :rtype: int", "name": "minCameraCover2", "signature": "def minCameraCover2(self, root)"}], "n_methods": 3, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minCameraCover_wrong(self, root): :type root: TreeNode :rtype: int\n- def minCameraCover(self, root): :type root: TreeNode :rtype: int\n- def minCameraCover2(self, root): :type root: TreeNode :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minCameraCover_wrong(self, root): :type root: TreeNode :rtype: int\n- def minCameraCover(self, root): :type root: TreeNode :rtype: int\n- def minCameraCover2(self, root): :type root: TreeNode :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def minCameraCover_wrong(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_0|>\n\n def minCameraCover(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_1|>\n\n def minCameraCover2(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dp, all = (collections.defaultdict(int), collections.defaultdict(int))\n\n def dfs(node):\n if node in dp:\n return dp[node]\n if not node.left and (not node.right):\n dp[node], all[node] = (1, 0)\n return 1\n if node.left:\n all[node] += dfs(node.left)\n if node.right:\n all[node] += dfs(node.right)\n dp[node] = 1 + all[node.left] + all[node.right]\n all[node] = min(dp[node], dp[node.left] + dp[node.right])\n return dp[node]\n dfs(root)\n return all[root] if all[root] else dp[root]\n<|end_body_0|>\n\n<|body_start_1|>\n def dfs(node):\n if not node:\n return (0, 0, float('inf'))\n l0, l1, l2 = dfs(node.left)\n r0, r1, r2 = dfs(node.right)\n dp0 = l1 + r1\n dp1 = min(l2 + min(r1, r2), r2 + min(l1, l2))\n dp2 = 1 + min(l0, l1, l2) + min(r0, r1, r2)\n return (dp0, dp1, dp2)\n a, b, c = dfs(root)\n return min(b, c)\n<|end_body_1|>\n\n<|body_start_2|>\n ans, monitored = ([0], {None})\n\n def dfs(node, parent=None):\n if node:\n dfs(node.left, node)\n dfs(node.right, node)\n if parent is None and node not in monitored or node.left not in monitored or node.right not in monitored:\n ans[0] += 1\n monitored.update({node, parent, node.left, node.right})\n dfs(root)\n return ans[0]\n<|end_body_2|>\n", "revision_id": "340ae58fb65b97aa6c6ab2daa8cbd82d1093deae", "skeleton": "<|skeleton|>\nclass Solution:\n\n def minCameraCover_wrong(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_0|>\n\n def minCameraCover(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_1|>\n\n def minCameraCover2(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def minCameraCover_wrong(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n dp, all = 
(collections.defaultdict(int), collections.defaultdict(int))\n\n def dfs(node):\n if node in dp:\n return dp[node]\n if not node.left and (not node.right):\n dp[node], all[node] = (1, 0)\n return 1\n if node.left:\n all[node] += dfs(node.left)\n if node.right:\n all[node] += dfs(node.right)\n dp[node] = 1 + all[node.left] + all[node.right]\n all[node] = min(dp[node], dp[node.left] + dp[node.right])\n return dp[node]\n dfs(root)\n return all[root] if all[root] else dp[root]\n\n def minCameraCover(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n def dfs(node):\n if not node:\n return (0, 0, float('inf'))\n l0, l1, l2 = dfs(node.left)\n r0, r1, r2 = dfs(node.right)\n dp0 = l1 + r1\n dp1 = min(l2 + min(r1, r2), r2 + min(l1, l2))\n dp2 = 1 + min(l0, l1, l2) + min(r0, r1, r2)\n return (dp0, dp1, dp2)\n a, b, c = dfs(root)\n return min(b, c)\n\n def minCameraCover2(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n ans, monitored = ([0], {None})\n\n def dfs(node, parent=None):\n if node:\n dfs(node.left, node)\n dfs(node.right, node)\n if parent is None and node not in monitored or node.left not in monitored or node.right not in monitored:\n ans[0] += 1\n monitored.update({node, parent, node.left, node.right})\n dfs(root)\n return ans[0]\n", "source": "the_stack_v2_python_sparse", "source_path": "learnpythonthehardway/Binary-Tree-Cameras-968.py", "source_repo": "dgpllc/leetcode-python", "split": "val", "star_events_count": 0} {"blob_id": "d1892a9c3e34c6e1557fb2d8e310262b03e3468b", "bodies": ["super(MainWindow, self).__init__()\nself.w = 0\nself.h = 0\nself.init_ui(name, title)", "self.w = 140\nself.h = 100\nself.setObjectName(name)\nself.setWindowTitle(title)\nself.resize(self.w, self.h)\nself.custom_edit = CustomEdit(self, size=(10, 10, 120, 24), name='custom_edit', search=False)\nself.custom_edit.setPlaceholderText('我是自定义输入框')"], "bodies_text": "<|body_start_0|>\n super(MainWindow, self).__init__()\n self.w = 0\n self.h = 0\n self.init_ui(name, title)\n<|end_body_0|>\n\n<|body_start_1|>\n self.w = 140\n self.h = 100\n self.setObjectName(name)\n self.setWindowTitle(title)\n self.resize(self.w, self.h)\n self.custom_edit = CustomEdit(self, size=(10, 10, 120, 24), name='custom_edit', search=False)\n self.custom_edit.setPlaceholderText('我是自定义输入框')\n<|end_body_1|>\n", "class_docstring": "主窗口,继承了QMainWindow类", "class_name": "MainWindow", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MainWindow:\n \"\"\"主窗口,继承了QMainWindow类\"\"\"\n\n def __init__(self, name, title):\n \"\"\"初始化类的成员变量\"\"\"\n <|body_0|>\n\n def init_ui(self, name, title):\n \"\"\"初始化UI界面\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MainWindow, self).__init__()\n self.w = 0\n self.h = 0\n self.init_ui(name, title)\n<|end_body_0|>\n\n<|body_start_1|>\n self.w = 140\n self.h = 100\n self.setObjectName(name)\n self.setWindowTitle(title)\n self.resize(self.w, self.h)\n self.custom_edit = CustomEdit(self, size=(10, 10, 120, 24), name='custom_edit', search=False)\n self.custom_edit.setPlaceholderText('我是自定义输入框')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000217", "length_bytes": 3141, "license_type": "no_license", "methods": [{"docstring": "初始化类的成员变量", "name": "__init__", "signature": "def __init__(self, name, title)"}, {"docstring": "初始化UI界面", "name": "init_ui", "signature": "def init_ui(self, name, title)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007319", "prompt": "Implement the Python 
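The Solution record above is LeetCode 968 (Binary Tree Cameras). Its accepted minCameraCover computes, for each subtree, three costs: the node is covered by its parent, covered by a child's camera, or holds a camera itself; the root may not rely on a parent. A standalone sketch follows; the record assumes an external TreeNode, so a minimal one is defined here.

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right


def min_camera_cover(root):
    # dfs returns (covered_by_parent, covered_by_child, has_camera) costs.
    def dfs(node):
        if not node:
            return (0, 0, float('inf'))
        l0, l1, l2 = dfs(node.left)
        r0, r1, r2 = dfs(node.right)
        dp0 = l1 + r1                                  # parent will cover this node
        dp1 = min(l2 + min(r1, r2), r2 + min(l1, l2))  # some child holds a camera
        dp2 = 1 + min(l0, l1, l2) + min(r0, r1, r2)    # place a camera here
        return (dp0, dp1, dp2)

    _, b, c = dfs(root)
    return min(b, c)  # the root has no parent, so state 0 is excluded


# Tree [0, 0, None, 0, 0]: one camera at the middle node covers everything.
root = TreeNode(0, TreeNode(0, TreeNode(0), TreeNode(0)))
assert min_camera_cover(root) == 1

The float('inf') sentinel for an absent child's has_camera state is what forbids "covering" a node with a camera that does not exist, which is exactly the case the record's minCameraCover_wrong mishandles.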
class `MainWindow` described below.\n\nClass description:\n主窗口,继承了QMainWindow类\n\nMethod signatures and docstrings:\n- def __init__(self, name, title): 初始化类的成员变量\n- def init_ui(self, name, title): 初始化UI界面", "prompted_full_text": "Implement the Python class `MainWindow` described below.\n\nClass description:\n主窗口,继承了QMainWindow类\n\nMethod signatures and docstrings:\n- def __init__(self, name, title): 初始化类的成员变量\n- def init_ui(self, name, title): 初始化UI界面\n\n<|skeleton|>\nclass MainWindow:\n \"\"\"主窗口,继承了QMainWindow类\"\"\"\n\n def __init__(self, name, title):\n \"\"\"初始化类的成员变量\"\"\"\n <|body_0|>\n\n def init_ui(self, name, title):\n \"\"\"初始化UI界面\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MainWindow, self).__init__()\n self.w = 0\n self.h = 0\n self.init_ui(name, title)\n<|end_body_0|>\n\n<|body_start_1|>\n self.w = 140\n self.h = 100\n self.setObjectName(name)\n self.setWindowTitle(title)\n self.resize(self.w, self.h)\n self.custom_edit = CustomEdit(self, size=(10, 10, 120, 24), name='custom_edit', search=False)\n self.custom_edit.setPlaceholderText('我是自定义输入框')\n<|end_body_1|>\n", "revision_id": "b86a49e4b8c7c8c3d8546ce1b49f8f3bb6332307", "skeleton": "<|skeleton|>\nclass MainWindow:\n \"\"\"主窗口,继承了QMainWindow类\"\"\"\n\n def __init__(self, name, title):\n \"\"\"初始化类的成员变量\"\"\"\n <|body_0|>\n\n def init_ui(self, name, title):\n \"\"\"初始化UI界面\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MainWindow:\n \"\"\"主窗口,继承了QMainWindow类\"\"\"\n\n def __init__(self, name, title):\n \"\"\"初始化类的成员变量\"\"\"\n super(MainWindow, self).__init__()\n self.w = 0\n self.h = 0\n self.init_ui(name, title)\n\n def init_ui(self, name, title):\n \"\"\"初始化UI界面\"\"\"\n self.w = 140\n self.h = 100\n self.setObjectName(name)\n self.setWindowTitle(title)\n self.resize(self.w, self.h)\n self.custom_edit = CustomEdit(self, size=(10, 10, 120, 24), name='custom_edit', search=False)\n self.custom_edit.setPlaceholderText('我是自定义输入框')\n", "source": "the_stack_v2_python_sparse", "source_path": "assembly/search_completer/search_11.py", "source_repo": "masknugget/mypyqt", "split": "val", "star_events_count": 0} {"blob_id": "5662a5946d92b0dd126207f65ac366b9433538ce", "bodies": ["activation = self.params.get('attentionActivation', None)\nif activation == 'None':\n activation = None\nfeature_vector_size = K.int_shape(merged_input)[-1]\natt_layer = layers.TimeDistributed(layers.Dense(feature_vector_size, activation=activation), name='attention_matrix_score')(merged_input)\natt_layer = layers.Lambda(lambda x: K.mean(x, axis=2), name='attention_vector_score')(att_layer)\natt_layer = layers.Permute((2, 1))(layers.RepeatVector(feature_vector_size)(att_layer))\nmerged_input = layers.multiply([att_layer, merged_input])\nreturn merged_input", "layer = model.get_layer('attention_vector_score')\nattention_model = Model(inputs=model.input, outputs=[layer.output, model.output])\nreturn attention_model.predict(input_)", "pred_labels = []\natt_scores = []\nsentenceLengths = self.getSentenceLengths(sentences)\nfor start in range(0, len(sentences), batch_size):\n end = start + batch_size\n instances = []\n for feature_name in self.params['featureNames']:\n input_data = pad_sequences([numpy.asarray(instance[feature_name]) for instance in sentences[start:end]], self.max_sentece_length)\n instances.append(input_data)\n if not return_attention:\n predictions = model.predict(instances, verbose=False)\n else:\n 
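The MainWindow record above is PyQt code with Chinese docstrings ("主窗口,继承了QMainWindow类" reads "main window, inherits QMainWindow"; the placeholder string means "I am a custom input box"). A minimal PyQt5 equivalent follows; the record's CustomEdit widget is not shown in the dump, so a stock QLineEdit stands in for it.

import sys

from PyQt5.QtWidgets import QApplication, QLineEdit, QMainWindow


class MainWindow(QMainWindow):
    """Main window; per the record's docstring, a QMainWindow subclass."""

    def __init__(self, name, title):
        super().__init__()
        self.init_ui(name, title)

    def init_ui(self, name, title):
        self.setObjectName(name)
        self.setWindowTitle(title)
        self.resize(140, 100)
        # Stand-in for the record's CustomEdit(self, size=(10, 10, 120, 24), ...).
        self.edit = QLineEdit(self)
        self.edit.setGeometry(10, 10, 120, 24)
        self.edit.setPlaceholderText('I am a custom input box')


if __name__ == '__main__':
    app = QApplication(sys.argv)
    win = MainWindow('main_window', 'Demo')
    win.show()
    sys.exit(app.exec_())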
attention, predictions = self.label_and_attention(model, instances)\n predictions = predictions.argmax(axis=-1)\n for index, (pred, sentence) in enumerate(zip(predictions, sentences[start:end])):\n sentence_len = len(sentence[feature_name])\n pred_labels.append(pred[-sentence_len:])\n if return_attention:\n att_scores.append(attention[index, -sentence_len:])\nreturn (numpy.asarray(pred_labels), numpy.asarray(att_scores))", "if 'characters' in self.params['featureNames']:\n self.padCharacters(sentences)\nlabels = {}\nattention = {}\nfor model_name, model in self.models.items():\n padded_pred_labels, padded_att_scores = self.model_predict(model, sentences, return_attention)\n pred_labels = []\n att_scores = []\n for index, (padded_pred, sentence) in enumerate(zip(padded_pred_labels, sentences)):\n no_pad_tokens = numpy.where(numpy.asarray(sentence['tokens']))[0]\n if no_pad_tokens.max() > padded_pred.shape[0]:\n missing = no_pad_tokens.max() - padded_pred.shape[0]\n pred_labels.append(numpy.pad(padded_pred, (0, missing), 'constant'))\n if return_attention:\n att_scores.append(numpy.pad(padded_att_scores[index], (0, missing), 'constant'))\n else:\n pred_labels.append(padded_pred[no_pad_tokens])\n if return_attention:\n att_scores.append(padded_att_scores[index][no_pad_tokens])\n attention[model_name] = att_scores\n if not translate_labels:\n labels[model_name] = pred_labels\n continue\n idx2Label = self.idx2Labels[model_name]\n labels[model_name] = [[idx2Label[tag] for tag in tagSentence] for tagSentence in pred_labels]\nif return_attention:\n return (labels, attention)\nreturn labels"], "bodies_text": "<|body_start_0|>\n activation = self.params.get('attentionActivation', None)\n if activation == 'None':\n activation = None\n feature_vector_size = K.int_shape(merged_input)[-1]\n att_layer = layers.TimeDistributed(layers.Dense(feature_vector_size, activation=activation), name='attention_matrix_score')(merged_input)\n att_layer = layers.Lambda(lambda x: K.mean(x, axis=2), name='attention_vector_score')(att_layer)\n att_layer = layers.Permute((2, 1))(layers.RepeatVector(feature_vector_size)(att_layer))\n merged_input = layers.multiply([att_layer, merged_input])\n return merged_input\n<|end_body_0|>\n\n<|body_start_1|>\n layer = model.get_layer('attention_vector_score')\n attention_model = Model(inputs=model.input, outputs=[layer.output, model.output])\n return attention_model.predict(input_)\n<|end_body_1|>\n\n<|body_start_2|>\n pred_labels = []\n att_scores = []\n sentenceLengths = self.getSentenceLengths(sentences)\n for start in range(0, len(sentences), batch_size):\n end = start + batch_size\n instances = []\n for feature_name in self.params['featureNames']:\n input_data = pad_sequences([numpy.asarray(instance[feature_name]) for instance in sentences[start:end]], self.max_sentece_length)\n instances.append(input_data)\n if not return_attention:\n predictions = model.predict(instances, verbose=False)\n else:\n attention, predictions = self.label_and_attention(model, instances)\n predictions = predictions.argmax(axis=-1)\n for index, (pred, sentence) in enumerate(zip(predictions, sentences[start:end])):\n sentence_len = len(sentence[feature_name])\n pred_labels.append(pred[-sentence_len:])\n if return_attention:\n att_scores.append(attention[index, -sentence_len:])\n return (numpy.asarray(pred_labels), numpy.asarray(att_scores))\n<|end_body_2|>\n\n<|body_start_3|>\n if 'characters' in self.params['featureNames']:\n self.padCharacters(sentences)\n labels = {}\n attention = {}\n for model_name, 
model in self.models.items():\n padded_pred_labels, padded_att_scores = self.model_predict(model, sentences, return_attention)\n pred_labels = []\n att_scores = []\n for index, (padded_pred, sentence) in enumerate(zip(padded_pred_labels, sentences)):\n no_pad_tokens = numpy.where(numpy.asarray(sentence['tokens']))[0]\n if no_pad_tokens.max() > padded_pred.shape[0]:\n missing = no_pad_tokens.max() - padded_pred.shape[0]\n pred_labels.append(numpy.pad(padded_pred, (0, missing), 'constant'))\n if return_attention:\n att_scores.append(numpy.pad(padded_att_scores[index], (0, missing), 'constant'))\n else:\n pred_labels.append(padded_pred[no_pad_tokens])\n if return_attention:\n att_scores.append(padded_att_scores[index][no_pad_tokens])\n attention[model_name] = att_scores\n if not translate_labels:\n labels[model_name] = pred_labels\n continue\n idx2Label = self.idx2Labels[model_name]\n labels[model_name] = [[idx2Label[tag] for tag in tagSentence] for tagSentence in pred_labels]\n if return_attention:\n return (labels, attention)\n return labels\n<|end_body_3|>\n", "class_docstring": "Bidirectional RNN with an attention mechanism. The attention is applied timestep wise before the BiLSTM layer.", "class_name": "TimePreAttArgBiLSTM", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TimePreAttArgBiLSTM:\n \"\"\"Bidirectional RNN with an attention mechanism. The attention is applied timestep wise before the BiLSTM layer.\"\"\"\n\n def addPreAttentionLayer(self, merged_input):\n \"\"\"Add attention mechanisms to the tensor merged_input. Args: merged_input: 3-dimensional Tensor, where the first dimension corresponds to the batch size, the second to the sequence timesteps and the last one to the concatenation of features. Retruns: 3-dimensional Tensor of the same dimension as merged_input\"\"\"\n <|body_0|>\n\n def label_and_attention(self, model, input_):\n \"\"\"Classifies the sequences in input_ and returns the attention score. Args: model: a Keras model input_: a list of array representation of sentences. Returns: A tuple where the first element is the attention scores for each sentence, and the second is the model predictions.\"\"\"\n <|body_1|>\n\n def model_predict(self, model, sentences, return_attention=False, batch_size=32):\n \"\"\"Model probability distribution over set of labels for sentences. Args: model: a Keras model.\"\"\"\n <|body_2|>\n\n def predict(self, sentences, return_attention=False, translate_labels=True):\n \"\"\"Distribution over labels for sentences given by all models. Args: sentences: return_attention: if True, return the attention scores for every prediction. 
translate_labels: if True, replace the numeric value of the labels using the dataset mappings.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n activation = self.params.get('attentionActivation', None)\n if activation == 'None':\n activation = None\n feature_vector_size = K.int_shape(merged_input)[-1]\n att_layer = layers.TimeDistributed(layers.Dense(feature_vector_size, activation=activation), name='attention_matrix_score')(merged_input)\n att_layer = layers.Lambda(lambda x: K.mean(x, axis=2), name='attention_vector_score')(att_layer)\n att_layer = layers.Permute((2, 1))(layers.RepeatVector(feature_vector_size)(att_layer))\n merged_input = layers.multiply([att_layer, merged_input])\n return merged_input\n<|end_body_0|>\n\n<|body_start_1|>\n layer = model.get_layer('attention_vector_score')\n attention_model = Model(inputs=model.input, outputs=[layer.output, model.output])\n return attention_model.predict(input_)\n<|end_body_1|>\n\n<|body_start_2|>\n pred_labels = []\n att_scores = []\n sentenceLengths = self.getSentenceLengths(sentences)\n for start in range(0, len(sentences), batch_size):\n end = start + batch_size\n instances = []\n for feature_name in self.params['featureNames']:\n input_data = pad_sequences([numpy.asarray(instance[feature_name]) for instance in sentences[start:end]], self.max_sentece_length)\n instances.append(input_data)\n if not return_attention:\n predictions = model.predict(instances, verbose=False)\n else:\n attention, predictions = self.label_and_attention(model, instances)\n predictions = predictions.argmax(axis=-1)\n for index, (pred, sentence) in enumerate(zip(predictions, sentences[start:end])):\n sentence_len = len(sentence[feature_name])\n pred_labels.append(pred[-sentence_len:])\n if return_attention:\n att_scores.append(attention[index, -sentence_len:])\n return (numpy.asarray(pred_labels), numpy.asarray(att_scores))\n<|end_body_2|>\n\n<|body_start_3|>\n if 'characters' in self.params['featureNames']:\n self.padCharacters(sentences)\n labels = {}\n attention = {}\n for model_name, model in self.models.items():\n padded_pred_labels, padded_att_scores = self.model_predict(model, sentences, return_attention)\n pred_labels = []\n att_scores = []\n for index, (padded_pred, sentence) in enumerate(zip(padded_pred_labels, sentences)):\n no_pad_tokens = numpy.where(numpy.asarray(sentence['tokens']))[0]\n if no_pad_tokens.max() > padded_pred.shape[0]:\n missing = no_pad_tokens.max() - padded_pred.shape[0]\n pred_labels.append(numpy.pad(padded_pred, (0, missing), 'constant'))\n if return_attention:\n att_scores.append(numpy.pad(padded_att_scores[index], (0, missing), 'constant'))\n else:\n pred_labels.append(padded_pred[no_pad_tokens])\n if return_attention:\n att_scores.append(padded_att_scores[index][no_pad_tokens])\n attention[model_name] = att_scores\n if not translate_labels:\n labels[model_name] = pred_labels\n continue\n idx2Label = self.idx2Labels[model_name]\n labels[model_name] = [[idx2Label[tag] for tag in tagSentence] for tagSentence in pred_labels]\n if return_attention:\n return (labels, attention)\n return labels\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000218", "length_bytes": 12548, "license_type": "no_license", "methods": [{"docstring": "Add attention mechanisms to the tensor merged_input. Args: merged_input: 3-dimensional Tensor, where the first dimension corresponds to the batch size, the second to the sequence timesteps and the last one to the concatenation of features. 
Retruns: 3-dimensional Tensor of the same dimension as merged_input", "name": "addPreAttentionLayer", "signature": "def addPreAttentionLayer(self, merged_input)"}, {"docstring": "Classifies the sequences in input_ and returns the attention score. Args: model: a Keras model input_: a list of array representation of sentences. Returns: A tuple where the first element is the attention scores for each sentence, and the second is the model predictions.", "name": "label_and_attention", "signature": "def label_and_attention(self, model, input_)"}, {"docstring": "Model probability distribution over set of labels for sentences. Args: model: a Keras model.", "name": "model_predict", "signature": "def model_predict(self, model, sentences, return_attention=False, batch_size=32)"}, {"docstring": "Distribution over labels for sentences given by all models. Args: sentences: return_attention: if True, return the attention scores for every prediction. translate_labels: if True, replace the numeric value of the labels using the dataset mappings.", "name": "predict", "signature": "def predict(self, sentences, return_attention=False, translate_labels=True)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_003858", "prompt": "Implement the Python class `TimePreAttArgBiLSTM` described below.\n\nClass description:\nBidirectional RNN with an attention mechanism. The attention is applied timestep wise before the BiLSTM layer.\n\nMethod signatures and docstrings:\n- def addPreAttentionLayer(self, merged_input): Add attention mechanisms to the tensor merged_input. Args: merged_input: 3-dimensional Tensor, where the first dimension corresponds to the batch size, the second to the sequence timesteps and the last one to the concatenation of features. Retruns: 3-dimensional Tensor of the same dimension as merged_input\n- def label_and_attention(self, model, input_): Classifies the sequences in input_ and returns the attention score. Args: model: a Keras model input_: a list of array representation of sentences. Returns: A tuple where the first element is the attention scores for each sentence, and the second is the model predictions.\n- def model_predict(self, model, sentences, return_attention=False, batch_size=32): Model probability distribution over set of labels for sentences. Args: model: a Keras model.\n- def predict(self, sentences, return_attention=False, translate_labels=True): Distribution over labels for sentences given by all models. Args: sentences: return_attention: if True, return the attention scores for every prediction. translate_labels: if True, replace the numeric value of the labels using the dataset mappings.", "prompted_full_text": "Implement the Python class `TimePreAttArgBiLSTM` described below.\n\nClass description:\nBidirectional RNN with an attention mechanism. The attention is applied timestep wise before the BiLSTM layer.\n\nMethod signatures and docstrings:\n- def addPreAttentionLayer(self, merged_input): Add attention mechanisms to the tensor merged_input. Args: merged_input: 3-dimensional Tensor, where the first dimension corresponds to the batch size, the second to the sequence timesteps and the last one to the concatenation of features. Retruns: 3-dimensional Tensor of the same dimension as merged_input\n- def label_and_attention(self, model, input_): Classifies the sequences in input_ and returns the attention score. Args: model: a Keras model input_: a list of array representation of sentences. 
Returns: A tuple where the first element is the attention scores for each sentence, and the second is the model predictions.\n- def model_predict(self, model, sentences, return_attention=False, batch_size=32): Model probability distribution over set of labels for sentences. Args: model: a Keras model.\n- def predict(self, sentences, return_attention=False, translate_labels=True): Distribution over labels for sentences given by all models. Args: sentences: return_attention: if True, return the attention scores for every prediction. translate_labels: if True, replace the numeric value of the labels using the dataset mappings.\n\n<|skeleton|>\nclass TimePreAttArgBiLSTM:\n \"\"\"Bidirectional RNN with an attention mechanism. The attention is applied timestep wise before the BiLSTM layer.\"\"\"\n\n def addPreAttentionLayer(self, merged_input):\n \"\"\"Add attention mechanisms to the tensor merged_input. Args: merged_input: 3-dimensional Tensor, where the first dimension corresponds to the batch size, the second to the sequence timesteps and the last one to the concatenation of features. Retruns: 3-dimensional Tensor of the same dimension as merged_input\"\"\"\n <|body_0|>\n\n def label_and_attention(self, model, input_):\n \"\"\"Classifies the sequences in input_ and returns the attention score. Args: model: a Keras model input_: a list of array representation of sentences. Returns: A tuple where the first element is the attention scores for each sentence, and the second is the model predictions.\"\"\"\n <|body_1|>\n\n def model_predict(self, model, sentences, return_attention=False, batch_size=32):\n \"\"\"Model probability distribution over set of labels for sentences. Args: model: a Keras model.\"\"\"\n <|body_2|>\n\n def predict(self, sentences, return_attention=False, translate_labels=True):\n \"\"\"Distribution over labels for sentences given by all models. Args: sentences: return_attention: if True, return the attention scores for every prediction. 
translate_labels: if True, replace the numeric value of the labels using the dataset mappings.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n activation = self.params.get('attentionActivation', None)\n if activation == 'None':\n activation = None\n feature_vector_size = K.int_shape(merged_input)[-1]\n att_layer = layers.TimeDistributed(layers.Dense(feature_vector_size, activation=activation), name='attention_matrix_score')(merged_input)\n att_layer = layers.Lambda(lambda x: K.mean(x, axis=2), name='attention_vector_score')(att_layer)\n att_layer = layers.Permute((2, 1))(layers.RepeatVector(feature_vector_size)(att_layer))\n merged_input = layers.multiply([att_layer, merged_input])\n return merged_input\n<|end_body_0|>\n\n<|body_start_1|>\n layer = model.get_layer('attention_vector_score')\n attention_model = Model(inputs=model.input, outputs=[layer.output, model.output])\n return attention_model.predict(input_)\n<|end_body_1|>\n\n<|body_start_2|>\n pred_labels = []\n att_scores = []\n sentenceLengths = self.getSentenceLengths(sentences)\n for start in range(0, len(sentences), batch_size):\n end = start + batch_size\n instances = []\n for feature_name in self.params['featureNames']:\n input_data = pad_sequences([numpy.asarray(instance[feature_name]) for instance in sentences[start:end]], self.max_sentece_length)\n instances.append(input_data)\n if not return_attention:\n predictions = model.predict(instances, verbose=False)\n else:\n attention, predictions = self.label_and_attention(model, instances)\n predictions = predictions.argmax(axis=-1)\n for index, (pred, sentence) in enumerate(zip(predictions, sentences[start:end])):\n sentence_len = len(sentence[feature_name])\n pred_labels.append(pred[-sentence_len:])\n if return_attention:\n att_scores.append(attention[index, -sentence_len:])\n return (numpy.asarray(pred_labels), numpy.asarray(att_scores))\n<|end_body_2|>\n\n<|body_start_3|>\n if 'characters' in self.params['featureNames']:\n self.padCharacters(sentences)\n labels = {}\n attention = {}\n for model_name, model in self.models.items():\n padded_pred_labels, padded_att_scores = self.model_predict(model, sentences, return_attention)\n pred_labels = []\n att_scores = []\n for index, (padded_pred, sentence) in enumerate(zip(padded_pred_labels, sentences)):\n no_pad_tokens = numpy.where(numpy.asarray(sentence['tokens']))[0]\n if no_pad_tokens.max() > padded_pred.shape[0]:\n missing = no_pad_tokens.max() - padded_pred.shape[0]\n pred_labels.append(numpy.pad(padded_pred, (0, missing), 'constant'))\n if return_attention:\n att_scores.append(numpy.pad(padded_att_scores[index], (0, missing), 'constant'))\n else:\n pred_labels.append(padded_pred[no_pad_tokens])\n if return_attention:\n att_scores.append(padded_att_scores[index][no_pad_tokens])\n attention[model_name] = att_scores\n if not translate_labels:\n labels[model_name] = pred_labels\n continue\n idx2Label = self.idx2Labels[model_name]\n labels[model_name] = [[idx2Label[tag] for tag in tagSentence] for tagSentence in pred_labels]\n if return_attention:\n return (labels, attention)\n return labels\n<|end_body_3|>\n", "revision_id": "74a9a09194bebefd8581cfee0676ed7d6bceaf14", "skeleton": "<|skeleton|>\nclass TimePreAttArgBiLSTM:\n \"\"\"Bidirectional RNN with an attention mechanism. The attention is applied timestep wise before the BiLSTM layer.\"\"\"\n\n def addPreAttentionLayer(self, merged_input):\n \"\"\"Add attention mechanisms to the tensor merged_input. 
Args: merged_input: 3-dimensional Tensor, where the first dimension corresponds to the batch size, the second to the sequence timesteps and the last one to the concatenation of features. Retruns: 3-dimensional Tensor of the same dimension as merged_input\"\"\"\n <|body_0|>\n\n def label_and_attention(self, model, input_):\n \"\"\"Classifies the sequences in input_ and returns the attention score. Args: model: a Keras model input_: a list of array representation of sentences. Returns: A tuple where the first element is the attention scores for each sentence, and the second is the model predictions.\"\"\"\n <|body_1|>\n\n def model_predict(self, model, sentences, return_attention=False, batch_size=32):\n \"\"\"Model probability distribution over set of labels for sentences. Args: model: a Keras model.\"\"\"\n <|body_2|>\n\n def predict(self, sentences, return_attention=False, translate_labels=True):\n \"\"\"Distribution over labels for sentences given by all models. Args: sentences: return_attention: if True, return the attention scores for every prediction. translate_labels: if True, replace the numeric value of the labels using the dataset mappings.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TimePreAttArgBiLSTM:\n \"\"\"Bidirectional RNN with an attention mechanism. The attention is applied timestep wise before the BiLSTM layer.\"\"\"\n\n def addPreAttentionLayer(self, merged_input):\n \"\"\"Add attention mechanisms to the tensor merged_input. Args: merged_input: 3-dimensional Tensor, where the first dimension corresponds to the batch size, the second to the sequence timesteps and the last one to the concatenation of features. Retruns: 3-dimensional Tensor of the same dimension as merged_input\"\"\"\n activation = self.params.get('attentionActivation', None)\n if activation == 'None':\n activation = None\n feature_vector_size = K.int_shape(merged_input)[-1]\n att_layer = layers.TimeDistributed(layers.Dense(feature_vector_size, activation=activation), name='attention_matrix_score')(merged_input)\n att_layer = layers.Lambda(lambda x: K.mean(x, axis=2), name='attention_vector_score')(att_layer)\n att_layer = layers.Permute((2, 1))(layers.RepeatVector(feature_vector_size)(att_layer))\n merged_input = layers.multiply([att_layer, merged_input])\n return merged_input\n\n def label_and_attention(self, model, input_):\n \"\"\"Classifies the sequences in input_ and returns the attention score. Args: model: a Keras model input_: a list of array representation of sentences. Returns: A tuple where the first element is the attention scores for each sentence, and the second is the model predictions.\"\"\"\n layer = model.get_layer('attention_vector_score')\n attention_model = Model(inputs=model.input, outputs=[layer.output, model.output])\n return attention_model.predict(input_)\n\n def model_predict(self, model, sentences, return_attention=False, batch_size=32):\n \"\"\"Model probability distribution over set of labels for sentences. 
Args: model: a Keras model.\"\"\"\n pred_labels = []\n att_scores = []\n sentenceLengths = self.getSentenceLengths(sentences)\n for start in range(0, len(sentences), batch_size):\n end = start + batch_size\n instances = []\n for feature_name in self.params['featureNames']:\n input_data = pad_sequences([numpy.asarray(instance[feature_name]) for instance in sentences[start:end]], self.max_sentece_length)\n instances.append(input_data)\n if not return_attention:\n predictions = model.predict(instances, verbose=False)\n else:\n attention, predictions = self.label_and_attention(model, instances)\n predictions = predictions.argmax(axis=-1)\n for index, (pred, sentence) in enumerate(zip(predictions, sentences[start:end])):\n sentence_len = len(sentence[feature_name])\n pred_labels.append(pred[-sentence_len:])\n if return_attention:\n att_scores.append(attention[index, -sentence_len:])\n return (numpy.asarray(pred_labels), numpy.asarray(att_scores))\n\n def predict(self, sentences, return_attention=False, translate_labels=True):\n \"\"\"Distribution over labels for sentences given by all models. Args: sentences: return_attention: if True, return the attention scores for every prediction. translate_labels: if True, replace the numeric value of the labels using the dataset mappings.\"\"\"\n if 'characters' in self.params['featureNames']:\n self.padCharacters(sentences)\n labels = {}\n attention = {}\n for model_name, model in self.models.items():\n padded_pred_labels, padded_att_scores = self.model_predict(model, sentences, return_attention)\n pred_labels = []\n att_scores = []\n for index, (padded_pred, sentence) in enumerate(zip(padded_pred_labels, sentences)):\n no_pad_tokens = numpy.where(numpy.asarray(sentence['tokens']))[0]\n if no_pad_tokens.max() > padded_pred.shape[0]:\n missing = no_pad_tokens.max() - padded_pred.shape[0]\n pred_labels.append(numpy.pad(padded_pred, (0, missing), 'constant'))\n if return_attention:\n att_scores.append(numpy.pad(padded_att_scores[index], (0, missing), 'constant'))\n else:\n pred_labels.append(padded_pred[no_pad_tokens])\n if return_attention:\n att_scores.append(padded_att_scores[index][no_pad_tokens])\n attention[model_name] = att_scores\n if not translate_labels:\n labels[model_name] = pred_labels\n continue\n idx2Label = self.idx2Labels[model_name]\n labels[model_name] = [[idx2Label[tag] for tag in tagSentence] for tagSentence in pred_labels]\n if return_attention:\n return (labels, attention)\n return labels\n", "source": "the_stack_v2_python_sparse", "source_path": "models/att_arg_bilstm.py", "source_repo": "mit0110/argument_mining", "split": "val", "star_events_count": 1} {"blob_id": "4e891eeb643036e61a3a120984403f3a7c9e7a96", "bodies": ["self.generator = generator\nself.commons = pywikibot.Site('commons', 'commons')\nself.repo = pywikibot.Site().data_repository()", "for itempage in self.generator:\n pywikibot.output(u'Working on %s' % (itempage.title(),))\n if not itempage.exists():\n pywikibot.output(u'Item does not exist, skipping')\n continue\n if itempage.isRedirectPage():\n itempage = itempage.getRedirectTarget()\n data = itempage.get()\n claims = data.get('claims')\n sitelinks = data.get('sitelinks')\n if u'P910' in claims:\n pywikibot.output(u\"Item has topic's main category (P910), skipping\")\n continue\n if u'commonswiki' in sitelinks:\n pywikibot.output(u'Item already has a sitelink to Commons, skipping')\n continue\n if u'P373' not in claims:\n pywikibot.output(u'Item seems to be missing Commons category (P373), skipping')\n continue\n 
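The TimePreAttArgBiLSTM record's addPreAttentionLayer scores every timestep with a TimeDistributed Dense layer, averages those scores over the feature axis to one scalar per timestep, broadcasts the scalar back across features, and multiplies it into the input. A runnable sketch of that block under tf.keras; tf.reduce_mean replaces the record's K.mean (equivalent under the TensorFlow backend), and the toy shapes are arbitrary.

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers


def add_pre_attention(merged_input, activation=None):
    size = merged_input.shape[-1]
    # Per-timestep score vector, collapsed to one scalar per timestep.
    att = layers.TimeDistributed(layers.Dense(size, activation=activation),
                                 name='attention_matrix_score')(merged_input)
    att = layers.Lambda(lambda x: tf.reduce_mean(x, axis=2),
                        name='attention_vector_score')(att)
    # (batch, T) -> (batch, size, T) -> (batch, T, size): broadcast over features.
    att = layers.Permute((2, 1))(layers.RepeatVector(size)(att))
    return layers.multiply([att, merged_input])


inputs = tf.keras.Input(shape=(7, 16))  # (timesteps, features)
model = tf.keras.Model(inputs, add_pre_attention(inputs))
print(model(np.zeros((2, 7, 16), dtype='float32')).shape)  # (2, 7, 16)

Naming the Lambda 'attention_vector_score' matters: it is the hook the record's label_and_attention uses with model.get_layer to read the attention scores back out at prediction time.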
commonscategorytitle = claims.get('P373')[0].getTarget()\n commonscategory = pywikibot.Category(self.commons, title=commonscategorytitle)\n if not commonscategory.exists():\n pywikibot.output(u'Commons category %s does not exist, skipping' % (commonscategory.title(),))\n continue\n summary = 'Add sitelink to %s based on Commons category (P373)' % commonscategory.title(asLink=True, insite=self.repo)\n pywikibot.output(summary)\n try:\n itempage.setSitelink(commonscategory, summary=summary)\n except pywikibot.exceptions.OtherPageSaveError:\n pywikibot.output(u'Item save failed, probably a conflicting sitelink, skipping')"], "bodies_text": "<|body_start_0|>\n self.generator = generator\n self.commons = pywikibot.Site('commons', 'commons')\n self.repo = pywikibot.Site().data_repository()\n<|end_body_0|>\n\n<|body_start_1|>\n for itempage in self.generator:\n pywikibot.output(u'Working on %s' % (itempage.title(),))\n if not itempage.exists():\n pywikibot.output(u'Item does not exist, skipping')\n continue\n if itempage.isRedirectPage():\n itempage = itempage.getRedirectTarget()\n data = itempage.get()\n claims = data.get('claims')\n sitelinks = data.get('sitelinks')\n if u'P910' in claims:\n pywikibot.output(u\"Item has topic's main category (P910), skipping\")\n continue\n if u'commonswiki' in sitelinks:\n pywikibot.output(u'Item already has a sitelink to Commons, skipping')\n continue\n if u'P373' not in claims:\n pywikibot.output(u'Item seems to be missing Commons category (P373), skipping')\n continue\n commonscategorytitle = claims.get('P373')[0].getTarget()\n commonscategory = pywikibot.Category(self.commons, title=commonscategorytitle)\n if not commonscategory.exists():\n pywikibot.output(u'Commons category %s does not exist, skipping' % (commonscategory.title(),))\n continue\n summary = 'Add sitelink to %s based on Commons category (P373)' % commonscategory.title(asLink=True, insite=self.repo)\n pywikibot.output(summary)\n try:\n itempage.setSitelink(commonscategory, summary=summary)\n except pywikibot.exceptions.OtherPageSaveError:\n pywikibot.output(u'Item save failed, probably a conflicting sitelink, skipping')\n<|end_body_1|>\n", "class_docstring": "A bot to Commons Category sitelinks", "class_name": "MissingCommonsSitelinkBot", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MissingCommonsSitelinkBot:\n \"\"\"A bot to Commons Category sitelinks\"\"\"\n\n def __init__(self, generator):\n \"\"\"Arguments: * generator - A generator that yields ItemPage objects.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Starts the robot.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.generator = generator\n self.commons = pywikibot.Site('commons', 'commons')\n self.repo = pywikibot.Site().data_repository()\n<|end_body_0|>\n\n<|body_start_1|>\n for itempage in self.generator:\n pywikibot.output(u'Working on %s' % (itempage.title(),))\n if not itempage.exists():\n pywikibot.output(u'Item does not exist, skipping')\n continue\n if itempage.isRedirectPage():\n itempage = itempage.getRedirectTarget()\n data = itempage.get()\n claims = data.get('claims')\n sitelinks = data.get('sitelinks')\n if u'P910' in claims:\n pywikibot.output(u\"Item has topic's main category (P910), skipping\")\n continue\n if u'commonswiki' in sitelinks:\n pywikibot.output(u'Item already has a sitelink to Commons, skipping')\n continue\n if u'P373' not in claims:\n pywikibot.output(u'Item seems to be missing Commons category (P373), skipping')\n 
continue\n commonscategorytitle = claims.get('P373')[0].getTarget()\n commonscategory = pywikibot.Category(self.commons, title=commonscategorytitle)\n if not commonscategory.exists():\n pywikibot.output(u'Commons category %s does not exist, skipping' % (commonscategory.title(),))\n continue\n summary = 'Add sitelink to %s based on Commons category (P373)' % commonscategory.title(asLink=True, insite=self.repo)\n pywikibot.output(summary)\n try:\n itempage.setSitelink(commonscategory, summary=summary)\n except pywikibot.exceptions.OtherPageSaveError:\n pywikibot.output(u'Item save failed, probably a conflicting sitelink, skipping')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000219", "length_bytes": 4707, "license_type": "no_license", "methods": [{"docstring": "Arguments: * generator - A generator that yields ItemPage objects.", "name": "__init__", "signature": "def __init__(self, generator)"}, {"docstring": "Starts the robot.", "name": "run", "signature": "def run(self)"}], "n_methods": 2, "prompt": "Implement the Python class `MissingCommonsSitelinkBot` described below.\n\nClass description:\nA bot to Commons Category sitelinks\n\nMethod signatures and docstrings:\n- def __init__(self, generator): Arguments: * generator - A generator that yields ItemPage objects.\n- def run(self): Starts the robot.", "prompted_full_text": "Implement the Python class `MissingCommonsSitelinkBot` described below.\n\nClass description:\nA bot to Commons Category sitelinks\n\nMethod signatures and docstrings:\n- def __init__(self, generator): Arguments: * generator - A generator that yields ItemPage objects.\n- def run(self): Starts the robot.\n\n<|skeleton|>\nclass MissingCommonsSitelinkBot:\n \"\"\"A bot to Commons Category sitelinks\"\"\"\n\n def __init__(self, generator):\n \"\"\"Arguments: * generator - A generator that yields ItemPage objects.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Starts the robot.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.generator = generator\n self.commons = pywikibot.Site('commons', 'commons')\n self.repo = pywikibot.Site().data_repository()\n<|end_body_0|>\n\n<|body_start_1|>\n for itempage in self.generator:\n pywikibot.output(u'Working on %s' % (itempage.title(),))\n if not itempage.exists():\n pywikibot.output(u'Item does not exist, skipping')\n continue\n if itempage.isRedirectPage():\n itempage = itempage.getRedirectTarget()\n data = itempage.get()\n claims = data.get('claims')\n sitelinks = data.get('sitelinks')\n if u'P910' in claims:\n pywikibot.output(u\"Item has topic's main category (P910), skipping\")\n continue\n if u'commonswiki' in sitelinks:\n pywikibot.output(u'Item already has a sitelink to Commons, skipping')\n continue\n if u'P373' not in claims:\n pywikibot.output(u'Item seems to be missing Commons category (P373), skipping')\n continue\n commonscategorytitle = claims.get('P373')[0].getTarget()\n commonscategory = pywikibot.Category(self.commons, title=commonscategorytitle)\n if not commonscategory.exists():\n pywikibot.output(u'Commons category %s does not exist, skipping' % (commonscategory.title(),))\n continue\n summary = 'Add sitelink to %s based on Commons category (P373)' % commonscategory.title(asLink=True, insite=self.repo)\n pywikibot.output(summary)\n try:\n itempage.setSitelink(commonscategory, summary=summary)\n except pywikibot.exceptions.OtherPageSaveError:\n pywikibot.output(u'Item save failed, probably a conflicting sitelink, skipping')\n<|end_body_1|>\n", "revision_id": 
"99a96e49cfe6b2d3151da7ad5469792d80171be3", "skeleton": "<|skeleton|>\nclass MissingCommonsSitelinkBot:\n \"\"\"A bot to Commons Category sitelinks\"\"\"\n\n def __init__(self, generator):\n \"\"\"Arguments: * generator - A generator that yields ItemPage objects.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Starts the robot.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MissingCommonsSitelinkBot:\n \"\"\"A bot to Commons Category sitelinks\"\"\"\n\n def __init__(self, generator):\n \"\"\"Arguments: * generator - A generator that yields ItemPage objects.\"\"\"\n self.generator = generator\n self.commons = pywikibot.Site('commons', 'commons')\n self.repo = pywikibot.Site().data_repository()\n\n def run(self):\n \"\"\"Starts the robot.\"\"\"\n for itempage in self.generator:\n pywikibot.output(u'Working on %s' % (itempage.title(),))\n if not itempage.exists():\n pywikibot.output(u'Item does not exist, skipping')\n continue\n if itempage.isRedirectPage():\n itempage = itempage.getRedirectTarget()\n data = itempage.get()\n claims = data.get('claims')\n sitelinks = data.get('sitelinks')\n if u'P910' in claims:\n pywikibot.output(u\"Item has topic's main category (P910), skipping\")\n continue\n if u'commonswiki' in sitelinks:\n pywikibot.output(u'Item already has a sitelink to Commons, skipping')\n continue\n if u'P373' not in claims:\n pywikibot.output(u'Item seems to be missing Commons category (P373), skipping')\n continue\n commonscategorytitle = claims.get('P373')[0].getTarget()\n commonscategory = pywikibot.Category(self.commons, title=commonscategorytitle)\n if not commonscategory.exists():\n pywikibot.output(u'Commons category %s does not exist, skipping' % (commonscategory.title(),))\n continue\n summary = 'Add sitelink to %s based on Commons category (P373)' % commonscategory.title(asLink=True, insite=self.repo)\n pywikibot.output(summary)\n try:\n itempage.setSitelink(commonscategory, summary=summary)\n except pywikibot.exceptions.OtherPageSaveError:\n pywikibot.output(u'Item save failed, probably a conflicting sitelink, skipping')\n", "source": "the_stack_v2_python_sparse", "source_path": "bot/wikidata/commons_category_missing_sitelink.py", "source_repo": "multichill/toollabs", "split": "val", "star_events_count": 18} {"blob_id": "e034fab7c1b65e338e3668073f1729b8f6e2b25f", "bodies": ["n = len(s)\nif n == 0:\n return s\ndp = [[0] * n for i in range(n)]\nleft = 0\nright = 0\nfor i in range(n - 2, -1, -1):\n dp[i][i] = 1\n for j in range(i + 1, n):\n dp[i][j] = s[i] == s[j] and (j - i < 3 or dp[i + 1][j - 1])\n if dp[i][j] and right - left < j - i:\n left = i\n right = j\nreturn s[left:right + 1]", "n = len(s)\nif n <= 1:\n return s\nfor i in range(1, n - 2):\n p1 = i"], "bodies_text": "<|body_start_0|>\n n = len(s)\n if n == 0:\n return s\n dp = [[0] * n for i in range(n)]\n left = 0\n right = 0\n for i in range(n - 2, -1, -1):\n dp[i][i] = 1\n for j in range(i + 1, n):\n dp[i][j] = s[i] == s[j] and (j - i < 3 or dp[i + 1][j - 1])\n if dp[i][j] and right - left < j - i:\n left = i\n right = j\n return s[left:right + 1]\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n if n <= 1:\n return s\n for i in range(1, n - 2):\n p1 = i\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def longestPalindrome(self, s: str) -> str:\n 
\"\"\"中级的动态规划——二维关系 时间复杂度 O(n^2) 空间复杂度O(n^2)\"\"\"\n <|body_0|>\n\n def longestPalindrome1(self, s: str) -> str:\n \"\"\"中心扩展的方法 时间复杂度 O(n^2) 空间复杂度O(1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(s)\n if n == 0:\n return s\n dp = [[0] * n for i in range(n)]\n left = 0\n right = 0\n for i in range(n - 2, -1, -1):\n dp[i][i] = 1\n for j in range(i + 1, n):\n dp[i][j] = s[i] == s[j] and (j - i < 3 or dp[i + 1][j - 1])\n if dp[i][j] and right - left < j - i:\n left = i\n right = j\n return s[left:right + 1]\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n if n <= 1:\n return s\n for i in range(1, n - 2):\n p1 = i\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000220", "length_bytes": 968, "license_type": "no_license", "methods": [{"docstring": "中级的动态规划——二维关系 时间复杂度 O(n^2) 空间复杂度O(n^2)", "name": "longestPalindrome", "signature": "def longestPalindrome(self, s: str) -> str"}, {"docstring": "中心扩展的方法 时间复杂度 O(n^2) 空间复杂度O(1)", "name": "longestPalindrome1", "signature": "def longestPalindrome1(self, s: str) -> str"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def longestPalindrome(self, s: str) -> str: 中级的动态规划——二维关系 时间复杂度 O(n^2) 空间复杂度O(n^2)\n- def longestPalindrome1(self, s: str) -> str: 中心扩展的方法 时间复杂度 O(n^2) 空间复杂度O(1)", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def longestPalindrome(self, s: str) -> str: 中级的动态规划——二维关系 时间复杂度 O(n^2) 空间复杂度O(n^2)\n- def longestPalindrome1(self, s: str) -> str: 中心扩展的方法 时间复杂度 O(n^2) 空间复杂度O(1)\n\n<|skeleton|>\nclass Solution:\n\n def longestPalindrome(self, s: str) -> str:\n \"\"\"中级的动态规划——二维关系 时间复杂度 O(n^2) 空间复杂度O(n^2)\"\"\"\n <|body_0|>\n\n def longestPalindrome1(self, s: str) -> str:\n \"\"\"中心扩展的方法 时间复杂度 O(n^2) 空间复杂度O(1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(s)\n if n == 0:\n return s\n dp = [[0] * n for i in range(n)]\n left = 0\n right = 0\n for i in range(n - 2, -1, -1):\n dp[i][i] = 1\n for j in range(i + 1, n):\n dp[i][j] = s[i] == s[j] and (j - i < 3 or dp[i + 1][j - 1])\n if dp[i][j] and right - left < j - i:\n left = i\n right = j\n return s[left:right + 1]\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n if n <= 1:\n return s\n for i in range(1, n - 2):\n p1 = i\n<|end_body_1|>\n", "revision_id": "95dddb78bccd169d9d219a473627361fe739ab5e", "skeleton": "<|skeleton|>\nclass Solution:\n\n def longestPalindrome(self, s: str) -> str:\n \"\"\"中级的动态规划——二维关系 时间复杂度 O(n^2) 空间复杂度O(n^2)\"\"\"\n <|body_0|>\n\n def longestPalindrome1(self, s: str) -> str:\n \"\"\"中心扩展的方法 时间复杂度 O(n^2) 空间复杂度O(1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def longestPalindrome(self, s: str) -> str:\n \"\"\"中级的动态规划——二维关系 时间复杂度 O(n^2) 空间复杂度O(n^2)\"\"\"\n n = len(s)\n if n == 0:\n return s\n dp = [[0] * n for i in range(n)]\n left = 0\n right = 0\n for i in range(n - 2, -1, -1):\n dp[i][i] = 1\n for j in range(i + 1, n):\n dp[i][j] = s[i] == s[j] and (j - i < 3 or dp[i + 1][j - 1])\n if dp[i][j] and right - left < j - i:\n left = i\n right = j\n return s[left:right + 1]\n\n def longestPalindrome1(self, s: str) -> str:\n \"\"\"中心扩展的方法 时间复杂度 O(n^2) 空间复杂度O(1)\"\"\"\n n = len(s)\n if n <= 1:\n return s\n for i in range(1, n - 2):\n p1 = i\n", 
"source": "the_stack_v2_python_sparse", "source_path": "DrasticPlan/longestPalindrome.py", "source_repo": "Philex5/codingPractice", "split": "val", "star_events_count": 0} {"blob_id": "bb83928e9bbd48ca6b15590e67f4a82ef1911845", "bodies": ["number = []\nnow = 0\nfor i in s:\n if i == '+':\n now += 1\n elif now > 1:\n number.append(now)\n now = 0\n else:\n now = 0\nif now > 1:\n number.append(now)\nif not number:\n return False\nn = max(number)\nprint(number)\ntable = [0, 0, 1, 1]\nfor i in range(4, n + 1):\n nums = []\n for j in range((i + 1) // 2):\n nums.append(table[j] ^ table[i - j - 2])\n table.append(self.firstMissingPositive(nums))\nresult = 0\nprint(table)\nfor i in number:\n result ^= table[i]\nif result == 0:\n return False\nelse:\n return True", "if not nums:\n return 1\nindex = 0\nn = len(nums)\nwhile index < n:\n if nums[index] >= 0 and nums[index] < n and (nums[index] != index) and (nums[nums[index]] != nums[index]):\n nums[nums[index]], nums[index] = (nums[index], nums[nums[index]])\n else:\n index += 1\nfor i, v in enumerate(nums):\n if i != v:\n return i\nreturn n"], "bodies_text": "<|body_start_0|>\n number = []\n now = 0\n for i in s:\n if i == '+':\n now += 1\n elif now > 1:\n number.append(now)\n now = 0\n else:\n now = 0\n if now > 1:\n number.append(now)\n if not number:\n return False\n n = max(number)\n print(number)\n table = [0, 0, 1, 1]\n for i in range(4, n + 1):\n nums = []\n for j in range((i + 1) // 2):\n nums.append(table[j] ^ table[i - j - 2])\n table.append(self.firstMissingPositive(nums))\n result = 0\n print(table)\n for i in number:\n result ^= table[i]\n if result == 0:\n return False\n else:\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if not nums:\n return 1\n index = 0\n n = len(nums)\n while index < n:\n if nums[index] >= 0 and nums[index] < n and (nums[index] != index) and (nums[nums[index]] != nums[index]):\n nums[nums[index]], nums[index] = (nums[index], nums[nums[index]])\n else:\n index += 1\n for i, v in enumerate(nums):\n if i != v:\n return i\n return n\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def canWin(self, s):\n \"\"\":type s: str :rtype: bool\"\"\"\n <|body_0|>\n\n def firstMissingPositive(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n number = []\n now = 0\n for i in s:\n if i == '+':\n now += 1\n elif now > 1:\n number.append(now)\n now = 0\n else:\n now = 0\n if now > 1:\n number.append(now)\n if not number:\n return False\n n = max(number)\n print(number)\n table = [0, 0, 1, 1]\n for i in range(4, n + 1):\n nums = []\n for j in range((i + 1) // 2):\n nums.append(table[j] ^ table[i - j - 2])\n table.append(self.firstMissingPositive(nums))\n result = 0\n print(table)\n for i in number:\n result ^= table[i]\n if result == 0:\n return False\n else:\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if not nums:\n return 1\n index = 0\n n = len(nums)\n while index < n:\n if nums[index] >= 0 and nums[index] < n and (nums[index] != index) and (nums[nums[index]] != nums[index]):\n nums[nums[index]], nums[index] = (nums[index], nums[nums[index]])\n else:\n index += 1\n for i, v in enumerate(nums):\n if i != v:\n return i\n return n\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000221", "length_bytes": 1581, "license_type": "no_license", "methods": [{"docstring": ":type s: str :rtype: bool", "name": 
"canWin", "signature": "def canWin(self, s)"}, {"docstring": ":type nums: List[int] :rtype: int", "name": "firstMissingPositive", "signature": "def firstMissingPositive(self, nums)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000111", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def canWin(self, s): :type s: str :rtype: bool\n- def firstMissingPositive(self, nums): :type nums: List[int] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def canWin(self, s): :type s: str :rtype: bool\n- def firstMissingPositive(self, nums): :type nums: List[int] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def canWin(self, s):\n \"\"\":type s: str :rtype: bool\"\"\"\n <|body_0|>\n\n def firstMissingPositive(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n number = []\n now = 0\n for i in s:\n if i == '+':\n now += 1\n elif now > 1:\n number.append(now)\n now = 0\n else:\n now = 0\n if now > 1:\n number.append(now)\n if not number:\n return False\n n = max(number)\n print(number)\n table = [0, 0, 1, 1]\n for i in range(4, n + 1):\n nums = []\n for j in range((i + 1) // 2):\n nums.append(table[j] ^ table[i - j - 2])\n table.append(self.firstMissingPositive(nums))\n result = 0\n print(table)\n for i in number:\n result ^= table[i]\n if result == 0:\n return False\n else:\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if not nums:\n return 1\n index = 0\n n = len(nums)\n while index < n:\n if nums[index] >= 0 and nums[index] < n and (nums[index] != index) and (nums[nums[index]] != nums[index]):\n nums[nums[index]], nums[index] = (nums[index], nums[nums[index]])\n else:\n index += 1\n for i, v in enumerate(nums):\n if i != v:\n return i\n return n\n<|end_body_1|>\n", "revision_id": "62856bf197eedf82b637949bc3fc68a5d94b1768", "skeleton": "<|skeleton|>\nclass Solution:\n\n def canWin(self, s):\n \"\"\":type s: str :rtype: bool\"\"\"\n <|body_0|>\n\n def firstMissingPositive(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def canWin(self, s):\n \"\"\":type s: str :rtype: bool\"\"\"\n number = []\n now = 0\n for i in s:\n if i == '+':\n now += 1\n elif now > 1:\n number.append(now)\n now = 0\n else:\n now = 0\n if now > 1:\n number.append(now)\n if not number:\n return False\n n = max(number)\n print(number)\n table = [0, 0, 1, 1]\n for i in range(4, n + 1):\n nums = []\n for j in range((i + 1) // 2):\n nums.append(table[j] ^ table[i - j - 2])\n table.append(self.firstMissingPositive(nums))\n result = 0\n print(table)\n for i in number:\n result ^= table[i]\n if result == 0:\n return False\n else:\n return True\n\n def firstMissingPositive(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n if not nums:\n return 1\n index = 0\n n = len(nums)\n while index < n:\n if nums[index] >= 0 and nums[index] < n and (nums[index] != index) and (nums[nums[index]] != nums[index]):\n nums[nums[index]], nums[index] = (nums[index], nums[nums[index]])\n else:\n index += 1\n for i, v in enumerate(nums):\n if i != v:\n return i\n return n\n", "source": "the_stack_v2_python_sparse", "source_path": 
"294. Flip Game II.py", "source_repo": "lxyres/leetcode-python", "split": "val", "star_events_count": 0} {"blob_id": "93e2f7c74ea961ebad6fda4b1e002b5d5bf84956", "bodies": ["super().__init__(args, logger, on_episode_end, log_start_t)\nassert isinstance(self.home_mac, EnsembleMAC), 'Ensemble experiment enforces \"mac\"=ensemble in configuration'\nself.home_mac: EnsembleMAC = self.home_mac", "self.home_mac.load_state_dict(agent=native)\nself.home_mac.load_state_dict(ensemble={0: foreign})\nself.home_learner.build_optimizer()\nself.home_learner.update_targets()"], "bodies_text": "<|body_start_0|>\n super().__init__(args, logger, on_episode_end, log_start_t)\n assert isinstance(self.home_mac, EnsembleMAC), 'Ensemble experiment enforces \"mac\"=ensemble in configuration'\n self.home_mac: EnsembleMAC = self.home_mac\n<|end_body_0|>\n\n<|body_start_1|>\n self.home_mac.load_state_dict(agent=native)\n self.home_mac.load_state_dict(ensemble={0: foreign})\n self.home_learner.build_optimizer()\n self.home_learner.update_targets()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "EnsembleExperiment", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EnsembleExperiment:\n\n def __init__(self, args, logger, on_episode_end=None, log_start_t=0):\n \"\"\"LeaguePlay performs training of a single multi-agent and offers loading of new adversarial agents. :param args: :param logger: :param on_episode_end:\"\"\"\n <|body_0|>\n\n def load_ensemble(self, native: OrderedDict, foreign: OrderedDict):\n \"\"\"Build an dual ensemble where parts of the native agent infer with the foreign agent :param native: state dict of a foreign network :param foreign: state dict of the home network :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(args, logger, on_episode_end, log_start_t)\n assert isinstance(self.home_mac, EnsembleMAC), 'Ensemble experiment enforces \"mac\"=ensemble in configuration'\n self.home_mac: EnsembleMAC = self.home_mac\n<|end_body_0|>\n\n<|body_start_1|>\n self.home_mac.load_state_dict(agent=native)\n self.home_mac.load_state_dict(ensemble={0: foreign})\n self.home_learner.build_optimizer()\n self.home_learner.update_targets()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000222", "length_bytes": 1638, "license_type": "no_license", "methods": [{"docstring": "LeaguePlay performs training of a single multi-agent and offers loading of new adversarial agents. :param args: :param logger: :param on_episode_end:", "name": "__init__", "signature": "def __init__(self, args, logger, on_episode_end=None, log_start_t=0)"}, {"docstring": "Build an dual ensemble where parts of the native agent infer with the foreign agent :param native: state dict of a foreign network :param foreign: state dict of the home network :return:", "name": "load_ensemble", "signature": "def load_ensemble(self, native: OrderedDict, foreign: OrderedDict)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004064", "prompt": "Implement the Python class `EnsembleExperiment` described below.\n\nClass description:\nImplement the EnsembleExperiment class.\n\nMethod signatures and docstrings:\n- def __init__(self, args, logger, on_episode_end=None, log_start_t=0): LeaguePlay performs training of a single multi-agent and offers loading of new adversarial agents. 
:param args: :param logger: :param on_episode_end:\n- def load_ensemble(self, native: OrderedDict, foreign: OrderedDict): Build an dual ensemble where parts of the native agent infer with the foreign agent :param native: state dict of a foreign network :param foreign: state dict of the home network :return:", "prompted_full_text": "Implement the Python class `EnsembleExperiment` described below.\n\nClass description:\nImplement the EnsembleExperiment class.\n\nMethod signatures and docstrings:\n- def __init__(self, args, logger, on_episode_end=None, log_start_t=0): LeaguePlay performs training of a single multi-agent and offers loading of new adversarial agents. :param args: :param logger: :param on_episode_end:\n- def load_ensemble(self, native: OrderedDict, foreign: OrderedDict): Build an dual ensemble where parts of the native agent infer with the foreign agent :param native: state dict of a foreign network :param foreign: state dict of the home network :return:\n\n<|skeleton|>\nclass EnsembleExperiment:\n\n def __init__(self, args, logger, on_episode_end=None, log_start_t=0):\n \"\"\"LeaguePlay performs training of a single multi-agent and offers loading of new adversarial agents. :param args: :param logger: :param on_episode_end:\"\"\"\n <|body_0|>\n\n def load_ensemble(self, native: OrderedDict, foreign: OrderedDict):\n \"\"\"Build an dual ensemble where parts of the native agent infer with the foreign agent :param native: state dict of a foreign network :param foreign: state dict of the home network :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(args, logger, on_episode_end, log_start_t)\n assert isinstance(self.home_mac, EnsembleMAC), 'Ensemble experiment enforces \"mac\"=ensemble in configuration'\n self.home_mac: EnsembleMAC = self.home_mac\n<|end_body_0|>\n\n<|body_start_1|>\n self.home_mac.load_state_dict(agent=native)\n self.home_mac.load_state_dict(ensemble={0: foreign})\n self.home_learner.build_optimizer()\n self.home_learner.update_targets()\n<|end_body_1|>\n", "revision_id": "c5c65992140c0fd61218513eb197189d560798cc", "skeleton": "<|skeleton|>\nclass EnsembleExperiment:\n\n def __init__(self, args, logger, on_episode_end=None, log_start_t=0):\n \"\"\"LeaguePlay performs training of a single multi-agent and offers loading of new adversarial agents. :param args: :param logger: :param on_episode_end:\"\"\"\n <|body_0|>\n\n def load_ensemble(self, native: OrderedDict, foreign: OrderedDict):\n \"\"\"Build an dual ensemble where parts of the native agent infer with the foreign agent :param native: state dict of a foreign network :param foreign: state dict of the home network :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class EnsembleExperiment:\n def __init__(self, args, logger, on_episode_end=None, log_start_t=0):\n \"\"\"LeaguePlay performs training of a single multi-agent and offers loading of new adversarial agents. 
:param args: :param logger: :param on_episode_end:\"\"\"\n super().__init__(args, logger, on_episode_end, log_start_t)\n assert isinstance(self.home_mac, EnsembleMAC), 'Ensemble experiment enforces \"mac\"=ensemble in configuration'\n self.home_mac: EnsembleMAC = self.home_mac\n\n def load_ensemble(self, native: OrderedDict, foreign: OrderedDict):\n \"\"\"Build an dual ensemble where parts of the native agent infer with the foreign agent :param native: state dict of a foreign network :param foreign: state dict of the home network :return:\"\"\"\n self.home_mac.load_state_dict(agent=native)\n self.home_mac.load_state_dict(ensemble={0: foreign})\n self.home_learner.build_optimizer()\n self.home_learner.update_targets()\n", "source": "the_stack_v2_python_sparse", "source_path": "src/runs/train/ensemble_experiment.py", "source_repo": "PMatthaei/ma-league", "split": "val", "star_events_count": 0} {"blob_id": "981287fb679a01c68bd55345c85b4383efa1ec18", "bodies": ["fileName = '10Lines'\nexpectedResult = [12.0, 13.5, 1.0, 5.5, 9.0, 19.5, 12.0, 23.5, 5.0, 51.0]\nactuatlResponse = PSPQuickSortInput.getArray(fileName)\nself.assertTrue(expectedResult, actuatlResponse)", "fileName = '10Lines1'\nactuatlResponse = PSPQuickSortInput.getArray(fileName)\nself.assertTrue(actuatlResponse)", "fileName = 'empty'\nexpectedResult = '\\n The file is empty \\n'\nactuatlResponse = PSPQuickSortInput.getArray(fileName)\nself.assertTrue(actuatlResponse)", "fileName = '10ContStrings'\nexpectedResult = '\\n The Lines contain Strings \\n'\nactuatlResponse = PSPQuickSortInput.getArray(fileName)\nself.assertTrue(actuatlResponse)", "fileName = '10ContBlanks'\nexpectedResult = [2.0, 3.4, 5.9, 6.5, 12.0, 13.0]\nactuatlResponse = PSPQuickSortInput.getArray(fileName)\nself.assertTrue(expectedResult, actuatlResponse)", "unsortedArray = [12.0, 13.5, 1.0, 5.5, 9.0, 19.5, 12.0, 23.5, 5.0, 51.0]\nexpectedResult = [1.0, 5.0, 5.5, 9.0, 12.0, 12.0, 13.5, 19.5, 23.5, 51.0]\nactuatlResponse = PSPQuickSortProcess.sort(unsortedArray)\nself.assertEqual(expectedResult, actuatlResponse)"], "bodies_text": "<|body_start_0|>\n fileName = '10Lines'\n expectedResult = [12.0, 13.5, 1.0, 5.5, 9.0, 19.5, 12.0, 23.5, 5.0, 51.0]\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(expectedResult, actuatlResponse)\n<|end_body_0|>\n\n<|body_start_1|>\n fileName = '10Lines1'\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(actuatlResponse)\n<|end_body_1|>\n\n<|body_start_2|>\n fileName = 'empty'\n expectedResult = '\\n The file is empty \\n'\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(actuatlResponse)\n<|end_body_2|>\n\n<|body_start_3|>\n fileName = '10ContStrings'\n expectedResult = '\\n The Lines contain Strings \\n'\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(actuatlResponse)\n<|end_body_3|>\n\n<|body_start_4|>\n fileName = '10ContBlanks'\n expectedResult = [2.0, 3.4, 5.9, 6.5, 12.0, 13.0]\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(expectedResult, actuatlResponse)\n<|end_body_4|>\n\n<|body_start_5|>\n unsortedArray = [12.0, 13.5, 1.0, 5.5, 9.0, 19.5, 12.0, 23.5, 5.0, 51.0]\n expectedResult = [1.0, 5.0, 5.5, 9.0, 12.0, 12.0, 13.5, 19.5, 23.5, 51.0]\n actuatlResponse = PSPQuickSortProcess.sort(unsortedArray)\n self.assertEqual(expectedResult, actuatlResponse)\n<|end_body_5|>\n", "class_docstring": "", "class_name": "TestStringMethods", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", 
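The test bodies just above check results with `self.assertTrue(expectedResult, actuatlResponse)` (the misspelling is in the source). That call never compares the two values: `assertTrue`'s second argument is only the failure message, so the assertion passes whenever the first argument is truthy. A minimal corrected sketch, assuming a `unittest.TestCase` subclass and a hypothetical `PSPQuickSortProcess` stand-in, since the module under test is not defined anywhere in this dump:

```python
import unittest


class PSPQuickSortProcess:
    """Hypothetical stand-in for the tested module; plain recursive quicksort."""

    @staticmethod
    def sort(values):
        if len(values) <= 1:
            return list(values)
        pivot, rest = values[0], values[1:]
        # Partition around the first element and sort each side recursively.
        return (PSPQuickSortProcess.sort([v for v in rest if v < pivot])
                + [pivot]
                + PSPQuickSortProcess.sort([v for v in rest if v >= pivot]))


class TestSort(unittest.TestCase):
    def test_sort_returns_sorted_array(self):
        unsorted = [12.0, 13.5, 1.0, 5.5, 9.0, 19.5, 12.0, 23.5, 5.0, 51.0]
        expected = [1.0, 5.0, 5.5, 9.0, 12.0, 12.0, 13.5, 19.5, 23.5, 51.0]
        # assertEqual actually compares the two values, unlike
        # assertTrue(expected, actual), which treats `actual` as a message.
        self.assertEqual(expected, PSPQuickSortProcess.sort(unsorted))


if __name__ == "__main__":
    unittest.main()
```

Of the six tests in the record, only `test_sort_success_return_sortedArray` uses `assertEqual` and therefore actually verifies behavior; the others would need the same treatment, plus `assertRaises` or explicit expected values for the error cases.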
"full_text": "<|skeleton|>\nclass TestStringMethods:\n\n def test_getArray_success_with_valid_values(self):\n \"\"\"This is testing for normal files\"\"\"\n <|body_0|>\n\n def test_getArray_error_with_not_existing_file(self):\n \"\"\"This is test for Non Existing file\"\"\"\n <|body_1|>\n\n def test_getArray_error_with_empty_value(self):\n \"\"\"This is for testing empty files\"\"\"\n <|body_2|>\n\n def test_getArray_error_inlude_strings(self):\n \"\"\"This is for testing array including strings\"\"\"\n <|body_3|>\n\n def test_getArray_success_contain_blanks(self):\n \"\"\"This is for testing blank lines\"\"\"\n <|body_4|>\n\n def test_sort_success_return_sortedArray(self):\n \"\"\"This is the test for getting sortedArray\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n fileName = '10Lines'\n expectedResult = [12.0, 13.5, 1.0, 5.5, 9.0, 19.5, 12.0, 23.5, 5.0, 51.0]\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(expectedResult, actuatlResponse)\n<|end_body_0|>\n\n<|body_start_1|>\n fileName = '10Lines1'\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(actuatlResponse)\n<|end_body_1|>\n\n<|body_start_2|>\n fileName = 'empty'\n expectedResult = '\\n The file is empty \\n'\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(actuatlResponse)\n<|end_body_2|>\n\n<|body_start_3|>\n fileName = '10ContStrings'\n expectedResult = '\\n The Lines contain Strings \\n'\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(actuatlResponse)\n<|end_body_3|>\n\n<|body_start_4|>\n fileName = '10ContBlanks'\n expectedResult = [2.0, 3.4, 5.9, 6.5, 12.0, 13.0]\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(expectedResult, actuatlResponse)\n<|end_body_4|>\n\n<|body_start_5|>\n unsortedArray = [12.0, 13.5, 1.0, 5.5, 9.0, 19.5, 12.0, 23.5, 5.0, 51.0]\n expectedResult = [1.0, 5.0, 5.5, 9.0, 12.0, 12.0, 13.5, 19.5, 23.5, 51.0]\n actuatlResponse = PSPQuickSortProcess.sort(unsortedArray)\n self.assertEqual(expectedResult, actuatlResponse)\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_val_000223", "length_bytes": 2577, "license_type": "no_license", "methods": [{"docstring": "This is testing for normal files", "name": "test_getArray_success_with_valid_values", "signature": "def test_getArray_success_with_valid_values(self)"}, {"docstring": "This is test for Non Existing file", "name": "test_getArray_error_with_not_existing_file", "signature": "def test_getArray_error_with_not_existing_file(self)"}, {"docstring": "This is for testing empty files", "name": "test_getArray_error_with_empty_value", "signature": "def test_getArray_error_with_empty_value(self)"}, {"docstring": "This is for testing array including strings", "name": "test_getArray_error_inlude_strings", "signature": "def test_getArray_error_inlude_strings(self)"}, {"docstring": "This is for testing blank lines", "name": "test_getArray_success_contain_blanks", "signature": "def test_getArray_success_contain_blanks(self)"}, {"docstring": "This is the test for getting sortedArray", "name": "test_sort_success_return_sortedArray", "signature": "def test_sort_success_return_sortedArray(self)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_005301", "prompt": "Implement the Python class `TestStringMethods` described below.\n\nClass description:\nImplement the TestStringMethods class.\n\nMethod signatures and docstrings:\n- def test_getArray_success_with_valid_values(self): This is testing for normal 
files\n- def test_getArray_error_with_not_existing_file(self): This is test for Non Existing file\n- def test_getArray_error_with_empty_value(self): This is for testing empty files\n- def test_getArray_error_inlude_strings(self): This is for testing array including strings\n- def test_getArray_success_contain_blanks(self): This is for testing blank lines\n- def test_sort_success_return_sortedArray(self): This is the test for getting sortedArray", "prompted_full_text": "Implement the Python class `TestStringMethods` described below.\n\nClass description:\nImplement the TestStringMethods class.\n\nMethod signatures and docstrings:\n- def test_getArray_success_with_valid_values(self): This is testing for normal files\n- def test_getArray_error_with_not_existing_file(self): This is test for Non Existing file\n- def test_getArray_error_with_empty_value(self): This is for testing empty files\n- def test_getArray_error_inlude_strings(self): This is for testing array including strings\n- def test_getArray_success_contain_blanks(self): This is for testing blank lines\n- def test_sort_success_return_sortedArray(self): This is the test for getting sortedArray\n\n<|skeleton|>\nclass TestStringMethods:\n\n def test_getArray_success_with_valid_values(self):\n \"\"\"This is testing for normal files\"\"\"\n <|body_0|>\n\n def test_getArray_error_with_not_existing_file(self):\n \"\"\"This is test for Non Existing file\"\"\"\n <|body_1|>\n\n def test_getArray_error_with_empty_value(self):\n \"\"\"This is for testing empty files\"\"\"\n <|body_2|>\n\n def test_getArray_error_inlude_strings(self):\n \"\"\"This is for testing array including strings\"\"\"\n <|body_3|>\n\n def test_getArray_success_contain_blanks(self):\n \"\"\"This is for testing blank lines\"\"\"\n <|body_4|>\n\n def test_sort_success_return_sortedArray(self):\n \"\"\"This is the test for getting sortedArray\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n fileName = '10Lines'\n expectedResult = [12.0, 13.5, 1.0, 5.5, 9.0, 19.5, 12.0, 23.5, 5.0, 51.0]\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(expectedResult, actuatlResponse)\n<|end_body_0|>\n\n<|body_start_1|>\n fileName = '10Lines1'\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(actuatlResponse)\n<|end_body_1|>\n\n<|body_start_2|>\n fileName = 'empty'\n expectedResult = '\\n The file is empty \\n'\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(actuatlResponse)\n<|end_body_2|>\n\n<|body_start_3|>\n fileName = '10ContStrings'\n expectedResult = '\\n The Lines contain Strings \\n'\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(actuatlResponse)\n<|end_body_3|>\n\n<|body_start_4|>\n fileName = '10ContBlanks'\n expectedResult = [2.0, 3.4, 5.9, 6.5, 12.0, 13.0]\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(expectedResult, actuatlResponse)\n<|end_body_4|>\n\n<|body_start_5|>\n unsortedArray = [12.0, 13.5, 1.0, 5.5, 9.0, 19.5, 12.0, 23.5, 5.0, 51.0]\n expectedResult = [1.0, 5.0, 5.5, 9.0, 12.0, 12.0, 13.5, 19.5, 23.5, 51.0]\n actuatlResponse = PSPQuickSortProcess.sort(unsortedArray)\n self.assertEqual(expectedResult, actuatlResponse)\n<|end_body_5|>\n", "revision_id": "72181672d800ec59bac06978cab08a59e734933e", "skeleton": "<|skeleton|>\nclass TestStringMethods:\n\n def test_getArray_success_with_valid_values(self):\n \"\"\"This is testing for normal files\"\"\"\n <|body_0|>\n\n def test_getArray_error_with_not_existing_file(self):\n \"\"\"This 
is test for Non Existing file\"\"\"\n <|body_1|>\n\n def test_getArray_error_with_empty_value(self):\n \"\"\"This is for testing empty files\"\"\"\n <|body_2|>\n\n def test_getArray_error_inlude_strings(self):\n \"\"\"This is for testing array including strings\"\"\"\n <|body_3|>\n\n def test_getArray_success_contain_blanks(self):\n \"\"\"This is for testing blank lines\"\"\"\n <|body_4|>\n\n def test_sort_success_return_sortedArray(self):\n \"\"\"This is the test for getting sortedArray\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestStringMethods:\n def test_getArray_success_with_valid_values(self):\n \"\"\"This is testing for normal files\"\"\"\n fileName = '10Lines'\n expectedResult = [12.0, 13.5, 1.0, 5.5, 9.0, 19.5, 12.0, 23.5, 5.0, 51.0]\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(expectedResult, actuatlResponse)\n\n def test_getArray_error_with_not_existing_file(self):\n \"\"\"This is test for Non Existing file\"\"\"\n fileName = '10Lines1'\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(actuatlResponse)\n\n def test_getArray_error_with_empty_value(self):\n \"\"\"This is for testing empty files\"\"\"\n fileName = 'empty'\n expectedResult = '\\n The file is empty \\n'\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(actuatlResponse)\n\n def test_getArray_error_inlude_strings(self):\n \"\"\"This is for testing array including strings\"\"\"\n fileName = '10ContStrings'\n expectedResult = '\\n The Lines contain Strings \\n'\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(actuatlResponse)\n\n def test_getArray_success_contain_blanks(self):\n \"\"\"This is for testing blank lines\"\"\"\n fileName = '10ContBlanks'\n expectedResult = [2.0, 3.4, 5.9, 6.5, 12.0, 13.0]\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n self.assertTrue(expectedResult, actuatlResponse)\n\n def test_sort_success_return_sortedArray(self):\n \"\"\"This is the test for getting sortedArray\"\"\"\n unsortedArray = [12.0, 13.5, 1.0, 5.5, 9.0, 19.5, 12.0, 23.5, 5.0, 51.0]\n expectedResult = [1.0, 5.0, 5.5, 9.0, 12.0, 12.0, 13.5, 19.5, 23.5, 51.0]\n actuatlResponse = PSPQuickSortProcess.sort(unsortedArray)\n self.assertEqual(expectedResult, actuatlResponse)\n", "source": "the_stack_v2_python_sparse", "source_path": "02_PSP/PSP/unitest.py", "source_repo": "yemarn510/YM_Python", "split": "val", "star_events_count": 0} {"blob_id": "f1ba499debd71c1f75cc9519fad89284ae9dea20", "bodies": ["def predict_fn(model_config: ml_collections.FrozenConfigDict, model_params: Dict[str, Any], model_vars: Dict[str, Any], batch: Dict[str, Any]) -> Dict[str, Array]:\n \"\"\"Model-specific prediction function.\n\n Args:\n model_config: contains model config hyperparameters.\n model_params: contains model parameters.\n model_vars: contains model variables (not optimized).\n batch: model input.\n\n Returns:\n Dict[str, Array]. 
predictions.\n \"\"\"\n variable_dict = {'params': model_params}\n variable_dict.update(model_vars)\n loss_helpers, _ = cls.build_model(model_config).apply(variable_dict, batch, deterministic=True, rngs=None)\n return loss_helpers['memory_generation']\nreturn predict_fn", "max_length = config.model_config.encoder_config.max_length\nmention_preprocessing_fn = mention_encoder_task.MentionEncoderTask.make_preprocess_fn(config)\n\ndef preprocess_fn(example: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:\n \"\"\"Performs preprocessing for individual sample.\"\"\"\n new_example = mention_preprocessing_fn(example)\n new_example['text_identifiers'] = mention_preprocess_utils.text_hash_tf(example['text_ids'], max_length)\n return new_example\nreturn preprocess_fn", "mention_collater_fn = mention_encoder_task.MentionEncoderTask.make_collater_fn(config)\nmin_distance_from_passage_boundary = config.min_distance_from_passage_boundary\nbsz = config.per_device_batch_size\nmax_mentions_per_sample = config.max_mentions_per_sample\n\ndef collater_fn(batch: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:\n new_batch = mention_collater_fn(batch)\n new_batch['target_text_identifiers'] = tf.gather(new_batch['text_identifiers'], new_batch['mention_target_batch_positions'])\n new_batch['target_mention_hashes'] = mention_preprocess_utils.modified_cantor_pairing(new_batch['mention_target_start_positions'], new_batch['target_text_identifiers'])\n seq_len = tf.shape(batch['text_ids'])[1]\n starts_far_from_passage_boundary = tf.greater_equal(new_batch['mention_target_start_positions'], min_distance_from_passage_boundary)\n ends_far_from_passage_boundary = tf.less(new_batch['mention_target_end_positions'], tf.cast(seq_len, new_batch['mention_target_end_positions'].dtype) - min_distance_from_passage_boundary)\n far_from_passage_boundary = tf.logical_and(starts_far_from_passage_boundary, ends_far_from_passage_boundary)\n far_from_passage_boundary = tf.cast(far_from_passage_boundary, dtype=new_batch['mention_target_weights'].dtype)\n new_batch['mention_target_weights'] = new_batch['mention_target_weights'] * far_from_passage_boundary\n unique_mention_ids = []\n dense_mention_ids = batch['dense_mention_ids'] * batch['dense_mention_mask']\n for i in range(bsz):\n unique_mention_ids_per_i = tf.unique(dense_mention_ids[i]).y\n unique_mention_ids_per_i = tf.cast(unique_mention_ids_per_i, tf.int32)\n unique_mention_ids_per_i = mention_preprocess_utils.dynamic_padding_1d(unique_mention_ids_per_i, max_mentions_per_sample)\n unique_mention_ids.append(unique_mention_ids_per_i)\n new_batch['unique_mention_ids'] = tf.stack(unique_mention_ids)\n return new_batch\nreturn collater_fn", "dummy_input = mention_encoder_task.MentionEncoderTask.dummy_input(config)\nmention_position_shape = config.max_mentions * config.per_device_batch_size\nint_type = jnp.int32\ndummy_input['target_text_identifiers'] = jnp.ones(mention_position_shape, int_type)\ndummy_input['target_mention_hashes'] = jnp.ones(mention_position_shape, int_type)\ndummy_input['unique_mention_ids'] = jnp.ones((config.per_device_batch_size, config.max_mentions_per_sample), int_type)\nreturn dummy_input", "encoder_name = config.model_config.encoder_name\nencoder_class = encoder_registry.get_registered_encoder(encoder_name)\nencoder_variables = encoder_class.load_weights(config)\nmodel_variables = {}\nfor group_key in encoder_variables:\n model_variables[group_key] = {'encoder': encoder_variables[group_key]}\nreturn model_variables"], "bodies_text": "<|body_start_0|>\n def 
predict_fn(model_config: ml_collections.FrozenConfigDict, model_params: Dict[str, Any], model_vars: Dict[str, Any], batch: Dict[str, Any]) -> Dict[str, Array]:\n \"\"\"Model-specific prediction function.\n\n Args:\n model_config: contains model config hyperparameters.\n model_params: contains model parameters.\n model_vars: contains model variables (not optimized).\n batch: model input.\n\n Returns:\n Dict[str, Array]. predictions.\n \"\"\"\n variable_dict = {'params': model_params}\n variable_dict.update(model_vars)\n loss_helpers, _ = cls.build_model(model_config).apply(variable_dict, batch, deterministic=True, rngs=None)\n return loss_helpers['memory_generation']\n return predict_fn\n<|end_body_0|>\n\n<|body_start_1|>\n max_length = config.model_config.encoder_config.max_length\n mention_preprocessing_fn = mention_encoder_task.MentionEncoderTask.make_preprocess_fn(config)\n\n def preprocess_fn(example: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:\n \"\"\"Performs preprocessing for individual sample.\"\"\"\n new_example = mention_preprocessing_fn(example)\n new_example['text_identifiers'] = mention_preprocess_utils.text_hash_tf(example['text_ids'], max_length)\n return new_example\n return preprocess_fn\n<|end_body_1|>\n\n<|body_start_2|>\n mention_collater_fn = mention_encoder_task.MentionEncoderTask.make_collater_fn(config)\n min_distance_from_passage_boundary = config.min_distance_from_passage_boundary\n bsz = config.per_device_batch_size\n max_mentions_per_sample = config.max_mentions_per_sample\n\n def collater_fn(batch: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:\n new_batch = mention_collater_fn(batch)\n new_batch['target_text_identifiers'] = tf.gather(new_batch['text_identifiers'], new_batch['mention_target_batch_positions'])\n new_batch['target_mention_hashes'] = mention_preprocess_utils.modified_cantor_pairing(new_batch['mention_target_start_positions'], new_batch['target_text_identifiers'])\n seq_len = tf.shape(batch['text_ids'])[1]\n starts_far_from_passage_boundary = tf.greater_equal(new_batch['mention_target_start_positions'], min_distance_from_passage_boundary)\n ends_far_from_passage_boundary = tf.less(new_batch['mention_target_end_positions'], tf.cast(seq_len, new_batch['mention_target_end_positions'].dtype) - min_distance_from_passage_boundary)\n far_from_passage_boundary = tf.logical_and(starts_far_from_passage_boundary, ends_far_from_passage_boundary)\n far_from_passage_boundary = tf.cast(far_from_passage_boundary, dtype=new_batch['mention_target_weights'].dtype)\n new_batch['mention_target_weights'] = new_batch['mention_target_weights'] * far_from_passage_boundary\n unique_mention_ids = []\n dense_mention_ids = batch['dense_mention_ids'] * batch['dense_mention_mask']\n for i in range(bsz):\n unique_mention_ids_per_i = tf.unique(dense_mention_ids[i]).y\n unique_mention_ids_per_i = tf.cast(unique_mention_ids_per_i, tf.int32)\n unique_mention_ids_per_i = mention_preprocess_utils.dynamic_padding_1d(unique_mention_ids_per_i, max_mentions_per_sample)\n unique_mention_ids.append(unique_mention_ids_per_i)\n new_batch['unique_mention_ids'] = tf.stack(unique_mention_ids)\n return new_batch\n return collater_fn\n<|end_body_2|>\n\n<|body_start_3|>\n dummy_input = mention_encoder_task.MentionEncoderTask.dummy_input(config)\n mention_position_shape = config.max_mentions * config.per_device_batch_size\n int_type = jnp.int32\n dummy_input['target_text_identifiers'] = jnp.ones(mention_position_shape, int_type)\n dummy_input['target_mention_hashes'] = 
jnp.ones(mention_position_shape, int_type)\n dummy_input['unique_mention_ids'] = jnp.ones((config.per_device_batch_size, config.max_mentions_per_sample), int_type)\n return dummy_input\n<|end_body_3|>\n\n<|body_start_4|>\n encoder_name = config.model_config.encoder_name\n encoder_class = encoder_registry.get_registered_encoder(encoder_name)\n encoder_variables = encoder_class.load_weights(config)\n model_variables = {}\n for group_key in encoder_variables:\n model_variables[group_key] = {'encoder': encoder_variables[group_key]}\n return model_variables\n<|end_body_4|>\n", "class_docstring": "Task that generates memory from the corpus using an encoder.", "class_name": "MemoryGenerationTask", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-generic-cla"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MemoryGenerationTask:\n \"\"\"Task that generates memory from the corpus using an encoder.\"\"\"\n\n def make_prediction_fn(cls, config: ml_collections.ConfigDict) -> Callable[..., Dict[str, Array]]:\n \"\"\"Creates task prediction function for inference.\"\"\"\n <|body_0|>\n\n def make_preprocess_fn(config: ml_collections.ConfigDict) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]:\n \"\"\"Produces function to preprocess samples. See BaseTask. Here we add a text identifier hash to the standard MentionEncoderTask preprocessing pipeline. Args: config: contains experiment hyperparameters. Returns: Function that preprocesses samples to be usable for the model (mod casting from tf to jnp dtype).\"\"\"\n <|body_1|>\n\n def make_collater_fn(config: ml_collections.ConfigDict) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]:\n \"\"\"Produces function to preprocess batches. See BaseTask. Batches text identifiers after standard mention preprocessing. Also masks out mentions that are too close to a passage boundary, and for which we may not have enough context to generate a meaningful encoding. Args: config: contains experiment hyperparameters. Returns: Function that preprocesses batches to be usable for the model (mod casting from tf to jnp dtype).\"\"\"\n <|body_2|>\n\n def dummy_input(config: ml_collections.ConfigDict) -> Dict[str, Any]:\n \"\"\"Produces model-specific dummy input batch. See BaseTask.\"\"\"\n <|body_3|>\n\n def load_weights(config: ml_collections.ConfigDict) -> Dict[str, Any]:\n \"\"\"Load model weights.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def predict_fn(model_config: ml_collections.FrozenConfigDict, model_params: Dict[str, Any], model_vars: Dict[str, Any], batch: Dict[str, Any]) -> Dict[str, Array]:\n \"\"\"Model-specific prediction function.\n\n Args:\n model_config: contains model config hyperparameters.\n model_params: contains model parameters.\n model_vars: contains model variables (not optimized).\n batch: model input.\n\n Returns:\n Dict[str, Array]. 
predictions.\n \"\"\"\n variable_dict = {'params': model_params}\n variable_dict.update(model_vars)\n loss_helpers, _ = cls.build_model(model_config).apply(variable_dict, batch, deterministic=True, rngs=None)\n return loss_helpers['memory_generation']\n return predict_fn\n<|end_body_0|>\n\n<|body_start_1|>\n max_length = config.model_config.encoder_config.max_length\n mention_preprocessing_fn = mention_encoder_task.MentionEncoderTask.make_preprocess_fn(config)\n\n def preprocess_fn(example: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:\n \"\"\"Performs preprocessing for individual sample.\"\"\"\n new_example = mention_preprocessing_fn(example)\n new_example['text_identifiers'] = mention_preprocess_utils.text_hash_tf(example['text_ids'], max_length)\n return new_example\n return preprocess_fn\n<|end_body_1|>\n\n<|body_start_2|>\n mention_collater_fn = mention_encoder_task.MentionEncoderTask.make_collater_fn(config)\n min_distance_from_passage_boundary = config.min_distance_from_passage_boundary\n bsz = config.per_device_batch_size\n max_mentions_per_sample = config.max_mentions_per_sample\n\n def collater_fn(batch: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:\n new_batch = mention_collater_fn(batch)\n new_batch['target_text_identifiers'] = tf.gather(new_batch['text_identifiers'], new_batch['mention_target_batch_positions'])\n new_batch['target_mention_hashes'] = mention_preprocess_utils.modified_cantor_pairing(new_batch['mention_target_start_positions'], new_batch['target_text_identifiers'])\n seq_len = tf.shape(batch['text_ids'])[1]\n starts_far_from_passage_boundary = tf.greater_equal(new_batch['mention_target_start_positions'], min_distance_from_passage_boundary)\n ends_far_from_passage_boundary = tf.less(new_batch['mention_target_end_positions'], tf.cast(seq_len, new_batch['mention_target_end_positions'].dtype) - min_distance_from_passage_boundary)\n far_from_passage_boundary = tf.logical_and(starts_far_from_passage_boundary, ends_far_from_passage_boundary)\n far_from_passage_boundary = tf.cast(far_from_passage_boundary, dtype=new_batch['mention_target_weights'].dtype)\n new_batch['mention_target_weights'] = new_batch['mention_target_weights'] * far_from_passage_boundary\n unique_mention_ids = []\n dense_mention_ids = batch['dense_mention_ids'] * batch['dense_mention_mask']\n for i in range(bsz):\n unique_mention_ids_per_i = tf.unique(dense_mention_ids[i]).y\n unique_mention_ids_per_i = tf.cast(unique_mention_ids_per_i, tf.int32)\n unique_mention_ids_per_i = mention_preprocess_utils.dynamic_padding_1d(unique_mention_ids_per_i, max_mentions_per_sample)\n unique_mention_ids.append(unique_mention_ids_per_i)\n new_batch['unique_mention_ids'] = tf.stack(unique_mention_ids)\n return new_batch\n return collater_fn\n<|end_body_2|>\n\n<|body_start_3|>\n dummy_input = mention_encoder_task.MentionEncoderTask.dummy_input(config)\n mention_position_shape = config.max_mentions * config.per_device_batch_size\n int_type = jnp.int32\n dummy_input['target_text_identifiers'] = jnp.ones(mention_position_shape, int_type)\n dummy_input['target_mention_hashes'] = jnp.ones(mention_position_shape, int_type)\n dummy_input['unique_mention_ids'] = jnp.ones((config.per_device_batch_size, config.max_mentions_per_sample), int_type)\n return dummy_input\n<|end_body_3|>\n\n<|body_start_4|>\n encoder_name = config.model_config.encoder_name\n encoder_class = encoder_registry.get_registered_encoder(encoder_name)\n encoder_variables = encoder_class.load_weights(config)\n model_variables = {}\n for group_key in 
encoder_variables:\n model_variables[group_key] = {'encoder': encoder_variables[group_key]}\n return model_variables\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000224", "length_bytes": 14047, "license_type": "permissive", "methods": [{"docstring": "Creates task prediction function for inference.", "name": "make_prediction_fn", "signature": "def make_prediction_fn(cls, config: ml_collections.ConfigDict) -> Callable[..., Dict[str, Array]]"}, {"docstring": "Produces function to preprocess samples. See BaseTask. Here we add a text identifier hash to the standard MentionEncoderTask preprocessing pipeline. Args: config: contains experiment hyperparameters. Returns: Function that preprocesses samples to be usable for the model (mod casting from tf to jnp dtype).", "name": "make_preprocess_fn", "signature": "def make_preprocess_fn(config: ml_collections.ConfigDict) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]"}, {"docstring": "Produces function to preprocess batches. See BaseTask. Batches text identifiers after standard mention preprocessing. Also masks out mentions that are too close to a passage boundary, and for which we may not have enough context to generate a meaningful encoding. Args: config: contains experiment hyperparameters. Returns: Function that preprocesses batches to be usable for the model (mod casting from tf to jnp dtype).", "name": "make_collater_fn", "signature": "def make_collater_fn(config: ml_collections.ConfigDict) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]"}, {"docstring": "Produces model-specific dummy input batch. See BaseTask.", "name": "dummy_input", "signature": "def dummy_input(config: ml_collections.ConfigDict) -> Dict[str, Any]"}, {"docstring": "Load model weights.", "name": "load_weights", "signature": "def load_weights(config: ml_collections.ConfigDict) -> Dict[str, Any]"}], "n_methods": 5, "prompt": "Implement the Python class `MemoryGenerationTask` described below.\n\nClass description:\nTask that generates memory from the corpus using an encoder.\n\nMethod signatures and docstrings:\n- def make_prediction_fn(cls, config: ml_collections.ConfigDict) -> Callable[..., Dict[str, Array]]: Creates task prediction function for inference.\n- def make_preprocess_fn(config: ml_collections.ConfigDict) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]: Produces function to preprocess samples. See BaseTask. Here we add a text identifier hash to the standard MentionEncoderTask preprocessing pipeline. Args: config: contains experiment hyperparameters. Returns: Function that preprocesses samples to be usable for the model (mod casting from tf to jnp dtype).\n- def make_collater_fn(config: ml_collections.ConfigDict) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]: Produces function to preprocess batches. See BaseTask. Batches text identifiers after standard mention preprocessing. Also masks out mentions that are too close to a passage boundary, and for which we may not have enough context to generate a meaningful encoding. Args: config: contains experiment hyperparameters. Returns: Function that preprocesses batches to be usable for the model (mod casting from tf to jnp dtype).\n- def dummy_input(config: ml_collections.ConfigDict) -> Dict[str, Any]: Produces model-specific dummy input batch. 
See BaseTask.\n- def load_weights(config: ml_collections.ConfigDict) -> Dict[str, Any]: Load model weights.", "prompted_full_text": "Implement the Python class `MemoryGenerationTask` described below.\n\nClass description:\nTask that generates memory from the corpus using an encoder.\n\nMethod signatures and docstrings:\n- def make_prediction_fn(cls, config: ml_collections.ConfigDict) -> Callable[..., Dict[str, Array]]: Creates task prediction function for inference.\n- def make_preprocess_fn(config: ml_collections.ConfigDict) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]: Produces function to preprocess samples. See BaseTask. Here we add a text identifier hash to the standard MentionEncoderTask preprocessing pipeline. Args: config: contains experiment hyperparameters. Returns: Function that preprocesses samples to be usable for the model (mod casting from tf to jnp dtype).\n- def make_collater_fn(config: ml_collections.ConfigDict) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]: Produces function to preprocess batches. See BaseTask. Batches text identifiers after standard mention preprocessing. Also masks out mentions that are too close to a passage boundary, and for which we may not have enough context to generate a meaningful encoding. Args: config: contains experiment hyperparameters. Returns: Function that preprocesses batches to be usable for the model (mod casting from tf to jnp dtype).\n- def dummy_input(config: ml_collections.ConfigDict) -> Dict[str, Any]: Produces model-specific dummy input batch. See BaseTask.\n- def load_weights(config: ml_collections.ConfigDict) -> Dict[str, Any]: Load model weights.\n\n<|skeleton|>\nclass MemoryGenerationTask:\n \"\"\"Task that generates memory from the corpus using an encoder.\"\"\"\n\n def make_prediction_fn(cls, config: ml_collections.ConfigDict) -> Callable[..., Dict[str, Array]]:\n \"\"\"Creates task prediction function for inference.\"\"\"\n <|body_0|>\n\n def make_preprocess_fn(config: ml_collections.ConfigDict) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]:\n \"\"\"Produces function to preprocess samples. See BaseTask. Here we add a text identifier hash to the standard MentionEncoderTask preprocessing pipeline. Args: config: contains experiment hyperparameters. Returns: Function that preprocesses samples to be usable for the model (mod casting from tf to jnp dtype).\"\"\"\n <|body_1|>\n\n def make_collater_fn(config: ml_collections.ConfigDict) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]:\n \"\"\"Produces function to preprocess batches. See BaseTask. Batches text identifiers after standard mention preprocessing. Also masks out mentions that are too close to a passage boundary, and for which we may not have enough context to generate a meaningful encoding. Args: config: contains experiment hyperparameters. Returns: Function that preprocesses batches to be usable for the model (mod casting from tf to jnp dtype).\"\"\"\n <|body_2|>\n\n def dummy_input(config: ml_collections.ConfigDict) -> Dict[str, Any]:\n \"\"\"Produces model-specific dummy input batch. 
See BaseTask.\"\"\"\n <|body_3|>\n\n def load_weights(config: ml_collections.ConfigDict) -> Dict[str, Any]:\n \"\"\"Load model weights.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def predict_fn(model_config: ml_collections.FrozenConfigDict, model_params: Dict[str, Any], model_vars: Dict[str, Any], batch: Dict[str, Any]) -> Dict[str, Array]:\n \"\"\"Model-specific prediction function.\n\n Args:\n model_config: contains model config hyperparameters.\n model_params: contains model parameters.\n model_vars: contains model variables (not optimized).\n batch: model input.\n\n Returns:\n Dict[str, Array]. predictions.\n \"\"\"\n variable_dict = {'params': model_params}\n variable_dict.update(model_vars)\n loss_helpers, _ = cls.build_model(model_config).apply(variable_dict, batch, deterministic=True, rngs=None)\n return loss_helpers['memory_generation']\n return predict_fn\n<|end_body_0|>\n\n<|body_start_1|>\n max_length = config.model_config.encoder_config.max_length\n mention_preprocessing_fn = mention_encoder_task.MentionEncoderTask.make_preprocess_fn(config)\n\n def preprocess_fn(example: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:\n \"\"\"Performs preprocessing for individual sample.\"\"\"\n new_example = mention_preprocessing_fn(example)\n new_example['text_identifiers'] = mention_preprocess_utils.text_hash_tf(example['text_ids'], max_length)\n return new_example\n return preprocess_fn\n<|end_body_1|>\n\n<|body_start_2|>\n mention_collater_fn = mention_encoder_task.MentionEncoderTask.make_collater_fn(config)\n min_distance_from_passage_boundary = config.min_distance_from_passage_boundary\n bsz = config.per_device_batch_size\n max_mentions_per_sample = config.max_mentions_per_sample\n\n def collater_fn(batch: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:\n new_batch = mention_collater_fn(batch)\n new_batch['target_text_identifiers'] = tf.gather(new_batch['text_identifiers'], new_batch['mention_target_batch_positions'])\n new_batch['target_mention_hashes'] = mention_preprocess_utils.modified_cantor_pairing(new_batch['mention_target_start_positions'], new_batch['target_text_identifiers'])\n seq_len = tf.shape(batch['text_ids'])[1]\n starts_far_from_passage_boundary = tf.greater_equal(new_batch['mention_target_start_positions'], min_distance_from_passage_boundary)\n ends_far_from_passage_boundary = tf.less(new_batch['mention_target_end_positions'], tf.cast(seq_len, new_batch['mention_target_end_positions'].dtype) - min_distance_from_passage_boundary)\n far_from_passage_boundary = tf.logical_and(starts_far_from_passage_boundary, ends_far_from_passage_boundary)\n far_from_passage_boundary = tf.cast(far_from_passage_boundary, dtype=new_batch['mention_target_weights'].dtype)\n new_batch['mention_target_weights'] = new_batch['mention_target_weights'] * far_from_passage_boundary\n unique_mention_ids = []\n dense_mention_ids = batch['dense_mention_ids'] * batch['dense_mention_mask']\n for i in range(bsz):\n unique_mention_ids_per_i = tf.unique(dense_mention_ids[i]).y\n unique_mention_ids_per_i = tf.cast(unique_mention_ids_per_i, tf.int32)\n unique_mention_ids_per_i = mention_preprocess_utils.dynamic_padding_1d(unique_mention_ids_per_i, max_mentions_per_sample)\n unique_mention_ids.append(unique_mention_ids_per_i)\n new_batch['unique_mention_ids'] = tf.stack(unique_mention_ids)\n return new_batch\n return collater_fn\n<|end_body_2|>\n\n<|body_start_3|>\n dummy_input = mention_encoder_task.MentionEncoderTask.dummy_input(config)\n mention_position_shape = config.max_mentions * 
config.per_device_batch_size\n int_type = jnp.int32\n dummy_input['target_text_identifiers'] = jnp.ones(mention_position_shape, int_type)\n dummy_input['target_mention_hashes'] = jnp.ones(mention_position_shape, int_type)\n dummy_input['unique_mention_ids'] = jnp.ones((config.per_device_batch_size, config.max_mentions_per_sample), int_type)\n return dummy_input\n<|end_body_3|>\n\n<|body_start_4|>\n encoder_name = config.model_config.encoder_name\n encoder_class = encoder_registry.get_registered_encoder(encoder_name)\n encoder_variables = encoder_class.load_weights(config)\n model_variables = {}\n for group_key in encoder_variables:\n model_variables[group_key] = {'encoder': encoder_variables[group_key]}\n return model_variables\n<|end_body_4|>\n", "revision_id": "ac9447064195e06de48cc91ff642f7fffa28ffe8", "skeleton": "<|skeleton|>\nclass MemoryGenerationTask:\n \"\"\"Task that generates memory from the corpus using an encoder.\"\"\"\n\n def make_prediction_fn(cls, config: ml_collections.ConfigDict) -> Callable[..., Dict[str, Array]]:\n \"\"\"Creates task prediction function for inference.\"\"\"\n <|body_0|>\n\n def make_preprocess_fn(config: ml_collections.ConfigDict) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]:\n \"\"\"Produces function to preprocess samples. See BaseTask. Here we add a text identifier hash to the standard MentionEncoderTask preprocessing pipeline. Args: config: contains experiment hyperparameters. Returns: Function that preprocesses samples to be usable for the model (mod casting from tf to jnp dtype).\"\"\"\n <|body_1|>\n\n def make_collater_fn(config: ml_collections.ConfigDict) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]:\n \"\"\"Produces function to preprocess batches. See BaseTask. Batches text identifiers after standard mention preprocessing. Also masks out mentions that are too close to a passage boundary, and for which we may not have enough context to generate a meaningful encoding. Args: config: contains experiment hyperparameters. Returns: Function that preprocesses batches to be usable for the model (mod casting from tf to jnp dtype).\"\"\"\n <|body_2|>\n\n def dummy_input(config: ml_collections.ConfigDict) -> Dict[str, Any]:\n \"\"\"Produces model-specific dummy input batch. See BaseTask.\"\"\"\n <|body_3|>\n\n def load_weights(config: ml_collections.ConfigDict) -> Dict[str, Any]:\n \"\"\"Load model weights.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MemoryGenerationTask:\n \"\"\"Task that generates memory from the corpus using an encoder.\"\"\"\n\n def make_prediction_fn(cls, config: ml_collections.ConfigDict) -> Callable[..., Dict[str, Array]]:\n \"\"\"Creates task prediction function for inference.\"\"\"\n def predict_fn(model_config: ml_collections.FrozenConfigDict, model_params: Dict[str, Any], model_vars: Dict[str, Any], batch: Dict[str, Any]) -> Dict[str, Array]:\n \"\"\"Model-specific prediction function.\n\n Args:\n model_config: contains model config hyperparameters.\n model_params: contains model parameters.\n model_vars: contains model variables (not optimized).\n batch: model input.\n\n Returns:\n Dict[str, Array]. 
predictions.\n \"\"\"\n variable_dict = {'params': model_params}\n variable_dict.update(model_vars)\n loss_helpers, _ = cls.build_model(model_config).apply(variable_dict, batch, deterministic=True, rngs=None)\n return loss_helpers['memory_generation']\n return predict_fn\n\n def make_preprocess_fn(config: ml_collections.ConfigDict) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]:\n \"\"\"Produces function to preprocess samples. See BaseTask. Here we add a text identifier hash to the standard MentionEncoderTask preprocessing pipeline. Args: config: contains experiment hyperparameters. Returns: Function that preprocesses samples to be usable for the model (mod casting from tf to jnp dtype).\"\"\"\n max_length = config.model_config.encoder_config.max_length\n mention_preprocessing_fn = mention_encoder_task.MentionEncoderTask.make_preprocess_fn(config)\n\n def preprocess_fn(example: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:\n \"\"\"Performs preprocessing for individual sample.\"\"\"\n new_example = mention_preprocessing_fn(example)\n new_example['text_identifiers'] = mention_preprocess_utils.text_hash_tf(example['text_ids'], max_length)\n return new_example\n return preprocess_fn\n\n def make_collater_fn(config: ml_collections.ConfigDict) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]:\n \"\"\"Produces function to preprocess batches. See BaseTask. Batches text identifiers after standard mention preprocessing. Also masks out mentions that are too close to a passage boundary, and for which we may not have enough context to generate a meaningful encoding. Args: config: contains experiment hyperparameters. Returns: Function that preprocesses batches to be usable for the model (mod casting from tf to jnp dtype).\"\"\"\n mention_collater_fn = mention_encoder_task.MentionEncoderTask.make_collater_fn(config)\n min_distance_from_passage_boundary = config.min_distance_from_passage_boundary\n bsz = config.per_device_batch_size\n max_mentions_per_sample = config.max_mentions_per_sample\n\n def collater_fn(batch: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:\n new_batch = mention_collater_fn(batch)\n new_batch['target_text_identifiers'] = tf.gather(new_batch['text_identifiers'], new_batch['mention_target_batch_positions'])\n new_batch['target_mention_hashes'] = mention_preprocess_utils.modified_cantor_pairing(new_batch['mention_target_start_positions'], new_batch['target_text_identifiers'])\n seq_len = tf.shape(batch['text_ids'])[1]\n starts_far_from_passage_boundary = tf.greater_equal(new_batch['mention_target_start_positions'], min_distance_from_passage_boundary)\n ends_far_from_passage_boundary = tf.less(new_batch['mention_target_end_positions'], tf.cast(seq_len, new_batch['mention_target_end_positions'].dtype) - min_distance_from_passage_boundary)\n far_from_passage_boundary = tf.logical_and(starts_far_from_passage_boundary, ends_far_from_passage_boundary)\n far_from_passage_boundary = tf.cast(far_from_passage_boundary, dtype=new_batch['mention_target_weights'].dtype)\n new_batch['mention_target_weights'] = new_batch['mention_target_weights'] * far_from_passage_boundary\n unique_mention_ids = []\n dense_mention_ids = batch['dense_mention_ids'] * batch['dense_mention_mask']\n for i in range(bsz):\n unique_mention_ids_per_i = tf.unique(dense_mention_ids[i]).y\n unique_mention_ids_per_i = tf.cast(unique_mention_ids_per_i, tf.int32)\n unique_mention_ids_per_i = mention_preprocess_utils.dynamic_padding_1d(unique_mention_ids_per_i, max_mentions_per_sample)\n 
unique_mention_ids.append(unique_mention_ids_per_i)\n            new_batch['unique_mention_ids'] = tf.stack(unique_mention_ids)\n            return new_batch\n        return collater_fn\n\n    def dummy_input(config: ml_collections.ConfigDict) -> Dict[str, Any]:\n        \"\"\"Produces model-specific dummy input batch. See BaseTask.\"\"\"\n        dummy_input = mention_encoder_task.MentionEncoderTask.dummy_input(config)\n        mention_position_shape = config.max_mentions * config.per_device_batch_size\n        int_type = jnp.int32\n        dummy_input['target_text_identifiers'] = jnp.ones(mention_position_shape, int_type)\n        dummy_input['target_mention_hashes'] = jnp.ones(mention_position_shape, int_type)\n        dummy_input['unique_mention_ids'] = jnp.ones((config.per_device_batch_size, config.max_mentions_per_sample), int_type)\n        return dummy_input\n\n    def load_weights(config: ml_collections.ConfigDict) -> Dict[str, Any]:\n        \"\"\"Load model weights.\"\"\"\n        encoder_name = config.model_config.encoder_name\n        encoder_class = encoder_registry.get_registered_encoder(encoder_name)\n        encoder_variables = encoder_class.load_weights(config)\n        model_variables = {}\n        for group_key in encoder_variables:\n            model_variables[group_key] = {'encoder': encoder_variables[group_key]}\n        return model_variables\n", "source": "the_stack_v2_python_sparse", "source_path": "language/mentionmemory/tasks/memory_generation_task.py", "source_repo": "google-research/language", "split": "val", "star_events_count": 1567} {"blob_id": "289ff93490bc1ab6b257a3b874a32827886fa064", "bodies": ["hashed = hashlib.sha256(string.encode())\nhex_of_hashed = hashed.hexdigest()\nreturn hex_of_hashed", "hashed = hashlib.sha256(string.encode())\nhex_of_string = hashed.hexdigest()\nhashed = hashlib.sha256(candidate.encode())\nhex_of_candidate = hashed.hexdigest()\nreturn hex_of_string == hex_of_candidate"], "bodies_text": "<|body_start_0|>\n        hashed = hashlib.sha256(string.encode())\n        hex_of_hashed = hashed.hexdigest()\n        return hex_of_hashed\n<|end_body_0|>\n\n<|body_start_1|>\n        hashed = hashlib.sha256(string.encode())\n        hex_of_string = hashed.hexdigest()\n        hashed = hashlib.sha256(candidate.encode())\n        hex_of_candidate = hashed.hexdigest()\n        return hex_of_string == hex_of_candidate\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Hasher", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Hasher:\n\n    def makeHash(string):\n        \"\"\"returns the hash value of string input\"\"\"\n        <|body_0|>\n\n    def checkHash(string, candidate):\n        \"\"\"string : hash value candidate : the input to check with hash value returns True if equal else False\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n        hashed = hashlib.sha256(string.encode())\n        hex_of_hashed = hashed.hexdigest()\n        return hex_of_hashed\n<|end_body_0|>\n\n<|body_start_1|>\n        hashed = hashlib.sha256(string.encode())\n        hex_of_string = hashed.hexdigest()\n        hashed = hashlib.sha256(candidate.encode())\n        hex_of_candidate = hashed.hexdigest()\n        return hex_of_string == hex_of_candidate\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000225", "length_bytes": 1325, "license_type": "no_license", "methods": [{"docstring": "returns the hash value of string input", "name": "makeHash", "signature": "def makeHash(string)"}, {"docstring": "string : hash value candidate : the input to check with hash value returns True if equal else False", "name": "checkHash", "signature": "def checkHash(string, candidate)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002303", "prompt": "Implement 
the Python class `Hasher` described below.\n\nClass description:\nImplement the Hasher class.\n\nMethod signatures and docstrings:\n- def makeHash(string): returns the hash value of string input\n- def checkHash(string, candidate): string : hash value candidate : the input to check with hash value returns True if equal else False", "prompted_full_text": "Implement the Python class `Hasher` described below.\n\nClass description:\nImplement the Hasher class.\n\nMethod signatures and docstrings:\n- def makeHash(string): returns the hash value of string input\n- def checkHash(string, candidate): string : hash value candidate : the input to check with hash value returns True if equal else False\n\n<|skeleton|>\nclass Hasher:\n\n    def makeHash(string):\n        \"\"\"returns the hash value of string input\"\"\"\n        <|body_0|>\n\n    def checkHash(string, candidate):\n        \"\"\"string : hash value candidate : the input to check with hash value returns True if equal else False\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n        hashed = hashlib.sha256(string.encode())\n        hex_of_hashed = hashed.hexdigest()\n        return hex_of_hashed\n<|end_body_0|>\n\n<|body_start_1|>\n        hashed = hashlib.sha256(string.encode())\n        hex_of_string = hashed.hexdigest()\n        hashed = hashlib.sha256(candidate.encode())\n        hex_of_candidate = hashed.hexdigest()\n        return hex_of_string == hex_of_candidate\n<|end_body_1|>\n", "revision_id": "a3500acd8efb41aeefbadbff966f956a9f1e7766", "skeleton": "<|skeleton|>\nclass Hasher:\n\n    def makeHash(string):\n        \"\"\"returns the hash value of string input\"\"\"\n        <|body_0|>\n\n    def checkHash(string, candidate):\n        \"\"\"string : hash value candidate : the input to check with hash value returns True if equal else False\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Hasher:\n    def makeHash(string):\n        \"\"\"returns the hash value of string input\"\"\"\n        hashed = hashlib.sha256(string.encode())\n        hex_of_hashed = hashed.hexdigest()\n        return hex_of_hashed\n\n    def checkHash(string, candidate):\n        \"\"\"string : hash value candidate : the input to check with hash value returns True if equal else False\"\"\"\n        hashed = hashlib.sha256(string.encode())\n        hex_of_string = hashed.hexdigest()\n        hashed = hashlib.sha256(candidate.encode())\n        hex_of_candidate = hashed.hexdigest()\n        return hex_of_string == hex_of_candidate\n", "source": "the_stack_v2_python_sparse", "source_path": "Tourino/helpers.py", "source_repo": "zerobits01/Tourino", "split": "val", "star_events_count": 0} {"blob_id": "8df9985608b2a9310d1b7175d79b3c793966dc94", "bodies": ["if notes_gui.showing:\n    ctx.tags = []\n    notes_gui.hide()\nelse:\n    update_notes()\n    ctx.tags = ['user.notes_showing']\n    notes_gui.show()", "curtime = datetime.now().strftime('%Y-%m-%d %H%M%S')\nfile_path = NOTES_DIR / f'{curtime}.txt'\nfile_path.touch()\nsubprocess.Popen(['notepad', str(file_path)])\nupdate_notes()", "global notes_by_number\nglobal notes_by_filename\nassert n <= len(notes_by_number)\nnote = notes_by_number[n]\narchive_name = f'{note.path.stem} - {note.heading}.txt'\nshutil.move(note.path, ARCHIVE_DIR / archive_name)\nnote.path.unlink()\ndel notes_by_number[n]\ndel notes_by_filename[note.path.name]", "global notes_by_number\nassert n <= len(notes_by_number)\nsubprocess.Popen(['notepad', str(notes_by_number[n].path)])"], "bodies_text": "<|body_start_0|>\n        if notes_gui.showing:\n            ctx.tags = []\n            notes_gui.hide()\n        else:\n            update_notes()\n            ctx.tags = 
['user.notes_showing']\n notes_gui.show()\n<|end_body_0|>\n\n<|body_start_1|>\n curtime = datetime.now().strftime('%Y-%m-%d %H%M%S')\n file_path = NOTES_DIR / f'{curtime}.txt'\n file_path.touch()\n subprocess.Popen(['notepad', str(file_path)])\n update_notes()\n<|end_body_1|>\n\n<|body_start_2|>\n global notes_by_number\n global notes_by_filename\n assert n <= len(notes_by_number)\n note = notes_by_number[n]\n archive_name = f'{note.path.stem} - {note.heading}.txt'\n shutil.move(note.path, ARCHIVE_DIR / archive_name)\n note.path.unlink()\n del notes_by_number[n]\n del notes_by_filename[note.path.name]\n<|end_body_2|>\n\n<|body_start_3|>\n global notes_by_number\n assert n <= len(notes_by_number)\n subprocess.Popen(['notepad', str(notes_by_number[n].path)])\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Actions", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Actions:\n\n def notes_gui_toggle():\n \"\"\"Toggle the notes gui\"\"\"\n <|body_0|>\n\n def create_note():\n \"\"\"Create a new note\"\"\"\n <|body_1|>\n\n def delete_note(n: int):\n \"\"\"Delete note number n\"\"\"\n <|body_2|>\n\n def show_note(n: int):\n \"\"\"Show note number n\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if notes_gui.showing:\n ctx.tags = []\n notes_gui.hide()\n else:\n update_notes()\n ctx.tags = ['user.notes_showing']\n notes_gui.show()\n<|end_body_0|>\n\n<|body_start_1|>\n curtime = datetime.now().strftime('%Y-%m-%d %H%M%S')\n file_path = NOTES_DIR / f'{curtime}.txt'\n file_path.touch()\n subprocess.Popen(['notepad', str(file_path)])\n update_notes()\n<|end_body_1|>\n\n<|body_start_2|>\n global notes_by_number\n global notes_by_filename\n assert n <= len(notes_by_number)\n note = notes_by_number[n]\n archive_name = f'{note.path.stem} - {note.heading}.txt'\n shutil.move(note.path, ARCHIVE_DIR / archive_name)\n note.path.unlink()\n del notes_by_number[n]\n del notes_by_filename[note.path.name]\n<|end_body_2|>\n\n<|body_start_3|>\n global notes_by_number\n assert n <= len(notes_by_number)\n subprocess.Popen(['notepad', str(notes_by_number[n].path)])\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000226", "length_bytes": 3193, "license_type": "no_license", "methods": [{"docstring": "Toggle the notes gui", "name": "notes_gui_toggle", "signature": "def notes_gui_toggle()"}, {"docstring": "Create a new note", "name": "create_note", "signature": "def create_note()"}, {"docstring": "Delete note number n", "name": "delete_note", "signature": "def delete_note(n: int)"}, {"docstring": "Show note number n", "name": "show_note", "signature": "def show_note(n: int)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_003027", "prompt": "Implement the Python class `Actions` described below.\n\nClass description:\nImplement the Actions class.\n\nMethod signatures and docstrings:\n- def notes_gui_toggle(): Toggle the notes gui\n- def create_note(): Create a new note\n- def delete_note(n: int): Delete note number n\n- def show_note(n: int): Show note number n", "prompted_full_text": "Implement the Python class `Actions` described below.\n\nClass description:\nImplement the Actions class.\n\nMethod signatures and docstrings:\n- def notes_gui_toggle(): Toggle the notes gui\n- def create_note(): Create a new note\n- def delete_note(n: int): Delete note number n\n- def show_note(n: int): Show note number n\n\n<|skeleton|>\nclass Actions:\n\n def notes_gui_toggle():\n \"\"\"Toggle the notes gui\"\"\"\n 
<|body_0|>\n\n def create_note():\n \"\"\"Create a new note\"\"\"\n <|body_1|>\n\n def delete_note(n: int):\n \"\"\"Delete note number n\"\"\"\n <|body_2|>\n\n def show_note(n: int):\n \"\"\"Show note number n\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if notes_gui.showing:\n ctx.tags = []\n notes_gui.hide()\n else:\n update_notes()\n ctx.tags = ['user.notes_showing']\n notes_gui.show()\n<|end_body_0|>\n\n<|body_start_1|>\n curtime = datetime.now().strftime('%Y-%m-%d %H%M%S')\n file_path = NOTES_DIR / f'{curtime}.txt'\n file_path.touch()\n subprocess.Popen(['notepad', str(file_path)])\n update_notes()\n<|end_body_1|>\n\n<|body_start_2|>\n global notes_by_number\n global notes_by_filename\n assert n <= len(notes_by_number)\n note = notes_by_number[n]\n archive_name = f'{note.path.stem} - {note.heading}.txt'\n shutil.move(note.path, ARCHIVE_DIR / archive_name)\n note.path.unlink()\n del notes_by_number[n]\n del notes_by_filename[note.path.name]\n<|end_body_2|>\n\n<|body_start_3|>\n global notes_by_number\n assert n <= len(notes_by_number)\n subprocess.Popen(['notepad', str(notes_by_number[n].path)])\n<|end_body_3|>\n", "revision_id": "03c6479989ab4231d8ae6bbab24ac8b57c3ef809", "skeleton": "<|skeleton|>\nclass Actions:\n\n def notes_gui_toggle():\n \"\"\"Toggle the notes gui\"\"\"\n <|body_0|>\n\n def create_note():\n \"\"\"Create a new note\"\"\"\n <|body_1|>\n\n def delete_note(n: int):\n \"\"\"Delete note number n\"\"\"\n <|body_2|>\n\n def show_note(n: int):\n \"\"\"Show note number n\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Actions:\n def notes_gui_toggle():\n \"\"\"Toggle the notes gui\"\"\"\n if notes_gui.showing:\n ctx.tags = []\n notes_gui.hide()\n else:\n update_notes()\n ctx.tags = ['user.notes_showing']\n notes_gui.show()\n\n def create_note():\n \"\"\"Create a new note\"\"\"\n curtime = datetime.now().strftime('%Y-%m-%d %H%M%S')\n file_path = NOTES_DIR / f'{curtime}.txt'\n file_path.touch()\n subprocess.Popen(['notepad', str(file_path)])\n update_notes()\n\n def delete_note(n: int):\n \"\"\"Delete note number n\"\"\"\n global notes_by_number\n global notes_by_filename\n assert n <= len(notes_by_number)\n note = notes_by_number[n]\n archive_name = f'{note.path.stem} - {note.heading}.txt'\n shutil.move(note.path, ARCHIVE_DIR / archive_name)\n note.path.unlink()\n del notes_by_number[n]\n del notes_by_filename[note.path.name]\n\n def show_note(n: int):\n \"\"\"Show note number n\"\"\"\n global notes_by_number\n assert n <= len(notes_by_number)\n subprocess.Popen(['notepad', str(notes_by_number[n].path)])\n", "source": "the_stack_v2_python_sparse", "source_path": "gui/notes/notes_gui.py", "source_repo": "mrob95/MR-talon", "split": "val", "star_events_count": 15} {"blob_id": "24c0eca60e4b90b60e8991bbd27f5d587f97dfc2", "bodies": ["pc = DotDict()\npc.process_type = 'plugin'\npijd = copy.deepcopy(cannonical_json_dump)\npc.json_dump = pijd\npc.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\nf2jd = copy.deepcopy(cannonical_json_dump)\npc.upload_file_minidump_flash2 = DotDict()\npc.upload_file_minidump_flash2.json_dump = f2jd\nfake_processor = create_basic_fake_processor()\nrc = DotDict()\nrule = SetWindowPos()\naction_result = rule.action(rc, pc, fake_processor)\nself.assertTrue(action_result)\nself.assertTrue('classifications' in pc)\nself.assertTrue('skunk_works' in 
pc.classifications)\nself.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | other')", "pc = DotDict()\npc.process_type = 'plugin'\npijd = copy.deepcopy(cannonical_json_dump)\npc.json_dump = pijd\npc.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\npc.json_dump['threads'][0]['frames'][4]['function'] = 'F_1378698112'\nf2jd = copy.deepcopy(cannonical_json_dump)\npc.upload_file_minidump_flash2 = DotDict()\npc.upload_file_minidump_flash2.json_dump = f2jd\nfake_processor = create_basic_fake_processor()\nrc = DotDict()\nrule = SetWindowPos()\naction_result = rule.action(rc, pc, fake_processor)\nself.assertTrue(action_result)\nself.assertTrue('classifications' in pc)\nself.assertTrue('skunk_works' in pc.classifications)\nself.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | F_1378698112')", "pc = DotDict()\npc.dump = DotDict()\npijd = copy.deepcopy(cannonical_json_dump)\npc.dump.json_dump = pijd\nf2jd = copy.deepcopy(cannonical_json_dump)\npc.upload_file_minidump_flash2 = DotDict()\npc.upload_file_minidump_flash2.json_dump = f2jd\npc.upload_file_minidump_flash2.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\npc.upload_file_minidump_flash2.json_dump['threads'][0]['frames'][4]['function'] = 'F455544145'\nfake_processor = create_basic_fake_processor()\nrc = DotDict()\nrule = SetWindowPos()\naction_result = rule.action(rc, pc, fake_processor)\nself.assertTrue(action_result)\nself.assertTrue('classifications' in pc)\nself.assertTrue('skunk_works' in pc.classifications)\nself.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | F455544145')", "pc = DotDict()\npc.dump = DotDict()\npijd = copy.deepcopy(cannonical_json_dump)\npc.dump.json_dump = pijd\nf2jd = copy.deepcopy(cannonical_json_dump)\npc.upload_file_minidump_flash2 = DotDict()\npc.upload_file_minidump_flash2.json_dump = f2jd\npc.upload_file_minidump_flash2.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\nfake_processor = create_basic_fake_processor()\nrc = DotDict()\nrule = SetWindowPos()\naction_result = rule.action(rc, pc, fake_processor)\nself.assertTrue(action_result)\nself.assertTrue('classifications' in pc)\nself.assertTrue('skunk_works' in pc.classifications)\nself.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | other')", "pc = DotDict()\npc.dump = DotDict()\npijd = copy.deepcopy(cannonical_json_dump)\npc.dump.json_dump = pijd\nf2jd = copy.deepcopy(cannonical_json_dump)\npc.upload_file_minidump_flash2 = DotDict()\npc.upload_file_minidump_flash2.json_dump = f2jd\nfake_processor = create_basic_fake_processor()\nrc = DotDict()\nrule = SetWindowPos()\naction_result = rule.action(rc, pc, fake_processor)\nself.assertFalse(action_result)\nself.assertFalse('classifications' in pc)"], "bodies_text": "<|body_start_0|>\n pc = DotDict()\n pc.process_type = 'plugin'\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.json_dump = pijd\n pc.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertTrue(action_result)\n self.assertTrue('classifications' in pc)\n self.assertTrue('skunk_works' in pc.classifications)\n 
self.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | other')\n<|end_body_0|>\n\n<|body_start_1|>\n pc = DotDict()\n pc.process_type = 'plugin'\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.json_dump = pijd\n pc.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\n pc.json_dump['threads'][0]['frames'][4]['function'] = 'F_1378698112'\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertTrue(action_result)\n self.assertTrue('classifications' in pc)\n self.assertTrue('skunk_works' in pc.classifications)\n self.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | F_1378698112')\n<|end_body_1|>\n\n<|body_start_2|>\n pc = DotDict()\n pc.dump = DotDict()\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.dump.json_dump = pijd\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n pc.upload_file_minidump_flash2.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\n pc.upload_file_minidump_flash2.json_dump['threads'][0]['frames'][4]['function'] = 'F455544145'\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertTrue(action_result)\n self.assertTrue('classifications' in pc)\n self.assertTrue('skunk_works' in pc.classifications)\n self.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | F455544145')\n<|end_body_2|>\n\n<|body_start_3|>\n pc = DotDict()\n pc.dump = DotDict()\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.dump.json_dump = pijd\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n pc.upload_file_minidump_flash2.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertTrue(action_result)\n self.assertTrue('classifications' in pc)\n self.assertTrue('skunk_works' in pc.classifications)\n self.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | other')\n<|end_body_3|>\n\n<|body_start_4|>\n pc = DotDict()\n pc.dump = DotDict()\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.dump.json_dump = pijd\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertFalse(action_result)\n self.assertFalse('classifications' in pc)\n<|end_body_4|>\n", "class_docstring": "", "class_name": "TestSetWindowPos", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestSetWindowPos:\n\n def test_action_case_1(self):\n \"\"\"sentinel exsits in stack, but no secondaries\"\"\"\n <|body_0|>\n\n def test_action_case_2(self):\n \"\"\"sentinel exsits in stack, plus one secondary\"\"\"\n <|body_1|>\n\n def test_action_case_3(self):\n \"\"\"nothing in 1st dump, sentinel and secondary 
in upload_file_minidump_flash2 dump\"\"\"\n <|body_2|>\n\n def test_action_case_4(self):\n \"\"\"nothing in 1st dump, sentinel but no secondary in upload_file_minidump_flash2 dump\"\"\"\n <|body_3|>\n\n def test_action_case_5(self):\n \"\"\"nothing in either dump\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pc = DotDict()\n pc.process_type = 'plugin'\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.json_dump = pijd\n pc.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertTrue(action_result)\n self.assertTrue('classifications' in pc)\n self.assertTrue('skunk_works' in pc.classifications)\n self.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | other')\n<|end_body_0|>\n\n<|body_start_1|>\n pc = DotDict()\n pc.process_type = 'plugin'\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.json_dump = pijd\n pc.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\n pc.json_dump['threads'][0]['frames'][4]['function'] = 'F_1378698112'\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertTrue(action_result)\n self.assertTrue('classifications' in pc)\n self.assertTrue('skunk_works' in pc.classifications)\n self.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | F_1378698112')\n<|end_body_1|>\n\n<|body_start_2|>\n pc = DotDict()\n pc.dump = DotDict()\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.dump.json_dump = pijd\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n pc.upload_file_minidump_flash2.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\n pc.upload_file_minidump_flash2.json_dump['threads'][0]['frames'][4]['function'] = 'F455544145'\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertTrue(action_result)\n self.assertTrue('classifications' in pc)\n self.assertTrue('skunk_works' in pc.classifications)\n self.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | F455544145')\n<|end_body_2|>\n\n<|body_start_3|>\n pc = DotDict()\n pc.dump = DotDict()\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.dump.json_dump = pijd\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n pc.upload_file_minidump_flash2.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertTrue(action_result)\n self.assertTrue('classifications' in pc)\n self.assertTrue('skunk_works' in pc.classifications)\n self.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | other')\n<|end_body_3|>\n\n<|body_start_4|>\n pc = DotDict()\n pc.dump = DotDict()\n pijd = 
copy.deepcopy(cannonical_json_dump)\n pc.dump.json_dump = pijd\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertFalse(action_result)\n self.assertFalse('classifications' in pc)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000227", "length_bytes": 27276, "license_type": "no_license", "methods": [{"docstring": "sentinel exsits in stack, but no secondaries", "name": "test_action_case_1", "signature": "def test_action_case_1(self)"}, {"docstring": "sentinel exsits in stack, plus one secondary", "name": "test_action_case_2", "signature": "def test_action_case_2(self)"}, {"docstring": "nothing in 1st dump, sentinel and secondary in upload_file_minidump_flash2 dump", "name": "test_action_case_3", "signature": "def test_action_case_3(self)"}, {"docstring": "nothing in 1st dump, sentinel but no secondary in upload_file_minidump_flash2 dump", "name": "test_action_case_4", "signature": "def test_action_case_4(self)"}, {"docstring": "nothing in either dump", "name": "test_action_case_5", "signature": "def test_action_case_5(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_001562", "prompt": "Implement the Python class `TestSetWindowPos` described below.\n\nClass description:\nImplement the TestSetWindowPos class.\n\nMethod signatures and docstrings:\n- def test_action_case_1(self): sentinel exsits in stack, but no secondaries\n- def test_action_case_2(self): sentinel exsits in stack, plus one secondary\n- def test_action_case_3(self): nothing in 1st dump, sentinel and secondary in upload_file_minidump_flash2 dump\n- def test_action_case_4(self): nothing in 1st dump, sentinel but no secondary in upload_file_minidump_flash2 dump\n- def test_action_case_5(self): nothing in either dump", "prompted_full_text": "Implement the Python class `TestSetWindowPos` described below.\n\nClass description:\nImplement the TestSetWindowPos class.\n\nMethod signatures and docstrings:\n- def test_action_case_1(self): sentinel exsits in stack, but no secondaries\n- def test_action_case_2(self): sentinel exsits in stack, plus one secondary\n- def test_action_case_3(self): nothing in 1st dump, sentinel and secondary in upload_file_minidump_flash2 dump\n- def test_action_case_4(self): nothing in 1st dump, sentinel but no secondary in upload_file_minidump_flash2 dump\n- def test_action_case_5(self): nothing in either dump\n\n<|skeleton|>\nclass TestSetWindowPos:\n\n def test_action_case_1(self):\n \"\"\"sentinel exsits in stack, but no secondaries\"\"\"\n <|body_0|>\n\n def test_action_case_2(self):\n \"\"\"sentinel exsits in stack, plus one secondary\"\"\"\n <|body_1|>\n\n def test_action_case_3(self):\n \"\"\"nothing in 1st dump, sentinel and secondary in upload_file_minidump_flash2 dump\"\"\"\n <|body_2|>\n\n def test_action_case_4(self):\n \"\"\"nothing in 1st dump, sentinel but no secondary in upload_file_minidump_flash2 dump\"\"\"\n <|body_3|>\n\n def test_action_case_5(self):\n \"\"\"nothing in either dump\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pc = DotDict()\n pc.process_type = 'plugin'\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.json_dump = pijd\n pc.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = 
DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertTrue(action_result)\n self.assertTrue('classifications' in pc)\n self.assertTrue('skunk_works' in pc.classifications)\n self.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | other')\n<|end_body_0|>\n\n<|body_start_1|>\n pc = DotDict()\n pc.process_type = 'plugin'\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.json_dump = pijd\n pc.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\n pc.json_dump['threads'][0]['frames'][4]['function'] = 'F_1378698112'\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertTrue(action_result)\n self.assertTrue('classifications' in pc)\n self.assertTrue('skunk_works' in pc.classifications)\n self.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | F_1378698112')\n<|end_body_1|>\n\n<|body_start_2|>\n pc = DotDict()\n pc.dump = DotDict()\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.dump.json_dump = pijd\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n pc.upload_file_minidump_flash2.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\n pc.upload_file_minidump_flash2.json_dump['threads'][0]['frames'][4]['function'] = 'F455544145'\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertTrue(action_result)\n self.assertTrue('classifications' in pc)\n self.assertTrue('skunk_works' in pc.classifications)\n self.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | F455544145')\n<|end_body_2|>\n\n<|body_start_3|>\n pc = DotDict()\n pc.dump = DotDict()\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.dump.json_dump = pijd\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n pc.upload_file_minidump_flash2.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertTrue(action_result)\n self.assertTrue('classifications' in pc)\n self.assertTrue('skunk_works' in pc.classifications)\n self.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | other')\n<|end_body_3|>\n\n<|body_start_4|>\n pc = DotDict()\n pc.dump = DotDict()\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.dump.json_dump = pijd\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertFalse(action_result)\n self.assertFalse('classifications' in pc)\n<|end_body_4|>\n", "revision_id": "9c9b7701d7ddf9f3cbba1a4d0aa65758e8b49528", "skeleton": "<|skeleton|>\nclass TestSetWindowPos:\n\n def test_action_case_1(self):\n 
\"\"\"sentinel exsits in stack, but no secondaries\"\"\"\n <|body_0|>\n\n def test_action_case_2(self):\n \"\"\"sentinel exsits in stack, plus one secondary\"\"\"\n <|body_1|>\n\n def test_action_case_3(self):\n \"\"\"nothing in 1st dump, sentinel and secondary in upload_file_minidump_flash2 dump\"\"\"\n <|body_2|>\n\n def test_action_case_4(self):\n \"\"\"nothing in 1st dump, sentinel but no secondary in upload_file_minidump_flash2 dump\"\"\"\n <|body_3|>\n\n def test_action_case_5(self):\n \"\"\"nothing in either dump\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestSetWindowPos:\n def test_action_case_1(self):\n \"\"\"sentinel exsits in stack, but no secondaries\"\"\"\n pc = DotDict()\n pc.process_type = 'plugin'\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.json_dump = pijd\n pc.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertTrue(action_result)\n self.assertTrue('classifications' in pc)\n self.assertTrue('skunk_works' in pc.classifications)\n self.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | other')\n\n def test_action_case_2(self):\n \"\"\"sentinel exsits in stack, plus one secondary\"\"\"\n pc = DotDict()\n pc.process_type = 'plugin'\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.json_dump = pijd\n pc.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\n pc.json_dump['threads'][0]['frames'][4]['function'] = 'F_1378698112'\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertTrue(action_result)\n self.assertTrue('classifications' in pc)\n self.assertTrue('skunk_works' in pc.classifications)\n self.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | F_1378698112')\n\n def test_action_case_3(self):\n \"\"\"nothing in 1st dump, sentinel and secondary in upload_file_minidump_flash2 dump\"\"\"\n pc = DotDict()\n pc.dump = DotDict()\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.dump.json_dump = pijd\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n pc.upload_file_minidump_flash2.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\n pc.upload_file_minidump_flash2.json_dump['threads'][0]['frames'][4]['function'] = 'F455544145'\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertTrue(action_result)\n self.assertTrue('classifications' in pc)\n self.assertTrue('skunk_works' in pc.classifications)\n self.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | F455544145')\n\n def test_action_case_4(self):\n \"\"\"nothing in 1st dump, sentinel but no secondary in upload_file_minidump_flash2 dump\"\"\"\n pc = DotDict()\n pc.dump = DotDict()\n pijd = copy.deepcopy(cannonical_json_dump)\n 
pc.dump.json_dump = pijd\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n pc.upload_file_minidump_flash2.json_dump['threads'][0]['frames'][2]['function'] = 'NtUserSetWindowPos'\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertTrue(action_result)\n self.assertTrue('classifications' in pc)\n self.assertTrue('skunk_works' in pc.classifications)\n self.assertEqual(pc.classifications.skunk_works.classification, 'NtUserSetWindowPos | other')\n\n def test_action_case_5(self):\n \"\"\"nothing in either dump\"\"\"\n pc = DotDict()\n pc.dump = DotDict()\n pijd = copy.deepcopy(cannonical_json_dump)\n pc.dump.json_dump = pijd\n f2jd = copy.deepcopy(cannonical_json_dump)\n pc.upload_file_minidump_flash2 = DotDict()\n pc.upload_file_minidump_flash2.json_dump = f2jd\n fake_processor = create_basic_fake_processor()\n rc = DotDict()\n rule = SetWindowPos()\n action_result = rule.action(rc, pc, fake_processor)\n self.assertFalse(action_result)\n self.assertFalse('classifications' in pc)\n", "source": "the_stack_v2_python_sparse", "source_path": "socorro/unittest/processor/test_skunk_classifiers.py", "source_repo": "v1ka5/socorro", "split": "val", "star_events_count": 0} {"blob_id": "2645f710df68b335f7985f6567067055440cd2ac", "bodies": ["prefix_list_entry = []\nif entries:\n for entry in entries:\n subnet, min_len, max_len, action = entry\n prefix_list_entry.append({'{}_entry'.format(cls.typeof): {'action': action, 'max_prefix_length': max_len, 'min_prefix_length': min_len, 'subnet': subnet}})\ncls.json = {'name': name, 'entries': prefix_list_entry}\nreturn ElementCreator(cls)", "json = {'{}_entry'.format(self.typeof): {'action': action, 'min_prefix_length': min_prefix_length, 'max_prefix_length': max_prefix_length, 'subnet': subnet}}\nacl = search.element_by_href_as_smcresult(self.href)\nacl.json.get('entries').append(json)\nprepared_request(ModificationFailed, href=self.href, json=acl.json, etag=acl.etag).update()", "acl = search.element_by_href_as_smcresult(self.href)\nacl.json['entries'][:] = [entry for entry in acl.json.get('entries') if entry.get('{}_entry'.format(self.typeof)).get('subnet') != subnet]\nprepared_request(ModificationFailed, href=self.href, json=acl.json, etag=acl.etag).update()", "acl = search.element_by_href_as_smcresult(self.href)\nacls = []\nfor entry in acl.json.get('entries'):\n e = entry.get('{}_entry'.format(self.typeof))\n acls.append((e.get('subnet'), e.get('min_prefix_length'), e.get('max_prefix_length'), e.get('action')))\nreturn acls"], "bodies_text": "<|body_start_0|>\n prefix_list_entry = []\n if entries:\n for entry in entries:\n subnet, min_len, max_len, action = entry\n prefix_list_entry.append({'{}_entry'.format(cls.typeof): {'action': action, 'max_prefix_length': max_len, 'min_prefix_length': min_len, 'subnet': subnet}})\n cls.json = {'name': name, 'entries': prefix_list_entry}\n return ElementCreator(cls)\n<|end_body_0|>\n\n<|body_start_1|>\n json = {'{}_entry'.format(self.typeof): {'action': action, 'min_prefix_length': min_prefix_length, 'max_prefix_length': max_prefix_length, 'subnet': subnet}}\n acl = search.element_by_href_as_smcresult(self.href)\n acl.json.get('entries').append(json)\n prepared_request(ModificationFailed, href=self.href, json=acl.json, etag=acl.etag).update()\n<|end_body_1|>\n\n<|body_start_2|>\n acl = 
search.element_by_href_as_smcresult(self.href)\n acl.json['entries'][:] = [entry for entry in acl.json.get('entries') if entry.get('{}_entry'.format(self.typeof)).get('subnet') != subnet]\n prepared_request(ModificationFailed, href=self.href, json=acl.json, etag=acl.etag).update()\n<|end_body_2|>\n\n<|body_start_3|>\n acl = search.element_by_href_as_smcresult(self.href)\n acls = []\n for entry in acl.json.get('entries'):\n e = entry.get('{}_entry'.format(self.typeof))\n acls.append((e.get('subnet'), e.get('min_prefix_length'), e.get('max_prefix_length'), e.get('action')))\n return acls\n<|end_body_3|>\n", "class_docstring": "PrefixList provides common methods utilized by all prefix list operations", "class_name": "PrefixList", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PrefixList:\n \"\"\"PrefixList provides common methods utilized by all prefix list operations\"\"\"\n\n def create(cls, name, entries=None):\n \"\"\"Create an IPv4 or IPv6 Prefix List Entries should be a 4-tuple consisting of (subnet, min_prefix_len, max_prefix_len, action). Action values are 'permit' or 'deny'. For example:: IPPrefixList.create( name='poo', entries=[('10.0.0.0/8', 16, 32, 'deny'), ('192.16.1.0/24', 25, 32, 'permit')]) IPv6PrefixList.create( name='v6prefix', entries=[('ab00::/64', 65, 128, 'deny')])\"\"\"\n <|body_0|>\n\n def add_entry(self, subnet, min_prefix_length, max_prefix_length, action):\n \"\"\"Add an entry to an PrefixList :param str subnet: network address in cidr format :param int min_prefix_length: minimum mask bits :param int max_prefix_length: maximum mask bits :param str action: permit|deny :raises: :py:class: `smc.api.exceptions.ElementNotFound`: invalid element reference :raises: :py:class: 'smc.api.exceptions.ModificationFailed`: invalid entry :return: None\"\"\"\n <|body_1|>\n\n def remove_entry(self, subnet):\n \"\"\"Remove an PrefixList entry by subnet :param str subnet: subnet match to remove :raises: :py:class: `smc.api.exceptions.ModificationFailed` :return: None\"\"\"\n <|body_2|>\n\n def view(self):\n \"\"\"Return a view of the IP Access List in tuple format: (subnet, min_prefix_length, max_prefix_length, action) :return: list tuple\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n prefix_list_entry = []\n if entries:\n for entry in entries:\n subnet, min_len, max_len, action = entry\n prefix_list_entry.append({'{}_entry'.format(cls.typeof): {'action': action, 'max_prefix_length': max_len, 'min_prefix_length': min_len, 'subnet': subnet}})\n cls.json = {'name': name, 'entries': prefix_list_entry}\n return ElementCreator(cls)\n<|end_body_0|>\n\n<|body_start_1|>\n json = {'{}_entry'.format(self.typeof): {'action': action, 'min_prefix_length': min_prefix_length, 'max_prefix_length': max_prefix_length, 'subnet': subnet}}\n acl = search.element_by_href_as_smcresult(self.href)\n acl.json.get('entries').append(json)\n prepared_request(ModificationFailed, href=self.href, json=acl.json, etag=acl.etag).update()\n<|end_body_1|>\n\n<|body_start_2|>\n acl = search.element_by_href_as_smcresult(self.href)\n acl.json['entries'][:] = [entry for entry in acl.json.get('entries') if entry.get('{}_entry'.format(self.typeof)).get('subnet') != subnet]\n prepared_request(ModificationFailed, href=self.href, json=acl.json, etag=acl.etag).update()\n<|end_body_2|>\n\n<|body_start_3|>\n acl = search.element_by_href_as_smcresult(self.href)\n acls = []\n for entry in acl.json.get('entries'):\n e = 
entry.get('{}_entry'.format(self.typeof))\n acls.append((e.get('subnet'), e.get('min_prefix_length'), e.get('max_prefix_length'), e.get('action')))\n return acls\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000228", "length_bytes": 4764, "license_type": "permissive", "methods": [{"docstring": "Create an IPv4 or IPv6 Prefix List Entries should be a 4-tuple consisting of (subnet, min_prefix_len, max_prefix_len, action). Action values are 'permit' or 'deny'. For example:: IPPrefixList.create( name='poo', entries=[('10.0.0.0/8', 16, 32, 'deny'), ('192.16.1.0/24', 25, 32, 'permit')]) IPv6PrefixList.create( name='v6prefix', entries=[('ab00::/64', 65, 128, 'deny')])", "name": "create", "signature": "def create(cls, name, entries=None)"}, {"docstring": "Add an entry to an PrefixList :param str subnet: network address in cidr format :param int min_prefix_length: minimum mask bits :param int max_prefix_length: maximum mask bits :param str action: permit|deny :raises: :py:class: `smc.api.exceptions.ElementNotFound`: invalid element reference :raises: :py:class: 'smc.api.exceptions.ModificationFailed`: invalid entry :return: None", "name": "add_entry", "signature": "def add_entry(self, subnet, min_prefix_length, max_prefix_length, action)"}, {"docstring": "Remove an PrefixList entry by subnet :param str subnet: subnet match to remove :raises: :py:class: `smc.api.exceptions.ModificationFailed` :return: None", "name": "remove_entry", "signature": "def remove_entry(self, subnet)"}, {"docstring": "Return a view of the IP Access List in tuple format: (subnet, min_prefix_length, max_prefix_length, action) :return: list tuple", "name": "view", "signature": "def view(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_006567", "prompt": "Implement the Python class `PrefixList` described below.\n\nClass description:\nPrefixList provides common methods utilized by all prefix list operations\n\nMethod signatures and docstrings:\n- def create(cls, name, entries=None): Create an IPv4 or IPv6 Prefix List Entries should be a 4-tuple consisting of (subnet, min_prefix_len, max_prefix_len, action). Action values are 'permit' or 'deny'. 
For example:: IPPrefixList.create( name='poo', entries=[('10.0.0.0/8', 16, 32, 'deny'), ('192.16.1.0/24', 25, 32, 'permit')]) IPv6PrefixList.create( name='v6prefix', entries=[('ab00::/64', 65, 128, 'deny')])\n- def add_entry(self, subnet, min_prefix_length, max_prefix_length, action): Add an entry to an PrefixList :param str subnet: network address in cidr format :param int min_prefix_length: minimum mask bits :param int max_prefix_length: maximum mask bits :param str action: permit|deny :raises: :py:class: `smc.api.exceptions.ElementNotFound`: invalid element reference :raises: :py:class: 'smc.api.exceptions.ModificationFailed`: invalid entry :return: None\n- def remove_entry(self, subnet): Remove an PrefixList entry by subnet :param str subnet: subnet match to remove :raises: :py:class: `smc.api.exceptions.ModificationFailed` :return: None\n- def view(self): Return a view of the IP Access List in tuple format: (subnet, min_prefix_length, max_prefix_length, action) :return: list tuple", "prompted_full_text": "Implement the Python class `PrefixList` described below.\n\nClass description:\nPrefixList provides common methods utilized by all prefix list operations\n\nMethod signatures and docstrings:\n- def create(cls, name, entries=None): Create an IPv4 or IPv6 Prefix List Entries should be a 4-tuple consisting of (subnet, min_prefix_len, max_prefix_len, action). Action values are 'permit' or 'deny'. For example:: IPPrefixList.create( name='poo', entries=[('10.0.0.0/8', 16, 32, 'deny'), ('192.16.1.0/24', 25, 32, 'permit')]) IPv6PrefixList.create( name='v6prefix', entries=[('ab00::/64', 65, 128, 'deny')])\n- def add_entry(self, subnet, min_prefix_length, max_prefix_length, action): Add an entry to an PrefixList :param str subnet: network address in cidr format :param int min_prefix_length: minimum mask bits :param int max_prefix_length: maximum mask bits :param str action: permit|deny :raises: :py:class: `smc.api.exceptions.ElementNotFound`: invalid element reference :raises: :py:class: 'smc.api.exceptions.ModificationFailed`: invalid entry :return: None\n- def remove_entry(self, subnet): Remove an PrefixList entry by subnet :param str subnet: subnet match to remove :raises: :py:class: `smc.api.exceptions.ModificationFailed` :return: None\n- def view(self): Return a view of the IP Access List in tuple format: (subnet, min_prefix_length, max_prefix_length, action) :return: list tuple\n\n<|skeleton|>\nclass PrefixList:\n \"\"\"PrefixList provides common methods utilized by all prefix list operations\"\"\"\n\n def create(cls, name, entries=None):\n \"\"\"Create an IPv4 or IPv6 Prefix List Entries should be a 4-tuple consisting of (subnet, min_prefix_len, max_prefix_len, action). Action values are 'permit' or 'deny'. 
For example:: IPPrefixList.create( name='poo', entries=[('10.0.0.0/8', 16, 32, 'deny'), ('192.16.1.0/24', 25, 32, 'permit')]) IPv6PrefixList.create( name='v6prefix', entries=[('ab00::/64', 65, 128, 'deny')])\"\"\"\n <|body_0|>\n\n def add_entry(self, subnet, min_prefix_length, max_prefix_length, action):\n \"\"\"Add an entry to an PrefixList :param str subnet: network address in cidr format :param int min_prefix_length: minimum mask bits :param int max_prefix_length: maximum mask bits :param str action: permit|deny :raises: :py:class: `smc.api.exceptions.ElementNotFound`: invalid element reference :raises: :py:class: 'smc.api.exceptions.ModificationFailed`: invalid entry :return: None\"\"\"\n <|body_1|>\n\n def remove_entry(self, subnet):\n \"\"\"Remove an PrefixList entry by subnet :param str subnet: subnet match to remove :raises: :py:class: `smc.api.exceptions.ModificationFailed` :return: None\"\"\"\n <|body_2|>\n\n def view(self):\n \"\"\"Return a view of the IP Access List in tuple format: (subnet, min_prefix_length, max_prefix_length, action) :return: list tuple\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n prefix_list_entry = []\n if entries:\n for entry in entries:\n subnet, min_len, max_len, action = entry\n prefix_list_entry.append({'{}_entry'.format(cls.typeof): {'action': action, 'max_prefix_length': max_len, 'min_prefix_length': min_len, 'subnet': subnet}})\n cls.json = {'name': name, 'entries': prefix_list_entry}\n return ElementCreator(cls)\n<|end_body_0|>\n\n<|body_start_1|>\n json = {'{}_entry'.format(self.typeof): {'action': action, 'min_prefix_length': min_prefix_length, 'max_prefix_length': max_prefix_length, 'subnet': subnet}}\n acl = search.element_by_href_as_smcresult(self.href)\n acl.json.get('entries').append(json)\n prepared_request(ModificationFailed, href=self.href, json=acl.json, etag=acl.etag).update()\n<|end_body_1|>\n\n<|body_start_2|>\n acl = search.element_by_href_as_smcresult(self.href)\n acl.json['entries'][:] = [entry for entry in acl.json.get('entries') if entry.get('{}_entry'.format(self.typeof)).get('subnet') != subnet]\n prepared_request(ModificationFailed, href=self.href, json=acl.json, etag=acl.etag).update()\n<|end_body_2|>\n\n<|body_start_3|>\n acl = search.element_by_href_as_smcresult(self.href)\n acls = []\n for entry in acl.json.get('entries'):\n e = entry.get('{}_entry'.format(self.typeof))\n acls.append((e.get('subnet'), e.get('min_prefix_length'), e.get('max_prefix_length'), e.get('action')))\n return acls\n<|end_body_3|>\n", "revision_id": "ae9894648787f7e81dca05de6b697c94e0f17d32", "skeleton": "<|skeleton|>\nclass PrefixList:\n \"\"\"PrefixList provides common methods utilized by all prefix list operations\"\"\"\n\n def create(cls, name, entries=None):\n \"\"\"Create an IPv4 or IPv6 Prefix List Entries should be a 4-tuple consisting of (subnet, min_prefix_len, max_prefix_len, action). Action values are 'permit' or 'deny'. 
For example:: IPPrefixList.create( name='poo', entries=[('10.0.0.0/8', 16, 32, 'deny'), ('192.16.1.0/24', 25, 32, 'permit')]) IPv6PrefixList.create( name='v6prefix', entries=[('ab00::/64', 65, 128, 'deny')])\"\"\"\n <|body_0|>\n\n def add_entry(self, subnet, min_prefix_length, max_prefix_length, action):\n \"\"\"Add an entry to an PrefixList :param str subnet: network address in cidr format :param int min_prefix_length: minimum mask bits :param int max_prefix_length: maximum mask bits :param str action: permit|deny :raises: :py:class: `smc.api.exceptions.ElementNotFound`: invalid element reference :raises: :py:class: 'smc.api.exceptions.ModificationFailed`: invalid entry :return: None\"\"\"\n <|body_1|>\n\n def remove_entry(self, subnet):\n \"\"\"Remove an PrefixList entry by subnet :param str subnet: subnet match to remove :raises: :py:class: `smc.api.exceptions.ModificationFailed` :return: None\"\"\"\n <|body_2|>\n\n def view(self):\n \"\"\"Return a view of the IP Access List in tuple format: (subnet, min_prefix_length, max_prefix_length, action) :return: list tuple\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PrefixList:\n \"\"\"PrefixList provides common methods utilized by all prefix list operations\"\"\"\n\n def create(cls, name, entries=None):\n \"\"\"Create an IPv4 or IPv6 Prefix List Entries should be a 4-tuple consisting of (subnet, min_prefix_len, max_prefix_len, action). Action values are 'permit' or 'deny'. For example:: IPPrefixList.create( name='poo', entries=[('10.0.0.0/8', 16, 32, 'deny'), ('192.16.1.0/24', 25, 32, 'permit')]) IPv6PrefixList.create( name='v6prefix', entries=[('ab00::/64', 65, 128, 'deny')])\"\"\"\n prefix_list_entry = []\n if entries:\n for entry in entries:\n subnet, min_len, max_len, action = entry\n prefix_list_entry.append({'{}_entry'.format(cls.typeof): {'action': action, 'max_prefix_length': max_len, 'min_prefix_length': min_len, 'subnet': subnet}})\n cls.json = {'name': name, 'entries': prefix_list_entry}\n return ElementCreator(cls)\n\n def add_entry(self, subnet, min_prefix_length, max_prefix_length, action):\n \"\"\"Add an entry to an PrefixList :param str subnet: network address in cidr format :param int min_prefix_length: minimum mask bits :param int max_prefix_length: maximum mask bits :param str action: permit|deny :raises: :py:class: `smc.api.exceptions.ElementNotFound`: invalid element reference :raises: :py:class: 'smc.api.exceptions.ModificationFailed`: invalid entry :return: None\"\"\"\n json = {'{}_entry'.format(self.typeof): {'action': action, 'min_prefix_length': min_prefix_length, 'max_prefix_length': max_prefix_length, 'subnet': subnet}}\n acl = search.element_by_href_as_smcresult(self.href)\n acl.json.get('entries').append(json)\n prepared_request(ModificationFailed, href=self.href, json=acl.json, etag=acl.etag).update()\n\n def remove_entry(self, subnet):\n \"\"\"Remove an PrefixList entry by subnet :param str subnet: subnet match to remove :raises: :py:class: `smc.api.exceptions.ModificationFailed` :return: None\"\"\"\n acl = search.element_by_href_as_smcresult(self.href)\n acl.json['entries'][:] = [entry for entry in acl.json.get('entries') if entry.get('{}_entry'.format(self.typeof)).get('subnet') != subnet]\n prepared_request(ModificationFailed, href=self.href, json=acl.json, etag=acl.etag).update()\n\n def view(self):\n \"\"\"Return a view of the IP Access List in tuple format: (subnet, min_prefix_length, 
max_prefix_length, action) :return: list tuple\"\"\"\n acl = search.element_by_href_as_smcresult(self.href)\n acls = []\n for entry in acl.json.get('entries'):\n e = entry.get('{}_entry'.format(self.typeof))\n acls.append((e.get('subnet'), e.get('min_prefix_length'), e.get('max_prefix_length'), e.get('action')))\n return acls\n", "source": "the_stack_v2_python_sparse", "source_path": "smc/routing/prefix_list.py", "source_repo": "m4h3/smc-python", "split": "val", "star_events_count": 0} {"blob_id": "0bd5dbee5dcf106c7690f7cdd17094a9481beab4", "bodies": ["self.is_highly_available = is_highly_available\nself.version = version\nself.vm_backup_status = vm_backup_status\nself.vm_backup_type = vm_backup_type", "if dictionary is None:\n return None\nis_highly_available = dictionary.get('isHighlyAvailable')\nversion = dictionary.get('version')\nvm_backup_status = dictionary.get('vmBackupStatus')\nvm_backup_type = dictionary.get('vmBackupType')\nreturn cls(is_highly_available, version, vm_backup_status, vm_backup_type)"], "bodies_text": "<|body_start_0|>\n self.is_highly_available = is_highly_available\n self.version = version\n self.vm_backup_status = vm_backup_status\n self.vm_backup_type = vm_backup_type\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n is_highly_available = dictionary.get('isHighlyAvailable')\n version = dictionary.get('version')\n vm_backup_status = dictionary.get('vmBackupStatus')\n vm_backup_type = dictionary.get('vmBackupType')\n return cls(is_highly_available, version, vm_backup_status, vm_backup_type)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'HypervVirtualMachine' model. Specifies information about a VirtualMachine Object in HyperV environment. Attributes: is_highly_available (bool): Specifies whether the VM is Highly Available or not. version (string): Specifies the version of the VM. For example, 8.0, 5.0 etc. vm_backup_status (VmBackupStatusEnum): Specifies the status of the VM for backup purpose. overrideDescription: true Specifies the backup status of a HyperV Virtual Machine object. 'kSupported' indicates the agent on the VM can do backup. 'kUnsupportedConfig' indicates the agent on the VM cannot do backup. 'kMissing' indicates the VM is not found in SCVMM. vm_backup_type (VmBackupTypeEnum): Specifies", "class_name": "HypervVirtualMachine", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HypervVirtualMachine:\n \"\"\"Implementation of the 'HypervVirtualMachine' model. Specifies information about a VirtualMachine Object in HyperV environment. Attributes: is_highly_available (bool): Specifies whether the VM is Highly Available or not. version (string): Specifies the version of the VM. For example, 8.0, 5.0 etc. vm_backup_status (VmBackupStatusEnum): Specifies the status of the VM for backup purpose. overrideDescription: true Specifies the backup status of a HyperV Virtual Machine object. 'kSupported' indicates the agent on the VM can do backup. 'kUnsupportedConfig' indicates the agent on the VM cannot do backup. 'kMissing' indicates the VM is not found in SCVMM. 
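The PrefixList record above encodes each (subnet, min_prefix_len, max_prefix_len, action) 4-tuple under a '{typeof}_entry' key before posting it. A minimal stand-alone sketch of that encoding, assuming a hypothetical typeof value of 'ip_prefix_list' (in the real class it comes from the IPPrefixList/IPv6PrefixList subclass):

    def build_prefix_list_json(name, entries, typeof='ip_prefix_list'):
        # Each entry is a 4-tuple: (subnet, min_prefix_len, max_prefix_len, action).
        prefix_list_entry = []
        for subnet, min_len, max_len, action in entries or []:
            prefix_list_entry.append({
                '{}_entry'.format(typeof): {
                    'action': action,  # 'permit' or 'deny'
                    'max_prefix_length': max_len,
                    'min_prefix_length': min_len,
                    'subnet': subnet,
                }
            })
        return {'name': name, 'entries': prefix_list_entry}

    payload = build_prefix_list_json(
        'demo', [('10.0.0.0/8', 16, 32, 'deny'), ('192.16.1.0/24', 25, 32, 'permit')])
    print(payload['entries'][0]['ip_prefix_list_entry']['action'])  # deny

The add_entry/remove_entry/view bodies in the record all follow the same fetch-modify-update cycle against self.href, passing the element's etag back so the SMC API can reject concurrent modifications.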
vm_backup_type (VmBackupTypeEnum): Specifies\"\"\"\n\n def __init__(self, is_highly_available=None, version=None, vm_backup_status=None, vm_backup_type=None):\n \"\"\"Constructor for the HypervVirtualMachine class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.is_highly_available = is_highly_available\n self.version = version\n self.vm_backup_status = vm_backup_status\n self.vm_backup_type = vm_backup_type\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n is_highly_available = dictionary.get('isHighlyAvailable')\n version = dictionary.get('version')\n vm_backup_status = dictionary.get('vmBackupStatus')\n vm_backup_type = dictionary.get('vmBackupType')\n return cls(is_highly_available, version, vm_backup_status, vm_backup_type)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000229", "length_bytes": 2925, "license_type": "permissive", "methods": [{"docstring": "Constructor for the HypervVirtualMachine class", "name": "__init__", "signature": "def __init__(self, is_highly_available=None, version=None, vm_backup_status=None, vm_backup_type=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001421", "prompt": "Implement the Python class `HypervVirtualMachine` described below.\n\nClass description:\nImplementation of the 'HypervVirtualMachine' model. Specifies information about a VirtualMachine Object in HyperV environment. Attributes: is_highly_available (bool): Specifies whether the VM is Highly Available or not. version (string): Specifies the version of the VM. For example, 8.0, 5.0 etc. vm_backup_status (VmBackupStatusEnum): Specifies the status of the VM for backup purpose. overrideDescription: true Specifies the backup status of a HyperV Virtual Machine object. 'kSupported' indicates the agent on the VM can do backup. 'kUnsupportedConfig' indicates the agent on the VM cannot do backup. 'kMissing' indicates the VM is not found in SCVMM. vm_backup_type (VmBackupTypeEnum): Specifies\n\nMethod signatures and docstrings:\n- def __init__(self, is_highly_available=None, version=None, vm_backup_status=None, vm_backup_type=None): Constructor for the HypervVirtualMachine class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `HypervVirtualMachine` described below.\n\nClass description:\nImplementation of the 'HypervVirtualMachine' model. Specifies information about a VirtualMachine Object in HyperV environment. 
Attributes: is_highly_available (bool): Specifies whether the VM is Highly Available or not. version (string): Specifies the version of the VM. For example, 8.0, 5.0 etc. vm_backup_status (VmBackupStatusEnum): Specifies the status of the VM for backup purpose. overrideDescription: true Specifies the backup status of a HyperV Virtual Machine object. 'kSupported' indicates the agent on the VM can do backup. 'kUnsupportedConfig' indicates the agent on the VM cannot do backup. 'kMissing' indicates the VM is not found in SCVMM. vm_backup_type (VmBackupTypeEnum): Specifies\n\nMethod signatures and docstrings:\n- def __init__(self, is_highly_available=None, version=None, vm_backup_status=None, vm_backup_type=None): Constructor for the HypervVirtualMachine class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass HypervVirtualMachine:\n \"\"\"Implementation of the 'HypervVirtualMachine' model. Specifies information about a VirtualMachine Object in HyperV environment. Attributes: is_highly_available (bool): Specifies whether the VM is Highly Available or not. version (string): Specifies the version of the VM. For example, 8.0, 5.0 etc. vm_backup_status (VmBackupStatusEnum): Specifies the status of the VM for backup purpose. overrideDescription: true Specifies the backup status of a HyperV Virtual Machine object. 'kSupported' indicates the agent on the VM can do backup. 'kUnsupportedConfig' indicates the agent on the VM cannot do backup. 'kMissing' indicates the VM is not found in SCVMM. vm_backup_type (VmBackupTypeEnum): Specifies\"\"\"\n\n def __init__(self, is_highly_available=None, version=None, vm_backup_status=None, vm_backup_type=None):\n \"\"\"Constructor for the HypervVirtualMachine class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.is_highly_available = is_highly_available\n self.version = version\n self.vm_backup_status = vm_backup_status\n self.vm_backup_type = vm_backup_type\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n is_highly_available = dictionary.get('isHighlyAvailable')\n version = dictionary.get('version')\n vm_backup_status = dictionary.get('vmBackupStatus')\n vm_backup_type = dictionary.get('vmBackupType')\n return cls(is_highly_available, version, vm_backup_status, vm_backup_type)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass HypervVirtualMachine:\n \"\"\"Implementation of the 'HypervVirtualMachine' model. Specifies information about a VirtualMachine Object in HyperV environment. Attributes: is_highly_available (bool): Specifies whether the VM is Highly Available or not. version (string): Specifies the version of the VM. For example, 8.0, 5.0 etc. vm_backup_status (VmBackupStatusEnum): Specifies the status of the VM for backup purpose. 
overrideDescription: true Specifies the backup status of a HyperV Virtual Machine object. 'kSupported' indicates the agent on the VM can do backup. 'kUnsupportedConfig' indicates the agent on the VM cannot do backup. 'kMissing' indicates the VM is not found in SCVMM. vm_backup_type (VmBackupTypeEnum): Specifies\"\"\"\n\n def __init__(self, is_highly_available=None, version=None, vm_backup_status=None, vm_backup_type=None):\n \"\"\"Constructor for the HypervVirtualMachine class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class HypervVirtualMachine:\n \"\"\"Implementation of the 'HypervVirtualMachine' model. Specifies information about a VirtualMachine Object in HyperV environment. Attributes: is_highly_available (bool): Specifies whether the VM is Highly Available or not. version (string): Specifies the version of the VM. For example, 8.0, 5.0 etc. vm_backup_status (VmBackupStatusEnum): Specifies the status of the VM for backup purpose. overrideDescription: true Specifies the backup status of a HyperV Virtual Machine object. 'kSupported' indicates the agent on the VM can do backup. 'kUnsupportedConfig' indicates the agent on the VM cannot do backup. 'kMissing' indicates the VM is not found in SCVMM. vm_backup_type (VmBackupTypeEnum): Specifies\"\"\"\n\n def __init__(self, is_highly_available=None, version=None, vm_backup_status=None, vm_backup_type=None):\n \"\"\"Constructor for the HypervVirtualMachine class\"\"\"\n self.is_highly_available = is_highly_available\n self.version = version\n self.vm_backup_status = vm_backup_status\n self.vm_backup_type = vm_backup_type\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n is_highly_available = dictionary.get('isHighlyAvailable')\n version = dictionary.get('version')\n vm_backup_status = dictionary.get('vmBackupStatus')\n vm_backup_type = dictionary.get('vmBackupType')\n return cls(is_highly_available, version, vm_backup_status, vm_backup_type)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/hyperv_virtual_machine.py", "source_repo": "cohesity/management-sdk-python", "split": "val", "star_events_count": 24} {"blob_id": "fa37ba43d9881aa3a47fa4d92c9d2ca7d2dafe27", "bodies": ["init_res = super(ResUserInherit, self).__init__(pool, cr)\nself.SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)\nself.SELF_WRITEABLE_FIELDS.extend(['wx_user_ids'])\nself.SELF_READABLE_FIELDS = list(self.SELF_READABLE_FIELDS)\nself.SELF_READABLE_FIELDS.extend(['wx_user_ids'])\nreturn init_res", "check_result = self.check_login_source(password)\nif check_result is True:\n return\nelse:\n super()._check_credentials(password)", "for record in self:\n if len(record.cur_wx_user_id) > 0:\n record.cur_wx_user_id = record.cur_wx_user_id[0].id", "if isinstance(password, dict) is False:\n return False\nelif request.session['auth_code'] == password['auth_code']:\n return True\nelse:\n return False"], "bodies_text": "<|body_start_0|>\n init_res = super(ResUserInherit, self).__init__(pool, cr)\n self.SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)\n self.SELF_WRITEABLE_FIELDS.extend(['wx_user_ids'])\n self.SELF_READABLE_FIELDS = list(self.SELF_READABLE_FIELDS)\n self.SELF_READABLE_FIELDS.extend(['wx_user_ids'])\n return init_res\n<|end_body_0|>\n\n<|body_start_1|>\n check_result = self.check_login_source(password)\n if check_result is True:\n return\n else:\n super()._check_credentials(password)\n<|end_body_1|>\n\n<|body_start_2|>\n for record in self:\n if len(record.cur_wx_user_id) > 0:\n record.cur_wx_user_id = record.cur_wx_user_id[0].id\n<|end_body_2|>\n\n<|body_start_3|>\n if isinstance(password, dict) is False:\n return False\n elif request.session['auth_code'] == password['auth_code']:\n return True\n else:\n return False\n<|end_body_3|>\n", "class_docstring": "", "class_name": "ResUserInherit", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ResUserInherit:\n\n def __init__(self, pool, cr):\n \"\"\"Override of __init__ to add access rights on display_employees_suggestions fields. 
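The from_dictionary body in the HypervVirtualMachine record maps camelCase wire keys onto snake_case attributes and returns None for a missing payload. A self-contained sketch of that round-trip (attribute and key names copied from the record; @classmethod is added here so the sketch runs on its own, and the enum-typed fields are fed plain strings for illustration):

    class HypervVirtualMachine:
        def __init__(self, is_highly_available=None, version=None,
                     vm_backup_status=None, vm_backup_type=None):
            self.is_highly_available = is_highly_available
            self.version = version
            self.vm_backup_status = vm_backup_status
            self.vm_backup_type = vm_backup_type

        @classmethod
        def from_dictionary(cls, dictionary):
            if dictionary is None:
                return None  # tolerate a missing payload rather than raising
            return cls(dictionary.get('isHighlyAvailable'),
                       dictionary.get('version'),
                       dictionary.get('vmBackupStatus'),
                       dictionary.get('vmBackupType'))

    vm = HypervVirtualMachine.from_dictionary(
        {'isHighlyAvailable': True, 'version': '8.0', 'vmBackupStatus': 'kSupported'})
    print(vm.version, vm.vm_backup_type)  # 8.0 None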
Access rights are disabled by default, but allowed on some specific fields defined in self.SELF_{READ/WRITE}ABLE_FIELDS.\"\"\"\n <|body_0|>\n\n def _check_credentials(self, password):\n \"\"\"改写了原有的_check_credentials函数,原有方法检查成功,则不抛错,直接return,所以通过是否抛错来绕过验证。 在验证之前加入了一个自定的验证函数 :param password: 传入的密码 :return:\"\"\"\n <|body_1|>\n\n def _compute_default_wx_user(self):\n \"\"\"计算默认微信用户 :return:\"\"\"\n <|body_2|>\n\n def check_login_source(self, password):\n \"\"\"继承并添加企业微信验证 :param password: 如果是企业微信扫码、免登过来的,则为一个dict,否则为一个字符串 :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n init_res = super(ResUserInherit, self).__init__(pool, cr)\n self.SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)\n self.SELF_WRITEABLE_FIELDS.extend(['wx_user_ids'])\n self.SELF_READABLE_FIELDS = list(self.SELF_READABLE_FIELDS)\n self.SELF_READABLE_FIELDS.extend(['wx_user_ids'])\n return init_res\n<|end_body_0|>\n\n<|body_start_1|>\n check_result = self.check_login_source(password)\n if check_result is True:\n return\n else:\n super()._check_credentials(password)\n<|end_body_1|>\n\n<|body_start_2|>\n for record in self:\n if len(record.cur_wx_user_id) > 0:\n record.cur_wx_user_id = record.cur_wx_user_id[0].id\n<|end_body_2|>\n\n<|body_start_3|>\n if isinstance(password, dict) is False:\n return False\n elif request.session['auth_code'] == password['auth_code']:\n return True\n else:\n return False\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000230", "length_bytes": 3012, "license_type": "no_license", "methods": [{"docstring": "Override of __init__ to add access rights on display_employees_suggestions fields. Access rights are disabled by default, but allowed on some specific fields defined in self.SELF_{READ/WRITE}ABLE_FIELDS.", "name": "__init__", "signature": "def __init__(self, pool, cr)"}, {"docstring": "改写了原有的_check_credentials函数,原有方法检查成功,则不抛错,直接return,所以通过是否抛错来绕过验证。 在验证之前加入了一个自定的验证函数 :param password: 传入的密码 :return:", "name": "_check_credentials", "signature": "def _check_credentials(self, password)"}, {"docstring": "计算默认微信用户 :return:", "name": "_compute_default_wx_user", "signature": "def _compute_default_wx_user(self)"}, {"docstring": "继承并添加企业微信验证 :param password: 如果是企业微信扫码、免登过来的,则为一个dict,否则为一个字符串 :return:", "name": "check_login_source", "signature": "def check_login_source(self, password)"}], "n_methods": 4, "prompt": "Implement the Python class `ResUserInherit` described below.\n\nClass description:\nImplement the ResUserInherit class.\n\nMethod signatures and docstrings:\n- def __init__(self, pool, cr): Override of __init__ to add access rights on display_employees_suggestions fields. Access rights are disabled by default, but allowed on some specific fields defined in self.SELF_{READ/WRITE}ABLE_FIELDS.\n- def _check_credentials(self, password): 改写了原有的_check_credentials函数,原有方法检查成功,则不抛错,直接return,所以通过是否抛错来绕过验证。 在验证之前加入了一个自定的验证函数 :param password: 传入的密码 :return:\n- def _compute_default_wx_user(self): 计算默认微信用户 :return:\n- def check_login_source(self, password): 继承并添加企业微信验证 :param password: 如果是企业微信扫码、免登过来的,则为一个dict,否则为一个字符串 :return:", "prompted_full_text": "Implement the Python class `ResUserInherit` described below.\n\nClass description:\nImplement the ResUserInherit class.\n\nMethod signatures and docstrings:\n- def __init__(self, pool, cr): Override of __init__ to add access rights on display_employees_suggestions fields. 
Access rights are disabled by default, but allowed on some specific fields defined in self.SELF_{READ/WRITE}ABLE_FIELDS.\n- def _check_credentials(self, password): 改写了原有的_check_credentials函数,原有方法检查成功,则不抛错,直接return,所以通过是否抛错来绕过验证。 在验证之前加入了一个自定的验证函数 :param password: 传入的密码 :return:\n- def _compute_default_wx_user(self): 计算默认微信用户 :return:\n- def check_login_source(self, password): 继承并添加企业微信验证 :param password: 如果是企业微信扫码、免登过来的,则为一个dict,否则为一个字符串 :return:\n\n<|skeleton|>\nclass ResUserInherit:\n\n def __init__(self, pool, cr):\n \"\"\"Override of __init__ to add access rights on display_employees_suggestions fields. Access rights are disabled by default, but allowed on some specific fields defined in self.SELF_{READ/WRITE}ABLE_FIELDS.\"\"\"\n <|body_0|>\n\n def _check_credentials(self, password):\n \"\"\"改写了原有的_check_credentials函数,原有方法检查成功,则不抛错,直接return,所以通过是否抛错来绕过验证。 在验证之前加入了一个自定的验证函数 :param password: 传入的密码 :return:\"\"\"\n <|body_1|>\n\n def _compute_default_wx_user(self):\n \"\"\"计算默认微信用户 :return:\"\"\"\n <|body_2|>\n\n def check_login_source(self, password):\n \"\"\"继承并添加企业微信验证 :param password: 如果是企业微信扫码、免登过来的,则为一个dict,否则为一个字符串 :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n init_res = super(ResUserInherit, self).__init__(pool, cr)\n self.SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)\n self.SELF_WRITEABLE_FIELDS.extend(['wx_user_ids'])\n self.SELF_READABLE_FIELDS = list(self.SELF_READABLE_FIELDS)\n self.SELF_READABLE_FIELDS.extend(['wx_user_ids'])\n return init_res\n<|end_body_0|>\n\n<|body_start_1|>\n check_result = self.check_login_source(password)\n if check_result is True:\n return\n else:\n super()._check_credentials(password)\n<|end_body_1|>\n\n<|body_start_2|>\n for record in self:\n if len(record.cur_wx_user_id) > 0:\n record.cur_wx_user_id = record.cur_wx_user_id[0].id\n<|end_body_2|>\n\n<|body_start_3|>\n if isinstance(password, dict) is False:\n return False\n elif request.session['auth_code'] == password['auth_code']:\n return True\n else:\n return False\n<|end_body_3|>\n", "revision_id": "13b428a5c4ade6278e3e5e996ef10d9fb0fea4b9", "skeleton": "<|skeleton|>\nclass ResUserInherit:\n\n def __init__(self, pool, cr):\n \"\"\"Override of __init__ to add access rights on display_employees_suggestions fields. Access rights are disabled by default, but allowed on some specific fields defined in self.SELF_{READ/WRITE}ABLE_FIELDS.\"\"\"\n <|body_0|>\n\n def _check_credentials(self, password):\n \"\"\"改写了原有的_check_credentials函数,原有方法检查成功,则不抛错,直接return,所以通过是否抛错来绕过验证。 在验证之前加入了一个自定的验证函数 :param password: 传入的密码 :return:\"\"\"\n <|body_1|>\n\n def _compute_default_wx_user(self):\n \"\"\"计算默认微信用户 :return:\"\"\"\n <|body_2|>\n\n def check_login_source(self, password):\n \"\"\"继承并添加企业微信验证 :param password: 如果是企业微信扫码、免登过来的,则为一个dict,否则为一个字符串 :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ResUserInherit:\n def __init__(self, pool, cr):\n \"\"\"Override of __init__ to add access rights on display_employees_suggestions fields. 
Access rights are disabled by default, but allowed on some specific fields defined in self.SELF_{READ/WRITE}ABLE_FIELDS.\"\"\"\n init_res = super(ResUserInherit, self).__init__(pool, cr)\n self.SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)\n self.SELF_WRITEABLE_FIELDS.extend(['wx_user_ids'])\n self.SELF_READABLE_FIELDS = list(self.SELF_READABLE_FIELDS)\n self.SELF_READABLE_FIELDS.extend(['wx_user_ids'])\n return init_res\n\n def _check_credentials(self, password):\n \"\"\"改写了原有的_check_credentials函数,原有方法检查成功,则不抛错,直接return,所以通过是否抛错来绕过验证。 在验证之前加入了一个自定的验证函数 :param password: 传入的密码 :return:\"\"\"\n check_result = self.check_login_source(password)\n if check_result is True:\n return\n else:\n super()._check_credentials(password)\n\n def _compute_default_wx_user(self):\n \"\"\"计算默认微信用户 :return:\"\"\"\n for record in self:\n if len(record.cur_wx_user_id) > 0:\n record.cur_wx_user_id = record.cur_wx_user_id[0].id\n\n def check_login_source(self, password):\n \"\"\"继承并添加企业微信验证 :param password: 如果是企业微信扫码、免登过来的,则为一个dict,否则为一个字符串 :return:\"\"\"\n if isinstance(password, dict) is False:\n return False\n elif request.session['auth_code'] == password['auth_code']:\n return True\n else:\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "mdias_addons/funenc_wechat/models/res_user.py", "source_repo": "rezaghanimi/main_mdias", "split": "val", "star_events_count": 0} {"blob_id": "51824e56f118de9aac609c013af6af59c98a3a62", "bodies": ["parser.add_argument('config', metavar='INSTANCE_CONFIG', completer=flags.InstanceConfigCompleter, help=\"Cloud Spanner instance config. The 'custom-' prefix is required to avoid name conflicts with Google-managed configurations.\")\nparser.add_argument('--display-name', help='The name of this instance configuration as it appears in UIs.')\nparser.add_argument('--etag', help='Used for optimistic concurrency control.')\nbase.ASYNC_FLAG.AddToParser(parser)\nlabels_util.AddUpdateLabelsFlags(parser)\nparser.add_argument('--validate-only', action='store_true', default=False, help='Use this flag to validate that the request will succeed before executing it.')", "op = instance_configs.Patch(args)\nif args.async_ or args.validate_only:\n return op\nreturn instance_config_operations.Await(op, 'Updating instance-config')"], "bodies_text": "<|body_start_0|>\n parser.add_argument('config', metavar='INSTANCE_CONFIG', completer=flags.InstanceConfigCompleter, help=\"Cloud Spanner instance config. 
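The _check_credentials override in the ResUserInherit record leans on the Odoo convention that a successful credential check returns silently while a failed one raises. A stand-alone sketch of that control flow, with a stub base class in place of Odoo's res.users, a local attribute in place of request.session['auth_code'], and a hypothetical AccessDenied exception mirroring Odoo's behaviour:

    class AccessDenied(Exception):
        pass

    class BaseUsers:
        def _check_credentials(self, password):
            # Stub for the real password check: raise on failure, return on success.
            if password != 'secret':
                raise AccessDenied()

    class WxUsers(BaseUsers):
        def __init__(self, session_auth_code=None):
            self._auth_code = session_auth_code  # stand-in for request.session['auth_code']

        def check_login_source(self, password):
            # Dict payloads come from the WeChat QR-code / auto-login flow.
            if not isinstance(password, dict):
                return False
            return self._auth_code == password.get('auth_code')

        def _check_credentials(self, password):
            if self.check_login_source(password) is True:
                return  # WeChat-side check passed; skip the password check entirely
            super()._check_credentials(password)

    user = WxUsers(session_auth_code='abc')
    user._check_credentials({'auth_code': 'abc'})  # passes via the WeChat path
    user._check_credentials('secret')              # falls back to the base check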
The 'custom-' prefix is required to avoid name conflicts with Google-managed configurations.\")\n parser.add_argument('--display-name', help='The name of this instance configuration as it appears in UIs.')\n parser.add_argument('--etag', help='Used for optimistic concurrency control.')\n base.ASYNC_FLAG.AddToParser(parser)\n labels_util.AddUpdateLabelsFlags(parser)\n parser.add_argument('--validate-only', action='store_true', default=False, help='Use this flag to validate that the request will succeed before executing it.')\n<|end_body_0|>\n\n<|body_start_1|>\n op = instance_configs.Patch(args)\n if args.async_ or args.validate_only:\n return op\n return instance_config_operations.Await(op, 'Updating instance-config')\n<|end_body_1|>\n", "class_docstring": "Update a Cloud Spanner instance configuration.", "class_name": "Update", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Update:\n \"\"\"Update a Cloud Spanner instance configuration.\"\"\"\n\n def Args(parser):\n \"\"\"Args is called by calliope to gather arguments for this command. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"This is what gets called when the user runs this command. Args: args: an argparse namespace. All the arguments that were provided to this command invocation. Returns: Instance config update response.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n parser.add_argument('config', metavar='INSTANCE_CONFIG', completer=flags.InstanceConfigCompleter, help=\"Cloud Spanner instance config. The 'custom-' prefix is required to avoid name conflicts with Google-managed configurations.\")\n parser.add_argument('--display-name', help='The name of this instance configuration as it appears in UIs.')\n parser.add_argument('--etag', help='Used for optimistic concurrency control.')\n base.ASYNC_FLAG.AddToParser(parser)\n labels_util.AddUpdateLabelsFlags(parser)\n parser.add_argument('--validate-only', action='store_true', default=False, help='Use this flag to validate that the request will succeed before executing it.')\n<|end_body_0|>\n\n<|body_start_1|>\n op = instance_configs.Patch(args)\n if args.async_ or args.validate_only:\n return op\n return instance_config_operations.Await(op, 'Updating instance-config')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000231", "length_bytes": 3901, "license_type": "permissive", "methods": [{"docstring": "Args is called by calliope to gather arguments for this command. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.", "name": "Args", "signature": "def Args(parser)"}, {"docstring": "This is what gets called when the user runs this command. Args: args: an argparse namespace. All the arguments that were provided to this command invocation. Returns: Instance config update response.", "name": "Run", "signature": "def Run(self, args)"}], "n_methods": 2, "prompt": "Implement the Python class `Update` described below.\n\nClass description:\nUpdate a Cloud Spanner instance configuration.\n\nMethod signatures and docstrings:\n- def Args(parser): Args is called by calliope to gather arguments for this command. 
Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.\n- def Run(self, args): This is what gets called when the user runs this command. Args: args: an argparse namespace. All the arguments that were provided to this command invocation. Returns: Instance config update response.", "prompted_full_text": "Implement the Python class `Update` described below.\n\nClass description:\nUpdate a Cloud Spanner instance configuration.\n\nMethod signatures and docstrings:\n- def Args(parser): Args is called by calliope to gather arguments for this command. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.\n- def Run(self, args): This is what gets called when the user runs this command. Args: args: an argparse namespace. All the arguments that were provided to this command invocation. Returns: Instance config update response.\n\n<|skeleton|>\nclass Update:\n \"\"\"Update a Cloud Spanner instance configuration.\"\"\"\n\n def Args(parser):\n \"\"\"Args is called by calliope to gather arguments for this command. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"This is what gets called when the user runs this command. Args: args: an argparse namespace. All the arguments that were provided to this command invocation. Returns: Instance config update response.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n parser.add_argument('config', metavar='INSTANCE_CONFIG', completer=flags.InstanceConfigCompleter, help=\"Cloud Spanner instance config. The 'custom-' prefix is required to avoid name conflicts with Google-managed configurations.\")\n parser.add_argument('--display-name', help='The name of this instance configuration as it appears in UIs.')\n parser.add_argument('--etag', help='Used for optimistic concurrency control.')\n base.ASYNC_FLAG.AddToParser(parser)\n labels_util.AddUpdateLabelsFlags(parser)\n parser.add_argument('--validate-only', action='store_true', default=False, help='Use this flag to validate that the request will succeed before executing it.')\n<|end_body_0|>\n\n<|body_start_1|>\n op = instance_configs.Patch(args)\n if args.async_ or args.validate_only:\n return op\n return instance_config_operations.Await(op, 'Updating instance-config')\n<|end_body_1|>\n", "revision_id": "392abf004b16203030e6efd2f0af24db7c8d669e", "skeleton": "<|skeleton|>\nclass Update:\n \"\"\"Update a Cloud Spanner instance configuration.\"\"\"\n\n def Args(parser):\n \"\"\"Args is called by calliope to gather arguments for this command. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"This is what gets called when the user runs this command. Args: args: an argparse namespace. All the arguments that were provided to this command invocation. Returns: Instance config update response.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Update:\n \"\"\"Update a Cloud Spanner instance configuration.\"\"\"\n\n def Args(parser):\n \"\"\"Args is called by calliope to gather arguments for this command. 
Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.\"\"\"\n parser.add_argument('config', metavar='INSTANCE_CONFIG', completer=flags.InstanceConfigCompleter, help=\"Cloud Spanner instance config. The 'custom-' prefix is required to avoid name conflicts with Google-managed configurations.\")\n parser.add_argument('--display-name', help='The name of this instance configuration as it appears in UIs.')\n parser.add_argument('--etag', help='Used for optimistic concurrency control.')\n base.ASYNC_FLAG.AddToParser(parser)\n labels_util.AddUpdateLabelsFlags(parser)\n parser.add_argument('--validate-only', action='store_true', default=False, help='Use this flag to validate that the request will succeed before executing it.')\n\n def Run(self, args):\n \"\"\"This is what gets called when the user runs this command. Args: args: an argparse namespace. All the arguments that were provided to this command invocation. Returns: Instance config update response.\"\"\"\n op = instance_configs.Patch(args)\n if args.async_ or args.validate_only:\n return op\n return instance_config_operations.Await(op, 'Updating instance-config')\n", "source": "the_stack_v2_python_sparse", "source_path": "lib/surface/spanner/instance_configs/update.py", "source_repo": "google-cloud-sdk-unofficial/google-cloud-sdk", "split": "val", "star_events_count": 9} {"blob_id": "1a6281e2143db57d0e75b8987e9b3eaf7ec89c41", "bodies": ["n = len(arr)\npre_sum = [0] * (n + 1)\nans = 0\nfor i in range(n):\n pre_sum[i + 1] = pre_sum[i] + arr[i]\nfor i in range(1, n + 1):\n for j in range(i):\n if (pre_sum[i] - pre_sum[j]) % 2 == 1:\n ans += 1\nreturn ans % (1000000000.0 + 7)", "n = len(arr)\nans = 0\neven = 0\nodd = 0\nfor i in range(n):\n if arr[i] % 2:\n odd, even = (even + 1, odd)\n else:\n odd, even = (odd, even + 1)\n ans += odd\nreturn int(ans % (1000000000.0 + 7))"], "bodies_text": "<|body_start_0|>\n n = len(arr)\n pre_sum = [0] * (n + 1)\n ans = 0\n for i in range(n):\n pre_sum[i + 1] = pre_sum[i] + arr[i]\n for i in range(1, n + 1):\n for j in range(i):\n if (pre_sum[i] - pre_sum[j]) % 2 == 1:\n ans += 1\n return ans % (1000000000.0 + 7)\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(arr)\n ans = 0\n even = 0\n odd = 0\n for i in range(n):\n if arr[i] % 2:\n odd, even = (even + 1, odd)\n else:\n odd, even = (odd, even + 1)\n ans += odd\n return int(ans % (1000000000.0 + 7))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def numOfSubarrays_timeout(self, arr: List[int]) -> int:\n \"\"\"deprecated: 超时\"\"\"\n <|body_0|>\n\n def numOfSubarrays(self, arr: List[int]) -> int:\n \"\"\"优化:odd + odd => even / even + even => even / odd + even => odd 耗时:$O(n)$\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(arr)\n pre_sum = [0] * (n + 1)\n ans = 0\n for i in range(n):\n pre_sum[i + 1] = pre_sum[i] + arr[i]\n for i in range(1, n + 1):\n for j in range(i):\n if (pre_sum[i] - pre_sum[j]) % 2 == 1:\n ans += 1\n return ans % (1000000000.0 + 7)\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(arr)\n ans = 0\n even = 0\n odd = 0\n for i in range(n):\n if arr[i] % 2:\n odd, even = (even + 1, odd)\n else:\n odd, even = (odd, even + 1)\n ans += odd\n return int(ans % (1000000000.0 + 7))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000232", "length_bytes": 2738, "license_type": 
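The Update command record follows gcloud's calliope split: a static Args hook registers flags on a parser, and Run consumes the parsed namespace, returning the raw operation when --async or --validate-only is set. A rough plain-argparse analogue (flag names copied from the record; the instance_configs.Patch and Await calls are replaced with dict stubs since the real API is not shown here):

    import argparse

    def add_args(parser):
        parser.add_argument('config', metavar='INSTANCE_CONFIG')
        parser.add_argument('--display-name')
        parser.add_argument('--etag')
        parser.add_argument('--async', dest='async_', action='store_true')
        parser.add_argument('--validate-only', action='store_true', default=False)

    def run(args):
        op = {'patch': args.config}  # stand-in for instance_configs.Patch(args)
        if args.async_ or args.validate_only:
            return op                # hand back the operation without waiting on it
        return {'awaited': op}       # stand-in for instance_config_operations.Await(...)

    parser = argparse.ArgumentParser()
    add_args(parser)
    print(run(parser.parse_args(['custom-demo', '--validate-only'])))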
"no_license", "methods": [{"docstring": "deprecated: 超时", "name": "numOfSubarrays_timeout", "signature": "def numOfSubarrays_timeout(self, arr: List[int]) -> int"}, {"docstring": "优化:odd + odd => even / even + even => even / odd + even => odd 耗时:$O(n)$", "name": "numOfSubarrays", "signature": "def numOfSubarrays(self, arr: List[int]) -> int"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numOfSubarrays_timeout(self, arr: List[int]) -> int: deprecated: 超时\n- def numOfSubarrays(self, arr: List[int]) -> int: 优化:odd + odd => even / even + even => even / odd + even => odd 耗时:$O(n)$", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numOfSubarrays_timeout(self, arr: List[int]) -> int: deprecated: 超时\n- def numOfSubarrays(self, arr: List[int]) -> int: 优化:odd + odd => even / even + even => even / odd + even => odd 耗时:$O(n)$\n\n<|skeleton|>\nclass Solution:\n\n def numOfSubarrays_timeout(self, arr: List[int]) -> int:\n \"\"\"deprecated: 超时\"\"\"\n <|body_0|>\n\n def numOfSubarrays(self, arr: List[int]) -> int:\n \"\"\"优化:odd + odd => even / even + even => even / odd + even => odd 耗时:$O(n)$\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(arr)\n pre_sum = [0] * (n + 1)\n ans = 0\n for i in range(n):\n pre_sum[i + 1] = pre_sum[i] + arr[i]\n for i in range(1, n + 1):\n for j in range(i):\n if (pre_sum[i] - pre_sum[j]) % 2 == 1:\n ans += 1\n return ans % (1000000000.0 + 7)\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(arr)\n ans = 0\n even = 0\n odd = 0\n for i in range(n):\n if arr[i] % 2:\n odd, even = (even + 1, odd)\n else:\n odd, even = (odd, even + 1)\n ans += odd\n return int(ans % (1000000000.0 + 7))\n<|end_body_1|>\n", "revision_id": "f832227c4d0e0b1c0cc326561187004ef24e2a68", "skeleton": "<|skeleton|>\nclass Solution:\n\n def numOfSubarrays_timeout(self, arr: List[int]) -> int:\n \"\"\"deprecated: 超时\"\"\"\n <|body_0|>\n\n def numOfSubarrays(self, arr: List[int]) -> int:\n \"\"\"优化:odd + odd => even / even + even => even / odd + even => odd 耗时:$O(n)$\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def numOfSubarrays_timeout(self, arr: List[int]) -> int:\n \"\"\"deprecated: 超时\"\"\"\n n = len(arr)\n pre_sum = [0] * (n + 1)\n ans = 0\n for i in range(n):\n pre_sum[i + 1] = pre_sum[i] + arr[i]\n for i in range(1, n + 1):\n for j in range(i):\n if (pre_sum[i] - pre_sum[j]) % 2 == 1:\n ans += 1\n return ans % (1000000000.0 + 7)\n\n def numOfSubarrays(self, arr: List[int]) -> int:\n \"\"\"优化:odd + odd => even / even + even => even / odd + even => odd 耗时:$O(n)$\"\"\"\n n = len(arr)\n ans = 0\n even = 0\n odd = 0\n for i in range(n):\n if arr[i] % 2:\n odd, even = (even + 1, odd)\n else:\n odd, even = (odd, even + 1)\n ans += odd\n return int(ans % (1000000000.0 + 7))\n", "source": "the_stack_v2_python_sparse", "source_path": "1524.py", "source_repo": "Gackle/leetcode_practice", "split": "val", "star_events_count": 0} {"blob_id": "50ba99a16374d14ea8ac3fca26dd55229e62c7cb", "bodies": ["world_size = int(os.environ['WORLD_SIZE'])\nnode_rank = int(os.environ['RANK'])\nlocal_rank = int(os.environ['LOCAL_RANK'])\nself.result_file_template = 
result_file_template\ntorch.distributed.init_process_group(backend='nccl')\ncuda.set_device(local_rank)\nlabel_mapping = {0, 'right', 1, 'left', 2, 'neutral'}\ntest_db_path = os.path.join(os.path.dirname(__file__), 'datasets/test_db.sqlite')\ndataset = SqliteDataset('fake/csv/path', label_mapping, sqlite_path=test_db_path, sequence_len=128, text_col_name='foo', label_col_name='bar', delete_db=False, quiet=True)\nself.dataloader = MultiprocessingDataloader(dataset, world_size, node_rank=node_rank)\naccumulated_data = {'epoch0': [], 'epoch1': []}\nfor epoch in range(2):\n self.dataloader.set_epoch(epoch)\n samples = self.run(epoch, accumulated_data)\n res_file_path = os.path.join(self.result_file_template, 'result' + str(local_rank))\nwith open(res_file_path, 'w') as fd:\n fd.write(str(samples))", "for data in self.dataloader:\n if epoch == 0:\n accumulated_data['epoch0'].append(int(data['sample_id']))\n elif epoch == 1:\n accumulated_data['epoch1'].append(int(data['sample_id']))\n else:\n raise ValueError('Bad epoch')\nreturn accumulated_data"], "bodies_text": "<|body_start_0|>\n world_size = int(os.environ['WORLD_SIZE'])\n node_rank = int(os.environ['RANK'])\n local_rank = int(os.environ['LOCAL_RANK'])\n self.result_file_template = result_file_template\n torch.distributed.init_process_group(backend='nccl')\n cuda.set_device(local_rank)\n label_mapping = {0, 'right', 1, 'left', 2, 'neutral'}\n test_db_path = os.path.join(os.path.dirname(__file__), 'datasets/test_db.sqlite')\n dataset = SqliteDataset('fake/csv/path', label_mapping, sqlite_path=test_db_path, sequence_len=128, text_col_name='foo', label_col_name='bar', delete_db=False, quiet=True)\n self.dataloader = MultiprocessingDataloader(dataset, world_size, node_rank=node_rank)\n accumulated_data = {'epoch0': [], 'epoch1': []}\n for epoch in range(2):\n self.dataloader.set_epoch(epoch)\n samples = self.run(epoch, accumulated_data)\n res_file_path = os.path.join(self.result_file_template, 'result' + str(local_rank))\n with open(res_file_path, 'w') as fd:\n fd.write(str(samples))\n<|end_body_0|>\n\n<|body_start_1|>\n for data in self.dataloader:\n if epoch == 0:\n accumulated_data['epoch0'].append(int(data['sample_id']))\n elif epoch == 1:\n accumulated_data['epoch1'].append(int(data['sample_id']))\n else:\n raise ValueError('Bad epoch')\n return accumulated_data\n<|end_body_1|>\n", "class_docstring": "Pretends to be a training script that is forked into (potentially) multiple processes on multiple machines. This minimal test script is used with test_multiprocess_sampler.py. It merely draws samples from an Sqlite test database via a distributed sampler. Each forked instance of this script runs through two epochs over the database. It writes the samples it draws in each epoch to file, which is different for each process. The main unittest (test_multiprocess_sampler.py) then checks that taken together, the samples each process draws satisfy the following: o Within one epoch all processes together draw exactly the samples 0, 1,2,3,...23. o This sequence is permuted differently in the two epoc", "class_name": "TrainProcessTestHelper", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TrainProcessTestHelper:\n \"\"\"Pretends to be a training script that is forked into (potentially) multiple processes on multiple machines. This minimal test script is used with test_multiprocess_sampler.py. It merely draws samples from an Sqlite test database via a distributed sampler. 
Each forked instance of this script runs through two epochs over the database. It writes the samples it draws in each epoch to file, which is different for each process. The main unittest (test_multiprocess_sampler.py) then checks that taken together, the samples each process draws satisfy the following: o Within one epoch all processes together draw exactly the samples 0, 1,2,3,...23. o This sequence is permuted differently in the two epoc\"\"\"\n\n def __init__(self, result_file_template):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def run(self, epoch, accumulated_data):\n \"\"\"Ask for all the samples in a loop Write the result to a file as a dict @param epoch: @type epoch:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n world_size = int(os.environ['WORLD_SIZE'])\n node_rank = int(os.environ['RANK'])\n local_rank = int(os.environ['LOCAL_RANK'])\n self.result_file_template = result_file_template\n torch.distributed.init_process_group(backend='nccl')\n cuda.set_device(local_rank)\n label_mapping = {0, 'right', 1, 'left', 2, 'neutral'}\n test_db_path = os.path.join(os.path.dirname(__file__), 'datasets/test_db.sqlite')\n dataset = SqliteDataset('fake/csv/path', label_mapping, sqlite_path=test_db_path, sequence_len=128, text_col_name='foo', label_col_name='bar', delete_db=False, quiet=True)\n self.dataloader = MultiprocessingDataloader(dataset, world_size, node_rank=node_rank)\n accumulated_data = {'epoch0': [], 'epoch1': []}\n for epoch in range(2):\n self.dataloader.set_epoch(epoch)\n samples = self.run(epoch, accumulated_data)\n res_file_path = os.path.join(self.result_file_template, 'result' + str(local_rank))\n with open(res_file_path, 'w') as fd:\n fd.write(str(samples))\n<|end_body_0|>\n\n<|body_start_1|>\n for data in self.dataloader:\n if epoch == 0:\n accumulated_data['epoch0'].append(int(data['sample_id']))\n elif epoch == 1:\n accumulated_data['epoch1'].append(int(data['sample_id']))\n else:\n raise ValueError('Bad epoch')\n return accumulated_data\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000233", "length_bytes": 6063, "license_type": "no_license", "methods": [{"docstring": "Constructor", "name": "__init__", "signature": "def __init__(self, result_file_template)"}, {"docstring": "Ask for all the samples in a loop Write the result to a file as a dict @param epoch: @type epoch:", "name": "run", "signature": "def run(self, epoch, accumulated_data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006643", "prompt": "Implement the Python class `TrainProcessTestHelper` described below.\n\nClass description:\nPretends to be a training script that is forked into (potentially) multiple processes on multiple machines. This minimal test script is used with test_multiprocess_sampler.py. It merely draws samples from an Sqlite test database via a distributed sampler. Each forked instance of this script runs through two epochs over the database. It writes the samples it draws in each epoch to file, which is different for each process. The main unittest (test_multiprocess_sampler.py) then checks that taken together, the samples each process draws satisfy the following: o Within one epoch all processes together draw exactly the samples 0, 1,2,3,...23. 
o This sequence is permuted differently in the two epoc\n\nMethod signatures and docstrings:\n- def __init__(self, result_file_template): Constructor\n- def run(self, epoch, accumulated_data): Ask for all the samples in a loop Write the result to a file as a dict @param epoch: @type epoch:", "prompted_full_text": "Implement the Python class `TrainProcessTestHelper` described below.\n\nClass description:\nPretends to be a training script that is forked into (potentially) multiple processes on multiple machines. This minimal test script is used with test_multiprocess_sampler.py. It merely draws samples from an Sqlite test database via a distributed sampler. Each forked instance of this script runs through two epochs over the database. It writes the samples it draws in each epoch to file, which is different for each process. The main unittest (test_multiprocess_sampler.py) then checks that taken together, the samples each process draws satisfy the following: o Within one epoch all processes together draw exactly the samples 0, 1,2,3,...23. o This sequence is permuted differently in the two epoc\n\nMethod signatures and docstrings:\n- def __init__(self, result_file_template): Constructor\n- def run(self, epoch, accumulated_data): Ask for all the samples in a loop Write the result to a file as a dict @param epoch: @type epoch:\n\n<|skeleton|>\nclass TrainProcessTestHelper:\n \"\"\"Pretends to be a training script that is forked into (potentially) multiple processes on multiple machines. This minimal test script is used with test_multiprocess_sampler.py. It merely draws samples from an Sqlite test database via a distributed sampler. Each forked instance of this script runs through two epochs over the database. It writes the samples it draws in each epoch to file, which is different for each process. The main unittest (test_multiprocess_sampler.py) then checks that taken together, the samples each process draws satisfy the following: o Within one epoch all processes together draw exactly the samples 0, 1,2,3,...23. 
o This sequence is permuted differently in the two epoc\"\"\"\n\n def __init__(self, result_file_template):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def run(self, epoch, accumulated_data):\n \"\"\"Ask for all the samples in a loop Write the result to a file as a dict @param epoch: @type epoch:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n world_size = int(os.environ['WORLD_SIZE'])\n node_rank = int(os.environ['RANK'])\n local_rank = int(os.environ['LOCAL_RANK'])\n self.result_file_template = result_file_template\n torch.distributed.init_process_group(backend='nccl')\n cuda.set_device(local_rank)\n label_mapping = {0, 'right', 1, 'left', 2, 'neutral'}\n test_db_path = os.path.join(os.path.dirname(__file__), 'datasets/test_db.sqlite')\n dataset = SqliteDataset('fake/csv/path', label_mapping, sqlite_path=test_db_path, sequence_len=128, text_col_name='foo', label_col_name='bar', delete_db=False, quiet=True)\n self.dataloader = MultiprocessingDataloader(dataset, world_size, node_rank=node_rank)\n accumulated_data = {'epoch0': [], 'epoch1': []}\n for epoch in range(2):\n self.dataloader.set_epoch(epoch)\n samples = self.run(epoch, accumulated_data)\n res_file_path = os.path.join(self.result_file_template, 'result' + str(local_rank))\n with open(res_file_path, 'w') as fd:\n fd.write(str(samples))\n<|end_body_0|>\n\n<|body_start_1|>\n for data in self.dataloader:\n if epoch == 0:\n accumulated_data['epoch0'].append(int(data['sample_id']))\n elif epoch == 1:\n accumulated_data['epoch1'].append(int(data['sample_id']))\n else:\n raise ValueError('Bad epoch')\n return accumulated_data\n<|end_body_1|>\n", "revision_id": "854358d573831cd47d926448412daf3062d8c291", "skeleton": "<|skeleton|>\nclass TrainProcessTestHelper:\n \"\"\"Pretends to be a training script that is forked into (potentially) multiple processes on multiple machines. This minimal test script is used with test_multiprocess_sampler.py. It merely draws samples from an Sqlite test database via a distributed sampler. Each forked instance of this script runs through two epochs over the database. It writes the samples it draws in each epoch to file, which is different for each process. The main unittest (test_multiprocess_sampler.py) then checks that taken together, the samples each process draws satisfy the following: o Within one epoch all processes together draw exactly the samples 0, 1,2,3,...23. o This sequence is permuted differently in the two epoc\"\"\"\n\n def __init__(self, result_file_template):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def run(self, epoch, accumulated_data):\n \"\"\"Ask for all the samples in a loop Write the result to a file as a dict @param epoch: @type epoch:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TrainProcessTestHelper:\n \"\"\"Pretends to be a training script that is forked into (potentially) multiple processes on multiple machines. This minimal test script is used with test_multiprocess_sampler.py. It merely draws samples from an Sqlite test database via a distributed sampler. Each forked instance of this script runs through two epochs over the database. It writes the samples it draws in each epoch to file, which is different for each process. The main unittest (test_multiprocess_sampler.py) then checks that taken together, the samples each process draws satisfy the following: o Within one epoch all processes together draw exactly the samples 0, 1,2,3,...23. 
o This sequence is permuted differently in the two epoc\"\"\"\n\n def __init__(self, result_file_template):\n \"\"\"Constructor\"\"\"\n world_size = int(os.environ['WORLD_SIZE'])\n node_rank = int(os.environ['RANK'])\n local_rank = int(os.environ['LOCAL_RANK'])\n self.result_file_template = result_file_template\n torch.distributed.init_process_group(backend='nccl')\n cuda.set_device(local_rank)\n label_mapping = {0, 'right', 1, 'left', 2, 'neutral'}\n test_db_path = os.path.join(os.path.dirname(__file__), 'datasets/test_db.sqlite')\n dataset = SqliteDataset('fake/csv/path', label_mapping, sqlite_path=test_db_path, sequence_len=128, text_col_name='foo', label_col_name='bar', delete_db=False, quiet=True)\n self.dataloader = MultiprocessingDataloader(dataset, world_size, node_rank=node_rank)\n accumulated_data = {'epoch0': [], 'epoch1': []}\n for epoch in range(2):\n self.dataloader.set_epoch(epoch)\n samples = self.run(epoch, accumulated_data)\n res_file_path = os.path.join(self.result_file_template, 'result' + str(local_rank))\n with open(res_file_path, 'w') as fd:\n fd.write(str(samples))\n\n def run(self, epoch, accumulated_data):\n \"\"\"Ask for all the samples in a loop Write the result to a file as a dict @param epoch: @type epoch:\"\"\"\n for data in self.dataloader:\n if epoch == 0:\n accumulated_data['epoch0'].append(int(data['sample_id']))\n elif epoch == 1:\n accumulated_data['epoch1'].append(int(data['sample_id']))\n else:\n raise ValueError('Bad epoch')\n return accumulated_data\n", "source": "the_stack_v2_python_sparse", "source_path": "src/classifier/training_script_test_helper.py", "source_repo": "paepcke/bert_train_parallel", "split": "val", "star_events_count": 0} {"blob_id": "22ef2b0ef2fd54e04c079274f902d878a79fc345", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn UserExperienceAnalyticsDeviceStartupHistory()", "from .entity import Entity\nfrom .user_experience_analytics_operating_system_restart_category import UserExperienceAnalyticsOperatingSystemRestartCategory\nfrom .entity import Entity\nfrom .user_experience_analytics_operating_system_restart_category import UserExperienceAnalyticsOperatingSystemRestartCategory\nfields: Dict[str, Callable[[Any], None]] = {'coreBootTimeInMs': lambda n: setattr(self, 'core_boot_time_in_ms', n.get_int_value()), 'coreLoginTimeInMs': lambda n: setattr(self, 'core_login_time_in_ms', n.get_int_value()), 'deviceId': lambda n: setattr(self, 'device_id', n.get_str_value()), 'featureUpdateBootTimeInMs': lambda n: setattr(self, 'feature_update_boot_time_in_ms', n.get_int_value()), 'groupPolicyBootTimeInMs': lambda n: setattr(self, 'group_policy_boot_time_in_ms', n.get_int_value()), 'groupPolicyLoginTimeInMs': lambda n: setattr(self, 'group_policy_login_time_in_ms', n.get_int_value()), 'isFeatureUpdate': lambda n: setattr(self, 'is_feature_update', n.get_bool_value()), 'isFirstLogin': lambda n: setattr(self, 'is_first_login', n.get_bool_value()), 'operatingSystemVersion': lambda n: setattr(self, 'operating_system_version', n.get_str_value()), 'responsiveDesktopTimeInMs': lambda n: setattr(self, 'responsive_desktop_time_in_ms', n.get_int_value()), 'restartCategory': lambda n: setattr(self, 'restart_category', n.get_enum_value(UserExperienceAnalyticsOperatingSystemRestartCategory)), 'restartFaultBucket': lambda n: setattr(self, 'restart_fault_bucket', n.get_str_value()), 'restartStopCode': lambda n: setattr(self, 'restart_stop_code', n.get_str_value()), 'startTime': lambda n: setattr(self, 'start_time', 
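Two details in the TrainProcessTestHelper record read like transcription slips rather than intent: label_mapping = {0, 'right', 1, 'left', 2, 'neutral'} builds a set, where the surrounding SqliteDataset call presumably expects a dict such as {0: 'right', 1: 'left', 2: 'neutral'}, and in the first copy of the body the with open(...) block has drifted outside the epoch loop's scope. The per-rank result-file protocol itself is small; a torch-free sketch of it, with a fake loader standing in for MultiprocessingDataloader:

    import os
    import tempfile

    class FakeLoader:
        # Stand-in for MultiprocessingDataloader: yields sample ids, permuted per epoch.
        def __init__(self, ids):
            self.ids = list(ids)
            self.epoch = 0

        def set_epoch(self, epoch):
            self.epoch = epoch

        def __iter__(self):
            return iter(self.ids[::-1] if self.epoch % 2 else self.ids)

    def run_rank(local_rank, out_dir, loader):
        acc = {'epoch0': [], 'epoch1': []}
        for epoch in range(2):
            loader.set_epoch(epoch)
            acc['epoch%d' % epoch] = [int(i) for i in loader]
        # one result file per process, written once both epochs are done
        with open(os.path.join(out_dir, 'result%d' % local_rank), 'w') as fd:
            fd.write(str(acc))

    out_dir = tempfile.mkdtemp()
    run_rank(0, out_dir, FakeLoader(range(4)))
    print(open(os.path.join(out_dir, 'result0')).read())

The unittest described in the record then gathers these files across ranks and checks that each epoch's union of samples is exactly 0..23 and that the two epochs are permuted differently.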
n.get_datetime_value()), 'totalBootTimeInMs': lambda n: setattr(self, 'total_boot_time_in_ms', n.get_int_value()), 'totalLoginTimeInMs': lambda n: setattr(self, 'total_login_time_in_ms', n.get_int_value())}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_int_value('coreBootTimeInMs', self.core_boot_time_in_ms)\nwriter.write_int_value('coreLoginTimeInMs', self.core_login_time_in_ms)\nwriter.write_str_value('deviceId', self.device_id)\nwriter.write_int_value('featureUpdateBootTimeInMs', self.feature_update_boot_time_in_ms)\nwriter.write_int_value('groupPolicyBootTimeInMs', self.group_policy_boot_time_in_ms)\nwriter.write_int_value('groupPolicyLoginTimeInMs', self.group_policy_login_time_in_ms)\nwriter.write_bool_value('isFeatureUpdate', self.is_feature_update)\nwriter.write_bool_value('isFirstLogin', self.is_first_login)\nwriter.write_str_value('operatingSystemVersion', self.operating_system_version)\nwriter.write_int_value('responsiveDesktopTimeInMs', self.responsive_desktop_time_in_ms)\nwriter.write_enum_value('restartCategory', self.restart_category)\nwriter.write_str_value('restartFaultBucket', self.restart_fault_bucket)\nwriter.write_str_value('restartStopCode', self.restart_stop_code)\nwriter.write_datetime_value('startTime', self.start_time)\nwriter.write_int_value('totalBootTimeInMs', self.total_boot_time_in_ms)\nwriter.write_int_value('totalLoginTimeInMs', self.total_login_time_in_ms)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return UserExperienceAnalyticsDeviceStartupHistory()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .user_experience_analytics_operating_system_restart_category import UserExperienceAnalyticsOperatingSystemRestartCategory\n from .entity import Entity\n from .user_experience_analytics_operating_system_restart_category import UserExperienceAnalyticsOperatingSystemRestartCategory\n fields: Dict[str, Callable[[Any], None]] = {'coreBootTimeInMs': lambda n: setattr(self, 'core_boot_time_in_ms', n.get_int_value()), 'coreLoginTimeInMs': lambda n: setattr(self, 'core_login_time_in_ms', n.get_int_value()), 'deviceId': lambda n: setattr(self, 'device_id', n.get_str_value()), 'featureUpdateBootTimeInMs': lambda n: setattr(self, 'feature_update_boot_time_in_ms', n.get_int_value()), 'groupPolicyBootTimeInMs': lambda n: setattr(self, 'group_policy_boot_time_in_ms', n.get_int_value()), 'groupPolicyLoginTimeInMs': lambda n: setattr(self, 'group_policy_login_time_in_ms', n.get_int_value()), 'isFeatureUpdate': lambda n: setattr(self, 'is_feature_update', n.get_bool_value()), 'isFirstLogin': lambda n: setattr(self, 'is_first_login', n.get_bool_value()), 'operatingSystemVersion': lambda n: setattr(self, 'operating_system_version', n.get_str_value()), 'responsiveDesktopTimeInMs': lambda n: setattr(self, 'responsive_desktop_time_in_ms', n.get_int_value()), 'restartCategory': lambda n: setattr(self, 'restart_category', n.get_enum_value(UserExperienceAnalyticsOperatingSystemRestartCategory)), 'restartFaultBucket': lambda n: setattr(self, 'restart_fault_bucket', n.get_str_value()), 'restartStopCode': lambda n: setattr(self, 'restart_stop_code', n.get_str_value()), 'startTime': lambda n: setattr(self, 'start_time', n.get_datetime_value()), 'totalBootTimeInMs': lambda n: setattr(self, 'total_boot_time_in_ms', n.get_int_value()), 'totalLoginTimeInMs': 
lambda n: setattr(self, 'total_login_time_in_ms', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_int_value('coreBootTimeInMs', self.core_boot_time_in_ms)\n writer.write_int_value('coreLoginTimeInMs', self.core_login_time_in_ms)\n writer.write_str_value('deviceId', self.device_id)\n writer.write_int_value('featureUpdateBootTimeInMs', self.feature_update_boot_time_in_ms)\n writer.write_int_value('groupPolicyBootTimeInMs', self.group_policy_boot_time_in_ms)\n writer.write_int_value('groupPolicyLoginTimeInMs', self.group_policy_login_time_in_ms)\n writer.write_bool_value('isFeatureUpdate', self.is_feature_update)\n writer.write_bool_value('isFirstLogin', self.is_first_login)\n writer.write_str_value('operatingSystemVersion', self.operating_system_version)\n writer.write_int_value('responsiveDesktopTimeInMs', self.responsive_desktop_time_in_ms)\n writer.write_enum_value('restartCategory', self.restart_category)\n writer.write_str_value('restartFaultBucket', self.restart_fault_bucket)\n writer.write_str_value('restartStopCode', self.restart_stop_code)\n writer.write_datetime_value('startTime', self.start_time)\n writer.write_int_value('totalBootTimeInMs', self.total_boot_time_in_ms)\n writer.write_int_value('totalLoginTimeInMs', self.total_login_time_in_ms)\n<|end_body_2|>\n", "class_docstring": "The user experience analytics device startup history entity contains device boot performance history details.", "class_name": "UserExperienceAnalyticsDeviceStartupHistory", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UserExperienceAnalyticsDeviceStartupHistory:\n \"\"\"The user experience analytics device startup history entity contains device boot performance history details.\"\"\"\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> UserExperienceAnalyticsDeviceStartupHistory:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: UserExperienceAnalyticsDeviceStartupHistory\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return UserExperienceAnalyticsDeviceStartupHistory()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .user_experience_analytics_operating_system_restart_category import UserExperienceAnalyticsOperatingSystemRestartCategory\n from .entity import Entity\n from .user_experience_analytics_operating_system_restart_category import UserExperienceAnalyticsOperatingSystemRestartCategory\n fields: Dict[str, Callable[[Any], None]] = {'coreBootTimeInMs': lambda n: setattr(self, 'core_boot_time_in_ms', n.get_int_value()), 'coreLoginTimeInMs': lambda n: setattr(self, 'core_login_time_in_ms', n.get_int_value()), 'deviceId': lambda n: setattr(self, 
'device_id', n.get_str_value()), 'featureUpdateBootTimeInMs': lambda n: setattr(self, 'feature_update_boot_time_in_ms', n.get_int_value()), 'groupPolicyBootTimeInMs': lambda n: setattr(self, 'group_policy_boot_time_in_ms', n.get_int_value()), 'groupPolicyLoginTimeInMs': lambda n: setattr(self, 'group_policy_login_time_in_ms', n.get_int_value()), 'isFeatureUpdate': lambda n: setattr(self, 'is_feature_update', n.get_bool_value()), 'isFirstLogin': lambda n: setattr(self, 'is_first_login', n.get_bool_value()), 'operatingSystemVersion': lambda n: setattr(self, 'operating_system_version', n.get_str_value()), 'responsiveDesktopTimeInMs': lambda n: setattr(self, 'responsive_desktop_time_in_ms', n.get_int_value()), 'restartCategory': lambda n: setattr(self, 'restart_category', n.get_enum_value(UserExperienceAnalyticsOperatingSystemRestartCategory)), 'restartFaultBucket': lambda n: setattr(self, 'restart_fault_bucket', n.get_str_value()), 'restartStopCode': lambda n: setattr(self, 'restart_stop_code', n.get_str_value()), 'startTime': lambda n: setattr(self, 'start_time', n.get_datetime_value()), 'totalBootTimeInMs': lambda n: setattr(self, 'total_boot_time_in_ms', n.get_int_value()), 'totalLoginTimeInMs': lambda n: setattr(self, 'total_login_time_in_ms', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_int_value('coreBootTimeInMs', self.core_boot_time_in_ms)\n writer.write_int_value('coreLoginTimeInMs', self.core_login_time_in_ms)\n writer.write_str_value('deviceId', self.device_id)\n writer.write_int_value('featureUpdateBootTimeInMs', self.feature_update_boot_time_in_ms)\n writer.write_int_value('groupPolicyBootTimeInMs', self.group_policy_boot_time_in_ms)\n writer.write_int_value('groupPolicyLoginTimeInMs', self.group_policy_login_time_in_ms)\n writer.write_bool_value('isFeatureUpdate', self.is_feature_update)\n writer.write_bool_value('isFirstLogin', self.is_first_login)\n writer.write_str_value('operatingSystemVersion', self.operating_system_version)\n writer.write_int_value('responsiveDesktopTimeInMs', self.responsive_desktop_time_in_ms)\n writer.write_enum_value('restartCategory', self.restart_category)\n writer.write_str_value('restartFaultBucket', self.restart_fault_bucket)\n writer.write_str_value('restartStopCode', self.restart_stop_code)\n writer.write_datetime_value('startTime', self.start_time)\n writer.write_int_value('totalBootTimeInMs', self.total_boot_time_in_ms)\n writer.write_int_value('totalLoginTimeInMs', self.total_login_time_in_ms)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000234", "length_bytes": 8363, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: UserExperienceAnalyticsDeviceStartupHistory", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> UserExperienceAnalyticsDeviceStartupHistory"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object 
Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002875", "prompt": "Implement the Python class `UserExperienceAnalyticsDeviceStartupHistory` described below.\n\nClass description:\nThe user experience analytics device startup history entity contains device boot performance history details.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> UserExperienceAnalyticsDeviceStartupHistory: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: UserExperienceAnalyticsDeviceStartupHistory\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `UserExperienceAnalyticsDeviceStartupHistory` described below.\n\nClass description:\nThe user experience analytics device startup history entity contains device boot performance history details.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> UserExperienceAnalyticsDeviceStartupHistory: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: UserExperienceAnalyticsDeviceStartupHistory\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass UserExperienceAnalyticsDeviceStartupHistory:\n \"\"\"The user experience analytics device startup history entity contains device boot performance history details.\"\"\"\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> UserExperienceAnalyticsDeviceStartupHistory:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: UserExperienceAnalyticsDeviceStartupHistory\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return UserExperienceAnalyticsDeviceStartupHistory()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .user_experience_analytics_operating_system_restart_category import UserExperienceAnalyticsOperatingSystemRestartCategory\n from .entity import 
Entity\n from .user_experience_analytics_operating_system_restart_category import UserExperienceAnalyticsOperatingSystemRestartCategory\n fields: Dict[str, Callable[[Any], None]] = {'coreBootTimeInMs': lambda n: setattr(self, 'core_boot_time_in_ms', n.get_int_value()), 'coreLoginTimeInMs': lambda n: setattr(self, 'core_login_time_in_ms', n.get_int_value()), 'deviceId': lambda n: setattr(self, 'device_id', n.get_str_value()), 'featureUpdateBootTimeInMs': lambda n: setattr(self, 'feature_update_boot_time_in_ms', n.get_int_value()), 'groupPolicyBootTimeInMs': lambda n: setattr(self, 'group_policy_boot_time_in_ms', n.get_int_value()), 'groupPolicyLoginTimeInMs': lambda n: setattr(self, 'group_policy_login_time_in_ms', n.get_int_value()), 'isFeatureUpdate': lambda n: setattr(self, 'is_feature_update', n.get_bool_value()), 'isFirstLogin': lambda n: setattr(self, 'is_first_login', n.get_bool_value()), 'operatingSystemVersion': lambda n: setattr(self, 'operating_system_version', n.get_str_value()), 'responsiveDesktopTimeInMs': lambda n: setattr(self, 'responsive_desktop_time_in_ms', n.get_int_value()), 'restartCategory': lambda n: setattr(self, 'restart_category', n.get_enum_value(UserExperienceAnalyticsOperatingSystemRestartCategory)), 'restartFaultBucket': lambda n: setattr(self, 'restart_fault_bucket', n.get_str_value()), 'restartStopCode': lambda n: setattr(self, 'restart_stop_code', n.get_str_value()), 'startTime': lambda n: setattr(self, 'start_time', n.get_datetime_value()), 'totalBootTimeInMs': lambda n: setattr(self, 'total_boot_time_in_ms', n.get_int_value()), 'totalLoginTimeInMs': lambda n: setattr(self, 'total_login_time_in_ms', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_int_value('coreBootTimeInMs', self.core_boot_time_in_ms)\n writer.write_int_value('coreLoginTimeInMs', self.core_login_time_in_ms)\n writer.write_str_value('deviceId', self.device_id)\n writer.write_int_value('featureUpdateBootTimeInMs', self.feature_update_boot_time_in_ms)\n writer.write_int_value('groupPolicyBootTimeInMs', self.group_policy_boot_time_in_ms)\n writer.write_int_value('groupPolicyLoginTimeInMs', self.group_policy_login_time_in_ms)\n writer.write_bool_value('isFeatureUpdate', self.is_feature_update)\n writer.write_bool_value('isFirstLogin', self.is_first_login)\n writer.write_str_value('operatingSystemVersion', self.operating_system_version)\n writer.write_int_value('responsiveDesktopTimeInMs', self.responsive_desktop_time_in_ms)\n writer.write_enum_value('restartCategory', self.restart_category)\n writer.write_str_value('restartFaultBucket', self.restart_fault_bucket)\n writer.write_str_value('restartStopCode', self.restart_stop_code)\n writer.write_datetime_value('startTime', self.start_time)\n writer.write_int_value('totalBootTimeInMs', self.total_boot_time_in_ms)\n writer.write_int_value('totalLoginTimeInMs', self.total_login_time_in_ms)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass UserExperienceAnalyticsDeviceStartupHistory:\n \"\"\"The user experience analytics device startup history entity contains device boot performance history details.\"\"\"\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> UserExperienceAnalyticsDeviceStartupHistory:\n \"\"\"Creates a new instance of the appropriate 
class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: UserExperienceAnalyticsDeviceStartupHistory\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UserExperienceAnalyticsDeviceStartupHistory:\n \"\"\"The user experience analytics device startup history entity contains device boot performance history details.\"\"\"\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> UserExperienceAnalyticsDeviceStartupHistory:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: UserExperienceAnalyticsDeviceStartupHistory\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return UserExperienceAnalyticsDeviceStartupHistory()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .entity import Entity\n from .user_experience_analytics_operating_system_restart_category import UserExperienceAnalyticsOperatingSystemRestartCategory\n from .entity import Entity\n from .user_experience_analytics_operating_system_restart_category import UserExperienceAnalyticsOperatingSystemRestartCategory\n fields: Dict[str, Callable[[Any], None]] = {'coreBootTimeInMs': lambda n: setattr(self, 'core_boot_time_in_ms', n.get_int_value()), 'coreLoginTimeInMs': lambda n: setattr(self, 'core_login_time_in_ms', n.get_int_value()), 'deviceId': lambda n: setattr(self, 'device_id', n.get_str_value()), 'featureUpdateBootTimeInMs': lambda n: setattr(self, 'feature_update_boot_time_in_ms', n.get_int_value()), 'groupPolicyBootTimeInMs': lambda n: setattr(self, 'group_policy_boot_time_in_ms', n.get_int_value()), 'groupPolicyLoginTimeInMs': lambda n: setattr(self, 'group_policy_login_time_in_ms', n.get_int_value()), 'isFeatureUpdate': lambda n: setattr(self, 'is_feature_update', n.get_bool_value()), 'isFirstLogin': lambda n: setattr(self, 'is_first_login', n.get_bool_value()), 'operatingSystemVersion': lambda n: setattr(self, 'operating_system_version', n.get_str_value()), 'responsiveDesktopTimeInMs': lambda n: setattr(self, 'responsive_desktop_time_in_ms', n.get_int_value()), 'restartCategory': lambda n: setattr(self, 'restart_category', n.get_enum_value(UserExperienceAnalyticsOperatingSystemRestartCategory)), 'restartFaultBucket': lambda n: setattr(self, 'restart_fault_bucket', n.get_str_value()), 'restartStopCode': lambda n: setattr(self, 'restart_stop_code', n.get_str_value()), 'startTime': lambda n: setattr(self, 'start_time', n.get_datetime_value()), 'totalBootTimeInMs': lambda n: setattr(self, 'total_boot_time_in_ms', n.get_int_value()), 'totalLoginTimeInMs': lambda n: setattr(self, 'total_login_time_in_ms', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n 
return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_int_value('coreBootTimeInMs', self.core_boot_time_in_ms)\n writer.write_int_value('coreLoginTimeInMs', self.core_login_time_in_ms)\n writer.write_str_value('deviceId', self.device_id)\n writer.write_int_value('featureUpdateBootTimeInMs', self.feature_update_boot_time_in_ms)\n writer.write_int_value('groupPolicyBootTimeInMs', self.group_policy_boot_time_in_ms)\n writer.write_int_value('groupPolicyLoginTimeInMs', self.group_policy_login_time_in_ms)\n writer.write_bool_value('isFeatureUpdate', self.is_feature_update)\n writer.write_bool_value('isFirstLogin', self.is_first_login)\n writer.write_str_value('operatingSystemVersion', self.operating_system_version)\n writer.write_int_value('responsiveDesktopTimeInMs', self.responsive_desktop_time_in_ms)\n writer.write_enum_value('restartCategory', self.restart_category)\n writer.write_str_value('restartFaultBucket', self.restart_fault_bucket)\n writer.write_str_value('restartStopCode', self.restart_stop_code)\n writer.write_datetime_value('startTime', self.start_time)\n writer.write_int_value('totalBootTimeInMs', self.total_boot_time_in_ms)\n writer.write_int_value('totalLoginTimeInMs', self.total_login_time_in_ms)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/user_experience_analytics_device_startup_history.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "val", "star_events_count": 135} {"blob_id": "307a2f37d11020c7123166945c957bdfa984b54d", "bodies": ["assert isinstance(block_string, str)\nops = block_string.split('_')\noptions = {}\nfor op in ops:\n splits = re.split('(\\\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\nassert 's' in options and len(options['s']) == 1 or (len(options['s']) == 2 and options['s'][0] == options['s'][1])\nreturn BlockArgs(kernel_size=int(options['k']), num_repeat=int(options['r']), input_filters=int(options['i']), output_filters=int(options['o']), expand_ratio=int(options['e']), id_skip='noskip' not in block_string, se_ratio=float(options['se']) if 'se' in options else None, stride=[int(options['s'][0])])", "args = [f'r{block.num_repeat:d}', f'k{block.kernel_size:d}', f's{block.strides[0]:d}{block.strides[1]:d}', f'e{block.expand_ratio}', f'i{block.input_filters:d}', f'o{block.output_filters:d}']\nif 0 < block.se_ratio <= 1:\n args.append(f'se{block.se_ratio}')\nif block.id_skip is False:\n args.append('noskip')\nreturn '_'.join(args)", "assert isinstance(string_list, list)\nblocks_args = []\nfor block_string in string_list:\n blocks_args.append(BlockDecoder._decode_block_string(block_string))\nreturn blocks_args", "block_strings = []\nfor block in blocks_args:\n block_strings.append(BlockDecoder._encode_block_string(block))\nreturn block_strings"], "bodies_text": "<|body_start_0|>\n assert isinstance(block_string, str)\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split('(\\\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n assert 's' in options and len(options['s']) == 1 or (len(options['s']) == 2 and options['s'][0] == options['s'][1])\n return BlockArgs(kernel_size=int(options['k']), num_repeat=int(options['r']), input_filters=int(options['i']), 
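The get_field_deserializers bodies in the UserExperienceAnalyticsDeviceStartupHistory record follow the convention of Kiota-generated models: map each wire-format field name to a setter callback, then merge in the parent's map so inherited fields keep deserializing. A hand-rolled sketch of the same pattern, where FakeNode and the class names are illustrative stand-ins rather than the msgraph SDK's own types:

```python
from typing import Any, Callable, Dict

class FakeNode:
    """Stand-in for Kiota's ParseNode: wraps one raw value."""
    def __init__(self, value: Any) -> None:
        self.value = value
    def get_int_value(self) -> int:
        return int(self.value)
    def get_str_value(self) -> str:
        return str(self.value)

class Entity:
    def get_field_deserializers(self) -> Dict[str, Callable[[Any], None]]:
        return {'id': lambda n: setattr(self, 'id', n.get_str_value())}

class StartupHistory(Entity):
    def get_field_deserializers(self) -> Dict[str, Callable[[Any], None]]:
        fields = {'coreBootTimeInMs': lambda n: setattr(
            self, 'core_boot_time_in_ms', n.get_int_value())}
        fields.update(super().get_field_deserializers())  # inherit parent fields
        return fields

record = StartupHistory()
for key, raw in {'id': 'abc', 'coreBootTimeInMs': '812'}.items():
    record.get_field_deserializers()[key](FakeNode(raw))  # dispatch by wire name
assert (record.id, record.core_boot_time_in_ms) == ('abc', 812)
```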
output_filters=int(options['o']), expand_ratio=int(options['e']), id_skip='noskip' not in block_string, se_ratio=float(options['se']) if 'se' in options else None, stride=[int(options['s'][0])])\n<|end_body_0|>\n\n<|body_start_1|>\n args = [f'r{block.num_repeat:d}', f'k{block.kernel_size:d}', f's{block.strides[0]:d}{block.strides[1]:d}', f'e{block.expand_ratio}', f'i{block.input_filters:d}', f'o{block.output_filters:d}']\n if 0 < block.se_ratio <= 1:\n args.append(f'se{block.se_ratio}')\n if block.id_skip is False:\n args.append('noskip')\n return '_'.join(args)\n<|end_body_1|>\n\n<|body_start_2|>\n assert isinstance(string_list, list)\n blocks_args = []\n for block_string in string_list:\n blocks_args.append(BlockDecoder._decode_block_string(block_string))\n return blocks_args\n<|end_body_2|>\n\n<|body_start_3|>\n block_strings = []\n for block in blocks_args:\n block_strings.append(BlockDecoder._encode_block_string(block))\n return block_strings\n<|end_body_3|>\n", "class_docstring": "Block Decoder for readability, straight from the official TensorFlow repository", "class_name": "BlockDecoder", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BlockDecoder:\n \"\"\"Block Decoder for readability, straight from the official TensorFlow repository\"\"\"\n\n def _decode_block_string(block_string):\n \"\"\"Gets a block through a string notation of arguments.\"\"\"\n <|body_0|>\n\n def _encode_block_string(block):\n \"\"\"Encodes a block to a string.\"\"\"\n <|body_1|>\n\n def decode(string_list):\n \"\"\"Decodes a list of string notations to specify blocks inside the network. :param string_list: a list of strings, each string is a notation of block :return: a list of BlockArgs namedtuples of block args\"\"\"\n <|body_2|>\n\n def encode(blocks_args):\n \"\"\"Encodes a list of BlockArgs to a list of strings. 
:param blocks_args: a list of BlockArgs namedtuples of block args :return: a list of strings, each string is a notation of block\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert isinstance(block_string, str)\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split('(\\\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n assert 's' in options and len(options['s']) == 1 or (len(options['s']) == 2 and options['s'][0] == options['s'][1])\n return BlockArgs(kernel_size=int(options['k']), num_repeat=int(options['r']), input_filters=int(options['i']), output_filters=int(options['o']), expand_ratio=int(options['e']), id_skip='noskip' not in block_string, se_ratio=float(options['se']) if 'se' in options else None, stride=[int(options['s'][0])])\n<|end_body_0|>\n\n<|body_start_1|>\n args = [f'r{block.num_repeat:d}', f'k{block.kernel_size:d}', f's{block.strides[0]:d}{block.strides[1]:d}', f'e{block.expand_ratio}', f'i{block.input_filters:d}', f'o{block.output_filters:d}']\n if 0 < block.se_ratio <= 1:\n args.append(f'se{block.se_ratio}')\n if block.id_skip is False:\n args.append('noskip')\n return '_'.join(args)\n<|end_body_1|>\n\n<|body_start_2|>\n assert isinstance(string_list, list)\n blocks_args = []\n for block_string in string_list:\n blocks_args.append(BlockDecoder._decode_block_string(block_string))\n return blocks_args\n<|end_body_2|>\n\n<|body_start_3|>\n block_strings = []\n for block in blocks_args:\n block_strings.append(BlockDecoder._encode_block_string(block))\n return block_strings\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000235", "length_bytes": 14750, "license_type": "permissive", "methods": [{"docstring": "Gets a block through a string notation of arguments.", "name": "_decode_block_string", "signature": "def _decode_block_string(block_string)"}, {"docstring": "Encodes a block to a string.", "name": "_encode_block_string", "signature": "def _encode_block_string(block)"}, {"docstring": "Decodes a list of string notations to specify blocks inside the network. :param string_list: a list of strings, each string is a notation of block :return: a list of BlockArgs namedtuples of block args", "name": "decode", "signature": "def decode(string_list)"}, {"docstring": "Encodes a list of BlockArgs to a list of strings. :param blocks_args: a list of BlockArgs namedtuples of block args :return: a list of strings, each string is a notation of block", "name": "encode", "signature": "def encode(blocks_args)"}], "n_methods": 4, "prompt": "Implement the Python class `BlockDecoder` described below.\n\nClass description:\nBlock Decoder for readability, straight from the official TensorFlow repository\n\nMethod signatures and docstrings:\n- def _decode_block_string(block_string): Gets a block through a string notation of arguments.\n- def _encode_block_string(block): Encodes a block to a string.\n- def decode(string_list): Decodes a list of string notations to specify blocks inside the network. :param string_list: a list of strings, each string is a notation of block :return: a list of BlockArgs namedtuples of block args\n- def encode(blocks_args): Encodes a list of BlockArgs to a list of strings. 
:param blocks_args: a list of BlockArgs namedtuples of block args :return: a list of strings, each string is a notation of block", "prompted_full_text": "Implement the Python class `BlockDecoder` described below.\n\nClass description:\nBlock Decoder for readability, straight from the official TensorFlow repository\n\nMethod signatures and docstrings:\n- def _decode_block_string(block_string): Gets a block through a string notation of arguments.\n- def _encode_block_string(block): Encodes a block to a string.\n- def decode(string_list): Decodes a list of string notations to specify blocks inside the network. :param string_list: a list of strings, each string is a notation of block :return: a list of BlockArgs namedtuples of block args\n- def encode(blocks_args): Encodes a list of BlockArgs to a list of strings. :param blocks_args: a list of BlockArgs namedtuples of block args :return: a list of strings, each string is a notation of block\n\n<|skeleton|>\nclass BlockDecoder:\n \"\"\"Block Decoder for readability, straight from the official TensorFlow repository\"\"\"\n\n def _decode_block_string(block_string):\n \"\"\"Gets a block through a string notation of arguments.\"\"\"\n <|body_0|>\n\n def _encode_block_string(block):\n \"\"\"Encodes a block to a string.\"\"\"\n <|body_1|>\n\n def decode(string_list):\n \"\"\"Decodes a list of string notations to specify blocks inside the network. :param string_list: a list of strings, each string is a notation of block :return: a list of BlockArgs namedtuples of block args\"\"\"\n <|body_2|>\n\n def encode(blocks_args):\n \"\"\"Encodes a list of BlockArgs to a list of strings. :param blocks_args: a list of BlockArgs namedtuples of block args :return: a list of strings, each string is a notation of block\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert isinstance(block_string, str)\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split('(\\\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n assert 's' in options and len(options['s']) == 1 or (len(options['s']) == 2 and options['s'][0] == options['s'][1])\n return BlockArgs(kernel_size=int(options['k']), num_repeat=int(options['r']), input_filters=int(options['i']), output_filters=int(options['o']), expand_ratio=int(options['e']), id_skip='noskip' not in block_string, se_ratio=float(options['se']) if 'se' in options else None, stride=[int(options['s'][0])])\n<|end_body_0|>\n\n<|body_start_1|>\n args = [f'r{block.num_repeat:d}', f'k{block.kernel_size:d}', f's{block.strides[0]:d}{block.strides[1]:d}', f'e{block.expand_ratio}', f'i{block.input_filters:d}', f'o{block.output_filters:d}']\n if 0 < block.se_ratio <= 1:\n args.append(f'se{block.se_ratio}')\n if block.id_skip is False:\n args.append('noskip')\n return '_'.join(args)\n<|end_body_1|>\n\n<|body_start_2|>\n assert isinstance(string_list, list)\n blocks_args = []\n for block_string in string_list:\n blocks_args.append(BlockDecoder._decode_block_string(block_string))\n return blocks_args\n<|end_body_2|>\n\n<|body_start_3|>\n block_strings = []\n for block in blocks_args:\n block_strings.append(BlockDecoder._encode_block_string(block))\n return block_strings\n<|end_body_3|>\n", "revision_id": "06839b08d8e8f274c02a6bcd31bf1b32d3dc04e4", "skeleton": "<|skeleton|>\nclass BlockDecoder:\n \"\"\"Block Decoder for readability, straight from the official TensorFlow repository\"\"\"\n\n def _decode_block_string(block_string):\n \"\"\"Gets a block through a string notation of 
arguments.\"\"\"\n <|body_0|>\n\n def _encode_block_string(block):\n \"\"\"Encodes a block to a string.\"\"\"\n <|body_1|>\n\n def decode(string_list):\n \"\"\"Decodes a list of string notations to specify blocks inside the network. :param string_list: a list of strings, each string is a notation of block :return: a list of BlockArgs namedtuples of block args\"\"\"\n <|body_2|>\n\n def encode(blocks_args):\n \"\"\"Encodes a list of BlockArgs to a list of strings. :param blocks_args: a list of BlockArgs namedtuples of block args :return: a list of strings, each string is a notation of block\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BlockDecoder:\n \"\"\"Block Decoder for readability, straight from the official TensorFlow repository\"\"\"\n\n def _decode_block_string(block_string):\n \"\"\"Gets a block through a string notation of arguments.\"\"\"\n assert isinstance(block_string, str)\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split('(\\\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n assert 's' in options and len(options['s']) == 1 or (len(options['s']) == 2 and options['s'][0] == options['s'][1])\n return BlockArgs(kernel_size=int(options['k']), num_repeat=int(options['r']), input_filters=int(options['i']), output_filters=int(options['o']), expand_ratio=int(options['e']), id_skip='noskip' not in block_string, se_ratio=float(options['se']) if 'se' in options else None, stride=[int(options['s'][0])])\n\n def _encode_block_string(block):\n \"\"\"Encodes a block to a string.\"\"\"\n args = [f'r{block.num_repeat:d}', f'k{block.kernel_size:d}', f's{block.strides[0]:d}{block.strides[1]:d}', f'e{block.expand_ratio}', f'i{block.input_filters:d}', f'o{block.output_filters:d}']\n if 0 < block.se_ratio <= 1:\n args.append(f'se{block.se_ratio}')\n if block.id_skip is False:\n args.append('noskip')\n return '_'.join(args)\n\n def decode(string_list):\n \"\"\"Decodes a list of string notations to specify blocks inside the network. :param string_list: a list of strings, each string is a notation of block :return: a list of BlockArgs namedtuples of block args\"\"\"\n assert isinstance(string_list, list)\n blocks_args = []\n for block_string in string_list:\n blocks_args.append(BlockDecoder._decode_block_string(block_string))\n return blocks_args\n\n def encode(blocks_args):\n \"\"\"Encodes a list of BlockArgs to a list of strings. 
:param blocks_args: a list of BlockArgs namedtuples of block args :return: a list of strings, each string is a notation of block\"\"\"\n block_strings = []\n for block in blocks_args:\n block_strings.append(BlockDecoder._encode_block_string(block))\n return block_strings\n", "source": "the_stack_v2_python_sparse", "source_path": "neodroidvision/detection/single_stage/ssd/architecture/backbones/efficient_net.py", "source_repo": "aivclab/vision", "split": "val", "star_events_count": 1} {"blob_id": "dedeb942280b6694051aeffc932caef4fea109ad", "bodies": ["results = self.form.search()\nif results.count() == 0 and len(self.request.GET) > 0 and (not 'q' in self.request.GET):\n results = SearchQuerySet()\nself.vs_query = ''\nif 'q' in self.request.GET:\n self.vs_query += ' text:' + self.request.GET.get('q')\ndocuments_ids = self.get_documents().values_list('id', flat=True)\nresults = results.filter(document_id__in=documents_ids)\nreturn results", "documents = Document.objects.all()\nif self.request.user.is_authenticated():\n permited_docs = get_objects_for_user(self.request.user, 'documents.access_document', Document, use_groups=True).values_list('id', flat=True)\n documents = documents.filter(Q(id__in=permited_docs) | Q(public=True))\nelse:\n documents = documents.filter(public=True)\nform = SearchDocumentForm(self.request.GET)\nif form.is_valid():\n opts = {}\n for key in form.cleaned_data:\n if form.cleaned_data[key] != '':\n opts[key + '__icontains'] = form.cleaned_data[key]\n self.vs_query += ' ' + key + ':' + form.cleaned_data[key]\n documents = documents.filter(**opts)\nreturn documents", "documents = SortedDict()\nfor r in self.results:\n if r.document_id in documents:\n documents[r.document_id]['pages'].append(r.object)\n else:\n documents[r.document_id] = {'id': r.object.document.id, 'document': r.object.document.document, 'pages': [r.object]}\npaginator = Paginator(documents.items(), 5)\ntry:\n page = self.request.GET.get('pag')\n docs = paginator.page(page)\nexcept PageNotAnInteger:\n docs = paginator.page(1)\nexcept EmptyPage:\n docs = paginator.page(paginator.num_pages)\ncp = self.request.GET.copy()\nif 'pag' in cp:\n cp.pop('pag')\nreturn {'docs': docs, 'total': len(documents), 'vs_query': self.vs_query, 'refs_fields': None, 'url_query': cp.urlencode}"], "bodies_text": "<|body_start_0|>\n results = self.form.search()\n if results.count() == 0 and len(self.request.GET) > 0 and (not 'q' in self.request.GET):\n results = SearchQuerySet()\n self.vs_query = ''\n if 'q' in self.request.GET:\n self.vs_query += ' text:' + self.request.GET.get('q')\n documents_ids = self.get_documents().values_list('id', flat=True)\n results = results.filter(document_id__in=documents_ids)\n return results\n<|end_body_0|>\n\n<|body_start_1|>\n documents = Document.objects.all()\n if self.request.user.is_authenticated():\n permited_docs = get_objects_for_user(self.request.user, 'documents.access_document', Document, use_groups=True).values_list('id', flat=True)\n documents = documents.filter(Q(id__in=permited_docs) | Q(public=True))\n else:\n documents = documents.filter(public=True)\n form = SearchDocumentForm(self.request.GET)\n if form.is_valid():\n opts = {}\n for key in form.cleaned_data:\n if form.cleaned_data[key] != '':\n opts[key + '__icontains'] = form.cleaned_data[key]\n self.vs_query += ' ' + key + ':' + form.cleaned_data[key]\n documents = documents.filter(**opts)\n return documents\n<|end_body_1|>\n\n<|body_start_2|>\n documents = SortedDict()\n for r in self.results:\n if r.document_id in 
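The option parser in BlockDecoder._decode_block_string hinges on re.split with a capturing group, which keeps the matched text in the result, so each underscore-separated token splits into a key and a value at the first digit. A short trace of that behavior (also worth noting: the encoder reads block.strides while the decoder constructs stride=..., so a faithful round trip needs both attribute spellings on BlockArgs):

```python
# How re.split('(\\d.*)', op) tokenizes an EfficientNet block string:
# the capturing group keeps the matched value alongside the key.
import re

for op in 'r1_k3_s11_e1_i32_o16_se0.25'.split('_'):
    print(re.split('(\\d.*)', op)[:2])
# -> ['r', '1'] ['k', '3'] ['s', '11'] ['e', '1'] ['i', '32'] ['o', '16'] ['se', '0.25']
```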
documents:\n documents[r.document_id]['pages'].append(r.object)\n else:\n documents[r.document_id] = {'id': r.object.document.id, 'document': r.object.document.document, 'pages': [r.object]}\n paginator = Paginator(documents.items(), 5)\n try:\n page = self.request.GET.get('pag')\n docs = paginator.page(page)\n except PageNotAnInteger:\n docs = paginator.page(1)\n except EmptyPage:\n docs = paginator.page(paginator.num_pages)\n cp = self.request.GET.copy()\n if 'pag' in cp:\n cp.pop('pag')\n return {'docs': docs, 'total': len(documents), 'vs_query': self.vs_query, 'refs_fields': None, 'url_query': cp.urlencode}\n<|end_body_2|>\n", "class_docstring": "", "class_name": "SearchDocumentView", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SearchDocumentView:\n\n def get_results(self):\n \"\"\"Fetches the results via the form. Returns an empty list if there's no query to search with.\"\"\"\n <|body_0|>\n\n def get_documents(self):\n \"\"\"Return the documents accordingly to specific search field\"\"\"\n <|body_1|>\n\n def extra_context(self):\n \"\"\"Allows the addition of more context variables as needed. Must return a dictionary.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n results = self.form.search()\n if results.count() == 0 and len(self.request.GET) > 0 and (not 'q' in self.request.GET):\n results = SearchQuerySet()\n self.vs_query = ''\n if 'q' in self.request.GET:\n self.vs_query += ' text:' + self.request.GET.get('q')\n documents_ids = self.get_documents().values_list('id', flat=True)\n results = results.filter(document_id__in=documents_ids)\n return results\n<|end_body_0|>\n\n<|body_start_1|>\n documents = Document.objects.all()\n if self.request.user.is_authenticated():\n permited_docs = get_objects_for_user(self.request.user, 'documents.access_document', Document, use_groups=True).values_list('id', flat=True)\n documents = documents.filter(Q(id__in=permited_docs) | Q(public=True))\n else:\n documents = documents.filter(public=True)\n form = SearchDocumentForm(self.request.GET)\n if form.is_valid():\n opts = {}\n for key in form.cleaned_data:\n if form.cleaned_data[key] != '':\n opts[key + '__icontains'] = form.cleaned_data[key]\n self.vs_query += ' ' + key + ':' + form.cleaned_data[key]\n documents = documents.filter(**opts)\n return documents\n<|end_body_1|>\n\n<|body_start_2|>\n documents = SortedDict()\n for r in self.results:\n if r.document_id in documents:\n documents[r.document_id]['pages'].append(r.object)\n else:\n documents[r.document_id] = {'id': r.object.document.id, 'document': r.object.document.document, 'pages': [r.object]}\n paginator = Paginator(documents.items(), 5)\n try:\n page = self.request.GET.get('pag')\n docs = paginator.page(page)\n except PageNotAnInteger:\n docs = paginator.page(1)\n except EmptyPage:\n docs = paginator.page(paginator.num_pages)\n cp = self.request.GET.copy()\n if 'pag' in cp:\n cp.pop('pag')\n return {'docs': docs, 'total': len(documents), 'vs_query': self.vs_query, 'refs_fields': None, 'url_query': cp.urlencode}\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000236", "length_bytes": 12858, "license_type": "no_license", "methods": [{"docstring": "Fetches the results via the form. 
Returns an empty list if there's no query to search with.", "name": "get_results", "signature": "def get_results(self)"}, {"docstring": "Return the documents accordingly to specific search field", "name": "get_documents", "signature": "def get_documents(self)"}, {"docstring": "Allows the addition of more context variables as needed. Must return a dictionary.", "name": "extra_context", "signature": "def extra_context(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004833", "prompt": "Implement the Python class `SearchDocumentView` described below.\n\nClass description:\nImplement the SearchDocumentView class.\n\nMethod signatures and docstrings:\n- def get_results(self): Fetches the results via the form. Returns an empty list if there's no query to search with.\n- def get_documents(self): Return the documents accordingly to specific search field\n- def extra_context(self): Allows the addition of more context variables as needed. Must return a dictionary.", "prompted_full_text": "Implement the Python class `SearchDocumentView` described below.\n\nClass description:\nImplement the SearchDocumentView class.\n\nMethod signatures and docstrings:\n- def get_results(self): Fetches the results via the form. Returns an empty list if there's no query to search with.\n- def get_documents(self): Return the documents accordingly to specific search field\n- def extra_context(self): Allows the addition of more context variables as needed. Must return a dictionary.\n\n<|skeleton|>\nclass SearchDocumentView:\n\n def get_results(self):\n \"\"\"Fetches the results via the form. Returns an empty list if there's no query to search with.\"\"\"\n <|body_0|>\n\n def get_documents(self):\n \"\"\"Return the documents accordingly to specific search field\"\"\"\n <|body_1|>\n\n def extra_context(self):\n \"\"\"Allows the addition of more context variables as needed. 
Must return a dictionary.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n results = self.form.search()\n if results.count() == 0 and len(self.request.GET) > 0 and (not 'q' in self.request.GET):\n results = SearchQuerySet()\n self.vs_query = ''\n if 'q' in self.request.GET:\n self.vs_query += ' text:' + self.request.GET.get('q')\n documents_ids = self.get_documents().values_list('id', flat=True)\n results = results.filter(document_id__in=documents_ids)\n return results\n<|end_body_0|>\n\n<|body_start_1|>\n documents = Document.objects.all()\n if self.request.user.is_authenticated():\n permited_docs = get_objects_for_user(self.request.user, 'documents.access_document', Document, use_groups=True).values_list('id', flat=True)\n documents = documents.filter(Q(id__in=permited_docs) | Q(public=True))\n else:\n documents = documents.filter(public=True)\n form = SearchDocumentForm(self.request.GET)\n if form.is_valid():\n opts = {}\n for key in form.cleaned_data:\n if form.cleaned_data[key] != '':\n opts[key + '__icontains'] = form.cleaned_data[key]\n self.vs_query += ' ' + key + ':' + form.cleaned_data[key]\n documents = documents.filter(**opts)\n return documents\n<|end_body_1|>\n\n<|body_start_2|>\n documents = SortedDict()\n for r in self.results:\n if r.document_id in documents:\n documents[r.document_id]['pages'].append(r.object)\n else:\n documents[r.document_id] = {'id': r.object.document.id, 'document': r.object.document.document, 'pages': [r.object]}\n paginator = Paginator(documents.items(), 5)\n try:\n page = self.request.GET.get('pag')\n docs = paginator.page(page)\n except PageNotAnInteger:\n docs = paginator.page(1)\n except EmptyPage:\n docs = paginator.page(paginator.num_pages)\n cp = self.request.GET.copy()\n if 'pag' in cp:\n cp.pop('pag')\n return {'docs': docs, 'total': len(documents), 'vs_query': self.vs_query, 'refs_fields': None, 'url_query': cp.urlencode}\n<|end_body_2|>\n", "revision_id": "29352a49a01bedb57be85896d0d31e627bb9e5bf", "skeleton": "<|skeleton|>\nclass SearchDocumentView:\n\n def get_results(self):\n \"\"\"Fetches the results via the form. Returns an empty list if there's no query to search with.\"\"\"\n <|body_0|>\n\n def get_documents(self):\n \"\"\"Return the documents accordingly to specific search field\"\"\"\n <|body_1|>\n\n def extra_context(self):\n \"\"\"Allows the addition of more context variables as needed. Must return a dictionary.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SearchDocumentView:\n def get_results(self):\n \"\"\"Fetches the results via the form. 
Returns an empty list if there's no query to search with.\"\"\"\n results = self.form.search()\n if results.count() == 0 and len(self.request.GET) > 0 and (not 'q' in self.request.GET):\n results = SearchQuerySet()\n self.vs_query = ''\n if 'q' in self.request.GET:\n self.vs_query += ' text:' + self.request.GET.get('q')\n documents_ids = self.get_documents().values_list('id', flat=True)\n results = results.filter(document_id__in=documents_ids)\n return results\n\n def get_documents(self):\n \"\"\"Return the documents accordingly to specific search field\"\"\"\n documents = Document.objects.all()\n if self.request.user.is_authenticated():\n permited_docs = get_objects_for_user(self.request.user, 'documents.access_document', Document, use_groups=True).values_list('id', flat=True)\n documents = documents.filter(Q(id__in=permited_docs) | Q(public=True))\n else:\n documents = documents.filter(public=True)\n form = SearchDocumentForm(self.request.GET)\n if form.is_valid():\n opts = {}\n for key in form.cleaned_data:\n if form.cleaned_data[key] != '':\n opts[key + '__icontains'] = form.cleaned_data[key]\n self.vs_query += ' ' + key + ':' + form.cleaned_data[key]\n documents = documents.filter(**opts)\n return documents\n\n def extra_context(self):\n \"\"\"Allows the addition of more context variables as needed. Must return a dictionary.\"\"\"\n documents = SortedDict()\n for r in self.results:\n if r.document_id in documents:\n documents[r.document_id]['pages'].append(r.object)\n else:\n documents[r.document_id] = {'id': r.object.document.id, 'document': r.object.document.document, 'pages': [r.object]}\n paginator = Paginator(documents.items(), 5)\n try:\n page = self.request.GET.get('pag')\n docs = paginator.page(page)\n except PageNotAnInteger:\n docs = paginator.page(1)\n except EmptyPage:\n docs = paginator.page(paginator.num_pages)\n cp = self.request.GET.copy()\n if 'pag' in cp:\n cp.pop('pag')\n return {'docs': docs, 'total': len(documents), 'vs_query': self.vs_query, 'refs_fields': None, 'url_query': cp.urlencode}\n", "source": "the_stack_v2_python_sparse", "source_path": "documents/views.py", "source_repo": "yierva/festos", "split": "val", "star_events_count": 0} {"blob_id": "62386ebc8b047e999fc824228178cfa5c6203114", "bodies": ["self.size = 0\nself.head, self.tail = (Node(0), Node(0))\nself.head.next = self.tail\nself.tail.prev = self.head", "if index < 0 or index >= self.size:\n return -1\nif index + 1 < self.size - index:\n ptr = self.head\n for _ in range(index + 1):\n ptr = ptr.next\nelse:\n ptr = self.tail\n for _ in range(self.size - index):\n ptr = ptr.prev\nreturn ptr.val", "pred, succ = (self.head, self.head.next)\ncur = Node(val)\ncur.next = succ\ncur.prev = pred\npred.next = cur\nsucc.prev = cur\nself.size += 1", "pred, succ = (self.tail.prev, self.tail)\ncur = Node(val)\ncur.next = succ\ncur.prev = pred\npred.next = cur\nsucc.prev = cur\nself.size += 1", "if index > self.size:\n return\nif index < 0:\n index = 0\nif index < self.size - index:\n pred = self.head\n for _ in range(index):\n pred = pred.next\n succ = pred.next\nelse:\n succ = self.tail\n for _ in range(self.size - index):\n succ = succ.prev\n pred = succ.prev\nself.size += 1\nto_add = ListNode(val)\nto_add.prev = pred\nto_add.next = succ\npred.next = to_add\nsucc.prev = to_add", "if index < 0 or index >= self.size:\n return\nif index < self.size - index:\n pred = self.head\n for _ in range(index):\n pred = pred.next\n succ = pred.next.next\nelse:\n succ = self.tail\n for _ in range(self.size - index - 1):\n succ 
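SearchDocumentView.extra_context above uses the standard Django page-clamping idiom: let Paginator validate the requested page number, fall back to the first page on non-integer input, and to the last page on overshoot. The same logic in isolation (the function name here is ours, for illustration; only Django's paginator module is required):

```python
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator

def clamp_page(items, requested, per_page=5):
    paginator = Paginator(items, per_page)
    try:
        return paginator.page(requested)
    except PageNotAnInteger:
        return paginator.page(1)                     # garbage input -> first page
    except EmptyPage:
        return paginator.page(paginator.num_pages)   # overshoot -> last page

assert clamp_page(range(12), 'abc').number == 1
assert clamp_page(range(12), 99).number == 3
```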
= succ.prev\n pred = succ.prev.prev\nself.size -= 1\npred.next = succ\nsucc.prev = pred"], "bodies_text": "<|body_start_0|>\n self.size = 0\n self.head, self.tail = (Node(0), Node(0))\n self.head.next = self.tail\n self.tail.prev = self.head\n<|end_body_0|>\n\n<|body_start_1|>\n if index < 0 or index >= self.size:\n return -1\n if index + 1 < self.size - index:\n ptr = self.head\n for _ in range(index + 1):\n ptr = ptr.next\n else:\n ptr = self.tail\n for _ in range(self.size - index):\n ptr = ptr.prev\n return ptr.val\n<|end_body_1|>\n\n<|body_start_2|>\n pred, succ = (self.head, self.head.next)\n cur = Node(val)\n cur.next = succ\n cur.prev = pred\n pred.next = cur\n succ.prev = cur\n self.size += 1\n<|end_body_2|>\n\n<|body_start_3|>\n pred, succ = (self.tail.prev, self.tail)\n cur = Node(val)\n cur.next = succ\n cur.prev = pred\n pred.next = cur\n succ.prev = cur\n self.size += 1\n<|end_body_3|>\n\n<|body_start_4|>\n if index > self.size:\n return\n if index < 0:\n index = 0\n if index < self.size - index:\n pred = self.head\n for _ in range(index):\n pred = pred.next\n succ = pred.next\n else:\n succ = self.tail\n for _ in range(self.size - index):\n succ = succ.prev\n pred = succ.prev\n self.size += 1\n to_add = ListNode(val)\n to_add.prev = pred\n to_add.next = succ\n pred.next = to_add\n succ.prev = to_add\n<|end_body_4|>\n\n<|body_start_5|>\n if index < 0 or index >= self.size:\n return\n if index < self.size - index:\n pred = self.head\n for _ in range(index):\n pred = pred.next\n succ = pred.next.next\n else:\n succ = self.tail\n for _ in range(self.size - index - 1):\n succ = succ.prev\n pred = succ.prev.prev\n self.size -= 1\n pred.next = succ\n succ.prev = pred\n<|end_body_5|>\n", "class_docstring": "", "class_name": "MyLinkedList", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MyLinkedList:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def get(self, index: int) -> int:\n \"\"\"Get the value of the index-th node in the linked list. If the index is invalid, return -1.\"\"\"\n <|body_1|>\n\n def addAtHead(self, val: int) -> None:\n \"\"\"Add a node of value val before the first element of the linked list. After the insertion, the new node will be the first node of the linked list.\"\"\"\n <|body_2|>\n\n def addAtTail(self, val: int) -> None:\n \"\"\"Append a node of value val to the last element of the linked list.\"\"\"\n <|body_3|>\n\n def addAtIndex(self, index: int, val: int) -> None:\n \"\"\"Add a node of value val before the index-th node in the linked list. If index equals to the length of linked list, the node will be appended to the end of linked list. 
If index is greater than the length, the node will not be inserted.\"\"\"\n <|body_4|>\n\n def deleteAtIndex(self, index: int) -> None:\n \"\"\"Delete the index-th node in the linked list, if the index is valid.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.size = 0\n self.head, self.tail = (Node(0), Node(0))\n self.head.next = self.tail\n self.tail.prev = self.head\n<|end_body_0|>\n\n<|body_start_1|>\n if index < 0 or index >= self.size:\n return -1\n if index + 1 < self.size - index:\n ptr = self.head\n for _ in range(index + 1):\n ptr = ptr.next\n else:\n ptr = self.tail\n for _ in range(self.size - index):\n ptr = ptr.prev\n return ptr.val\n<|end_body_1|>\n\n<|body_start_2|>\n pred, succ = (self.head, self.head.next)\n cur = Node(val)\n cur.next = succ\n cur.prev = pred\n pred.next = cur\n succ.prev = cur\n self.size += 1\n<|end_body_2|>\n\n<|body_start_3|>\n pred, succ = (self.tail.prev, self.tail)\n cur = Node(val)\n cur.next = succ\n cur.prev = pred\n pred.next = cur\n succ.prev = cur\n self.size += 1\n<|end_body_3|>\n\n<|body_start_4|>\n if index > self.size:\n return\n if index < 0:\n index = 0\n if index < self.size - index:\n pred = self.head\n for _ in range(index):\n pred = pred.next\n succ = pred.next\n else:\n succ = self.tail\n for _ in range(self.size - index):\n succ = succ.prev\n pred = succ.prev\n self.size += 1\n to_add = ListNode(val)\n to_add.prev = pred\n to_add.next = succ\n pred.next = to_add\n succ.prev = to_add\n<|end_body_4|>\n\n<|body_start_5|>\n if index < 0 or index >= self.size:\n return\n if index < self.size - index:\n pred = self.head\n for _ in range(index):\n pred = pred.next\n succ = pred.next.next\n else:\n succ = self.tail\n for _ in range(self.size - index - 1):\n succ = succ.prev\n pred = succ.prev.prev\n self.size -= 1\n pred.next = succ\n succ.prev = pred\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_val_000237", "length_bytes": 3992, "license_type": "no_license", "methods": [{"docstring": "Initialize your data structure here.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Get the value of the index-th node in the linked list. If the index is invalid, return -1.", "name": "get", "signature": "def get(self, index: int) -> int"}, {"docstring": "Add a node of value val before the first element of the linked list. After the insertion, the new node will be the first node of the linked list.", "name": "addAtHead", "signature": "def addAtHead(self, val: int) -> None"}, {"docstring": "Append a node of value val to the last element of the linked list.", "name": "addAtTail", "signature": "def addAtTail(self, val: int) -> None"}, {"docstring": "Add a node of value val before the index-th node in the linked list. If index equals to the length of linked list, the node will be appended to the end of linked list. 
If index is greater than the length, the node will not be inserted.", "name": "addAtIndex", "signature": "def addAtIndex(self, index: int, val: int) -> None"}, {"docstring": "Delete the index-th node in the linked list, if the index is valid.", "name": "deleteAtIndex", "signature": "def deleteAtIndex(self, index: int) -> None"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_005523", "prompt": "Implement the Python class `MyLinkedList` described below.\n\nClass description:\nImplement the MyLinkedList class.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize your data structure here.\n- def get(self, index: int) -> int: Get the value of the index-th node in the linked list. If the index is invalid, return -1.\n- def addAtHead(self, val: int) -> None: Add a node of value val before the first element of the linked list. After the insertion, the new node will be the first node of the linked list.\n- def addAtTail(self, val: int) -> None: Append a node of value val to the last element of the linked list.\n- def addAtIndex(self, index: int, val: int) -> None: Add a node of value val before the index-th node in the linked list. If index equals to the length of linked list, the node will be appended to the end of linked list. If index is greater than the length, the node will not be inserted.\n- def deleteAtIndex(self, index: int) -> None: Delete the index-th node in the linked list, if the index is valid.", "prompted_full_text": "Implement the Python class `MyLinkedList` described below.\n\nClass description:\nImplement the MyLinkedList class.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize your data structure here.\n- def get(self, index: int) -> int: Get the value of the index-th node in the linked list. If the index is invalid, return -1.\n- def addAtHead(self, val: int) -> None: Add a node of value val before the first element of the linked list. After the insertion, the new node will be the first node of the linked list.\n- def addAtTail(self, val: int) -> None: Append a node of value val to the last element of the linked list.\n- def addAtIndex(self, index: int, val: int) -> None: Add a node of value val before the index-th node in the linked list. If index equals to the length of linked list, the node will be appended to the end of linked list. If index is greater than the length, the node will not be inserted.\n- def deleteAtIndex(self, index: int) -> None: Delete the index-th node in the linked list, if the index is valid.\n\n<|skeleton|>\nclass MyLinkedList:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def get(self, index: int) -> int:\n \"\"\"Get the value of the index-th node in the linked list. If the index is invalid, return -1.\"\"\"\n <|body_1|>\n\n def addAtHead(self, val: int) -> None:\n \"\"\"Add a node of value val before the first element of the linked list. After the insertion, the new node will be the first node of the linked list.\"\"\"\n <|body_2|>\n\n def addAtTail(self, val: int) -> None:\n \"\"\"Append a node of value val to the last element of the linked list.\"\"\"\n <|body_3|>\n\n def addAtIndex(self, index: int, val: int) -> None:\n \"\"\"Add a node of value val before the index-th node in the linked list. If index equals to the length of linked list, the node will be appended to the end of linked list. 
If index is greater than the length, the node will not be inserted.\"\"\"\n <|body_4|>\n\n def deleteAtIndex(self, index: int) -> None:\n \"\"\"Delete the index-th node in the linked list, if the index is valid.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.size = 0\n self.head, self.tail = (Node(0), Node(0))\n self.head.next = self.tail\n self.tail.prev = self.head\n<|end_body_0|>\n\n<|body_start_1|>\n if index < 0 or index >= self.size:\n return -1\n if index + 1 < self.size - index:\n ptr = self.head\n for _ in range(index + 1):\n ptr = ptr.next\n else:\n ptr = self.tail\n for _ in range(self.size - index):\n ptr = ptr.prev\n return ptr.val\n<|end_body_1|>\n\n<|body_start_2|>\n pred, succ = (self.head, self.head.next)\n cur = Node(val)\n cur.next = succ\n cur.prev = pred\n pred.next = cur\n succ.prev = cur\n self.size += 1\n<|end_body_2|>\n\n<|body_start_3|>\n pred, succ = (self.tail.prev, self.tail)\n cur = Node(val)\n cur.next = succ\n cur.prev = pred\n pred.next = cur\n succ.prev = cur\n self.size += 1\n<|end_body_3|>\n\n<|body_start_4|>\n if index > self.size:\n return\n if index < 0:\n index = 0\n if index < self.size - index:\n pred = self.head\n for _ in range(index):\n pred = pred.next\n succ = pred.next\n else:\n succ = self.tail\n for _ in range(self.size - index):\n succ = succ.prev\n pred = succ.prev\n self.size += 1\n to_add = Node(val)\n to_add.prev = pred\n to_add.next = succ\n pred.next = to_add\n succ.prev = to_add\n<|end_body_4|>\n\n<|body_start_5|>\n if index < 0 or index >= self.size:\n return\n if index < self.size - index:\n pred = self.head\n for _ in range(index):\n pred = pred.next\n succ = pred.next.next\n else:\n succ = self.tail\n for _ in range(self.size - index - 1):\n succ = succ.prev\n pred = succ.prev.prev\n self.size -= 1\n pred.next = succ\n succ.prev = pred\n<|end_body_5|>\n", "revision_id": "30198097904994e34f8321926ad2a2cadc8b5940", "skeleton": "<|skeleton|>\nclass MyLinkedList:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def get(self, index: int) -> int:\n \"\"\"Get the value of the index-th node in the linked list. If the index is invalid, return -1.\"\"\"\n <|body_1|>\n\n def addAtHead(self, val: int) -> None:\n \"\"\"Add a node of value val before the first element of the linked list. After the insertion, the new node will be the first node of the linked list.\"\"\"\n <|body_2|>\n\n def addAtTail(self, val: int) -> None:\n \"\"\"Append a node of value val to the last element of the linked list.\"\"\"\n <|body_3|>\n\n def addAtIndex(self, index: int, val: int) -> None:\n \"\"\"Add a node of value val before the index-th node in the linked list. If index equals to the length of linked list, the node will be appended to the end of linked list. If index is greater than the length, the node will not be inserted.\"\"\"\n <|body_4|>\n\n def deleteAtIndex(self, index: int) -> None:\n \"\"\"Delete the index-th node in the linked list, if the index is valid.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MyLinkedList:\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n self.size = 0\n self.head, self.tail = (Node(0), Node(0))\n self.head.next = self.tail\n self.tail.prev = self.head\n\n def get(self, index: int) -> int:\n \"\"\"Get the value of the index-th node in the linked list. 
If the index is invalid, return -1.\"\"\"\n if index < 0 or index >= self.size:\n return -1\n if index + 1 < self.size - index:\n ptr = self.head\n for _ in range(index + 1):\n ptr = ptr.next\n else:\n ptr = self.tail\n for _ in range(self.size - index):\n ptr = ptr.prev\n return ptr.val\n\n def addAtHead(self, val: int) -> None:\n \"\"\"Add a node of value val before the first element of the linked list. After the insertion, the new node will be the first node of the linked list.\"\"\"\n pred, succ = (self.head, self.head.next)\n cur = Node(val)\n cur.next = succ\n cur.prev = pred\n pred.next = cur\n succ.prev = cur\n self.size += 1\n\n def addAtTail(self, val: int) -> None:\n \"\"\"Append a node of value val to the last element of the linked list.\"\"\"\n pred, succ = (self.tail.prev, self.tail)\n cur = Node(val)\n cur.next = succ\n cur.prev = pred\n pred.next = cur\n succ.prev = cur\n self.size += 1\n\n def addAtIndex(self, index: int, val: int) -> None:\n \"\"\"Add a node of value val before the index-th node in the linked list. If index equals to the length of linked list, the node will be appended to the end of linked list. If index is greater than the length, the node will not be inserted.\"\"\"\n if index > self.size:\n return\n if index < 0:\n index = 0\n if index < self.size - index:\n pred = self.head\n for _ in range(index):\n pred = pred.next\n succ = pred.next\n else:\n succ = self.tail\n for _ in range(self.size - index):\n succ = succ.prev\n pred = succ.prev\n self.size += 1\n to_add = Node(val)\n to_add.prev = pred\n to_add.next = succ\n pred.next = to_add\n succ.prev = to_add\n\n def deleteAtIndex(self, index: int) -> None:\n \"\"\"Delete the index-th node in the linked list, if the index is valid.\"\"\"\n if index < 0 or index >= self.size:\n return\n if index < self.size - index:\n pred = self.head\n for _ in range(index):\n pred = pred.next\n succ = pred.next.next\n else:\n succ = self.tail\n for _ in range(self.size - index - 1):\n succ = succ.prev\n pred = succ.prev.prev\n self.size -= 1\n pred.next = succ\n succ.prev = pred\n", "source": "the_stack_v2_python_sparse", "source_path": "coding/leetcode/707-design-linked-list/solution.py", "source_repo": "teckoo/interview_public", "split": "val", "star_events_count": 2} {"blob_id": "8fed59678ddeabe8b7060bdccc4745817cd442ab", "bodies": ["if random_seed is not None:\n self.random_generator = random.Random(random_seed)\nelse:\n self.random_generator = random.Random()\nsuper().__init__(expression_data=expression_data, calculator=calculator, rm_outliers=rm_outliers)", "n_genes = self.expression_data.n_points\npairs = self.get_random_pairs(n_pairs, n_genes, adjust_n)\nsimilarities_list = list()\nfor pair in pairs:\n index1 = pair[0]\n index2 = pair[1]\n gene1 = self.expression_data.get_genes_data_index(index1)\n gene2 = self.expression_data.get_genes_data_index(index2)\n similarity = self.calculator.similarity(gene1, gene2)\n similarities_list.append(similarity)\nif self.rm_outliers:\n similarities_list = self.remove_outliers(similarities_list)\nreturn similarities_list", "if n_pairs < 1:\n raise ValueError('Number of pairs must be at least 1.')\nif n_points2 is None:\n n_possible_pairs = possible_pairs(n_points)\nelse:\n n_possible_pairs = n_points * n_points2\nif n_pairs > n_possible_pairs:\n if adjust_n:\n n_pairs = n_possible_pairs\n else:\n raise ValueError('Number of pairs is greater than number of possible unique pairs: ', n_possible_pairs)\npairs = set()\nwhile len(pairs) < n_pairs:\n max2 = None if n_points2 is 
None else n_points2 - 1\n pair = self.generate_index_pair(min_index=0, max_index=n_points - 1, max_index2=max2)\n pairs.add(pair)\nreturn pairs", "index1 = self.random_generator.randint(min_index, max_index)\nmax2 = max_index if max_index2 is None else max_index2\nindex2 = self.random_generator.randint(min_index, max2)\nif max_index2 is None:\n while index1 == index2:\n index2 = self.random_generator.randint(min_index, max2)\n if index1 > index2:\n index3 = index2\n index2 = index1\n index1 = index3\nreturn (index1, index2)"], "bodies_text": "<|body_start_0|>\n if random_seed is not None:\n self.random_generator = random.Random(random_seed)\n else:\n self.random_generator = random.Random()\n super().__init__(expression_data=expression_data, calculator=calculator, rm_outliers=rm_outliers)\n<|end_body_0|>\n\n<|body_start_1|>\n n_genes = self.expression_data.n_points\n pairs = self.get_random_pairs(n_pairs, n_genes, adjust_n)\n similarities_list = list()\n for pair in pairs:\n index1 = pair[0]\n index2 = pair[1]\n gene1 = self.expression_data.get_genes_data_index(index1)\n gene2 = self.expression_data.get_genes_data_index(index2)\n similarity = self.calculator.similarity(gene1, gene2)\n similarities_list.append(similarity)\n if self.rm_outliers:\n similarities_list = self.remove_outliers(similarities_list)\n return similarities_list\n<|end_body_1|>\n\n<|body_start_2|>\n if n_pairs < 1:\n raise ValueError('Number of pairs must be at least 1.')\n if n_points2 is None:\n n_possible_pairs = possible_pairs(n_points)\n else:\n n_possible_pairs = n_points * n_points2\n if n_pairs > n_possible_pairs:\n if adjust_n:\n n_pairs = n_possible_pairs\n else:\n raise ValueError('Number of pairs is greater than number of possible unique pairs: ', n_possible_pairs)\n pairs = set()\n while len(pairs) < n_pairs:\n max2 = None if n_points2 is None else n_points2 - 1\n pair = self.generate_index_pair(min_index=0, max_index=n_points - 1, max_index2=max2)\n pairs.add(pair)\n return pairs\n<|end_body_2|>\n\n<|body_start_3|>\n index1 = self.random_generator.randint(min_index, max_index)\n max2 = max_index if max_index2 is None else max_index2\n index2 = self.random_generator.randint(min_index, max2)\n if max_index2 is None:\n while index1 == index2:\n index2 = self.random_generator.randint(min_index, max2)\n if index1 > index2:\n index3 = index2\n index2 = index1\n index1 = index3\n return (index1, index2)\n<|end_body_3|>\n", "class_docstring": "Navigate similarity calculation between random points.", "class_name": "RandomSimilarityCalculatorNavigator", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RandomSimilarityCalculatorNavigator:\n \"\"\"Navigate similarity calculation between random points.\"\"\"\n\n def __init__(self, expression_data: GeneExpression, calculator: SimilarityCalculator, random_seed: int=None, rm_outliers: bool=True):\n \"\"\":param expression_data: Data for all genes :param calculator: SimilarityCalculator used for all calculations :param random_seed: seed to be used for random number generator, used to determine which pairs will be used for distance calculations None sets the default random library seed :param rm_outliers: Should outliers be removed before summary statistics calculation\"\"\"\n <|body_0|>\n\n def similarities(self, n_pairs: int, adjust_n: bool=False) -> list:\n \"\"\"Calculate similarities between random pairs. :param n_pairs: Number of pairs. 
:param adjust_n: Should number of pairs be adjusted to max possible number of pairs if n_pairs is larger than possible number of pairs based on n rows in expression data. :return: list of similarities\"\"\"\n <|body_1|>\n\n def get_random_pairs(self, n_pairs, n_points, adjust_n: bool=False, n_points2: int=None) -> set:\n \"\"\"Get random index pairs from n_points. If n_points2 is none draws pairs of two different elements from n_points-1; else draws one point from n_points-1 and the other from n_points2-1. Indices can be between 0 and n_points-1 (or n_points2-1). No pairs are repeated. :param n_pairs: n of index pairs :param n_points: n of points/indices :param adjust_n: As in similarities method. :param n_points2: n of points for the second element of a pair, draws points from two datasets (with n_points and n_points2 elements), if None draws pairs from a single dataset with n_points :return: index pairs\"\"\"\n <|body_2|>\n\n def generate_index_pair(self, max_index: int, min_index: int=0, max_index2: int=None) -> tuple:\n \"\"\"Make a pair of indices. Pair of indices contains 2 different indices if max_index2 is None - draws from elements between min and max_index; else draws pairs with first element between min and max_index and second element between min and max_index2 elements. :param max_index: largest possible index, inclusive :param min_index: smallest possible index, inclusive :param max_index2: Largest possible index for second element of a pair, if None uses max_index for both elements :return: index pair, left index is always smaller if max_index2 is None\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if random_seed is not None:\n self.random_generator = random.Random(random_seed)\n else:\n self.random_generator = random.Random()\n super().__init__(expression_data=expression_data, calculator=calculator, rm_outliers=rm_outliers)\n<|end_body_0|>\n\n<|body_start_1|>\n n_genes = self.expression_data.n_points\n pairs = self.get_random_pairs(n_pairs, n_genes, adjust_n)\n similarities_list = list()\n for pair in pairs:\n index1 = pair[0]\n index2 = pair[1]\n gene1 = self.expression_data.get_genes_data_index(index1)\n gene2 = self.expression_data.get_genes_data_index(index2)\n similarity = self.calculator.similarity(gene1, gene2)\n similarities_list.append(similarity)\n if self.rm_outliers:\n similarities_list = self.remove_outliers(similarities_list)\n return similarities_list\n<|end_body_1|>\n\n<|body_start_2|>\n if n_pairs < 1:\n raise ValueError('Number of pairs must be at least 1.')\n if n_points2 is None:\n n_possible_pairs = possible_pairs(n_points)\n else:\n n_possible_pairs = n_points * n_points2\n if n_pairs > n_possible_pairs:\n if adjust_n:\n n_pairs = n_possible_pairs\n else:\n raise ValueError('Number of pairs is greater than number of possible unique pairs: ', n_possible_pairs)\n pairs = set()\n while len(pairs) < n_pairs:\n max2 = None if n_points2 is None else n_points2 - 1\n pair = self.generate_index_pair(min_index=0, max_index=n_points - 1, max_index2=max2)\n pairs.add(pair)\n return pairs\n<|end_body_2|>\n\n<|body_start_3|>\n index1 = self.random_generator.randint(min_index, max_index)\n max2 = max_index if max_index2 is None else max_index2\n index2 = self.random_generator.randint(min_index, max2)\n if max_index2 is None:\n while index1 == index2:\n index2 = self.random_generator.randint(min_index, max2)\n if index1 > index2:\n index3 = index2\n index2 = index1\n index1 = index3\n return (index1, index2)\n<|end_body_3|>\n", "id": 
"stack_v2_sparse_classes_10k_val_000238", "length_bytes": 43977, "license_type": "no_license", "methods": [{"docstring": ":param expression_data: Data for all genes :param calculator: SimilarityCalculator used for all calculations :param random_seed: seed to be used for random number generator, used to determine which pairs will be used for distance calculations None sets the default random library seed :param rm_outliers: Should outliers be removed before summary statistics calculation", "name": "__init__", "signature": "def __init__(self, expression_data: GeneExpression, calculator: SimilarityCalculator, random_seed: int=None, rm_outliers: bool=True)"}, {"docstring": "Calculate similarities between random pairs. :param n_pairs: Number of pairs. :param adjust_n: Should number of pairs be adjusted to max possible number of pairs if n_pairs is larger than possible number of pairs based on n rows in expression data. :return: list of similarities", "name": "similarities", "signature": "def similarities(self, n_pairs: int, adjust_n: bool=False) -> list"}, {"docstring": "Get random index pairs from n_points. If n_points2 is none draws pairs of two different elements from n_points-1; else draws one point from n_points-1 and the other from n_points2-1. Indices can be between 0 and n_points-1 (or n_points2-1). No pairs are repeated. :param n_pairs: n of index pairs :param n_points: n of points/indices :param adjust_n: As in similarities method. :param n_points2: n of points for the second element of a pair, draws points from two datasets (with n_points and n_points2 elements), if None draws pairs from a single dataset with n_points :return: index pairs", "name": "get_random_pairs", "signature": "def get_random_pairs(self, n_pairs, n_points, adjust_n: bool=False, n_points2: int=None) -> set"}, {"docstring": "Make a pair of indices. Pair of indices contains 2 different indices if max_index2 is None - draws from elements between min and max_index; else draws pairs with first element between min and max_index and second element between min and max_index2 elements. :param max_index: largest possible index, inclusive :param min_index: smallest possible index, inclusive :param max_index2: Largest possible index for second element of a pair, if None uses max_index for both elements :return: index pair, left index is always smaller if max_index2 is None", "name": "generate_index_pair", "signature": "def generate_index_pair(self, max_index: int, min_index: int=0, max_index2: int=None) -> tuple"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_000230", "prompt": "Implement the Python class `RandomSimilarityCalculatorNavigator` described below.\n\nClass description:\nNavigate similarity calculation between random points.\n\nMethod signatures and docstrings:\n- def __init__(self, expression_data: GeneExpression, calculator: SimilarityCalculator, random_seed: int=None, rm_outliers: bool=True): :param expression_data: Data for all genes :param calculator: SimilarityCalculator used for all calculations :param random_seed: seed to be used for random number generator, used to determine which pairs will be used for distance calculations None sets the default random library seed :param rm_outliers: Should outliers be removed before summary statistics calculation\n- def similarities(self, n_pairs: int, adjust_n: bool=False) -> list: Calculate similarities between random pairs. :param n_pairs: Number of pairs. 
:param adjust_n: Should number of pairs be adjusted to max possible number of pairs if n_pairs is larger than possible number of pairs based on n rows in expression data. :return: list of similarities\n- def get_random_pairs(self, n_pairs, n_points, adjust_n: bool=False, n_points2: int=None) -> set: Get random index pairs from n_points. If n_points2 is none draws pairs of two different elements from n_points-1; else draws one point from n_points-1 and the other from n_points2-1. Indices can be between 0 and n_points-1 (or n_points2-1). No pairs are repeated. :param n_pairs: n of index pairs :param n_points: n of points/indices :param adjust_n: As in similarities method. :param n_points2: n of points for the second element of a pair, draws points from two datasets (with n_points and n_points2 elements), if None draws pairs from a single dataset with n_points :return: index pairs\n- def generate_index_pair(self, max_index: int, min_index: int=0, max_index2: int=None) -> tuple: Make a pair of indices. Pair of indices contains 2 different indices if max_index2 is None - draws from elements between min and max_index; else draws pairs with first element between min and max_index and second element between min and max_index2 elements. :param max_index: largest possible index, inclusive :param min_index: smallest possible index, inclusive :param max_index2: Largest possible index for second element of a pair, if None uses max_index for both elements :return: index pair, left index is always smaller if max_index2 is None", "prompted_full_text": "Implement the Python class `RandomSimilarityCalculatorNavigator` described below.\n\nClass description:\nNavigate similarity calculation between random points.\n\nMethod signatures and docstrings:\n- def __init__(self, expression_data: GeneExpression, calculator: SimilarityCalculator, random_seed: int=None, rm_outliers: bool=True): :param expression_data: Data for all genes :param calculator: SimilarityCalculator used for all calculations :param random_seed: seed to be used for random number generator, used to determine which pairs will be used for distance calculations None sets the default random library seed :param rm_outliers: Should outliers be removed before summary statistics calculation\n- def similarities(self, n_pairs: int, adjust_n: bool=False) -> list: Calculate similarities between random pairs. :param n_pairs: Number of pairs. :param adjust_n: Should number of pairs be adjusted to max possible number of pairs if n_pairs is larger than possible number of pairs based on n rows in expression data. :return: list of similarities\n- def get_random_pairs(self, n_pairs, n_points, adjust_n: bool=False, n_points2: int=None) -> set: Get random index pairs from n_points. If n_points2 is none draws pairs of two different elements from n_points-1; else draws one point from n_points-1 and the other from n_points2-1. Indices can be between 0 and n_points-1 (or n_points2-1). No pairs are repeated. :param n_pairs: n of index pairs :param n_points: n of points/indices :param adjust_n: As in similarities method. :param n_points2: n of points for the second element of a pair, draws points from two datasets (with n_points and n_points2 elements), if None draws pairs from a single dataset with n_points :return: index pairs\n- def generate_index_pair(self, max_index: int, min_index: int=0, max_index2: int=None) -> tuple: Make a pair of indices. 
Pair of indices contains 2 different indices if max_index2 is None - draws from elements between min and max_index; else draws pairs with first element between min and max_index and second element between min and max_index2 elements. :param max_index: largest possible index, inclusive :param min_index: smallest possible index, inclusive :param max_index2: Largest possible index for second element of a pair, if None uses max_index for both elements :return: index pair, left index is always smaller if max_index2 is None\n\n<|skeleton|>\nclass RandomSimilarityCalculatorNavigator:\n \"\"\"Navigate similarity calculation between random points.\"\"\"\n\n def __init__(self, expression_data: GeneExpression, calculator: SimilarityCalculator, random_seed: int=None, rm_outliers: bool=True):\n \"\"\":param expression_data: Data for all genes :param calculator: SimilarityCalculator used for all calculations :param random_seed: seed to be used for random number generator, used to determine which pairs will be used for distance calculations None sets the default random library seed :param rm_outliers: Should outliers be removed before summary statistics calculation\"\"\"\n <|body_0|>\n\n def similarities(self, n_pairs: int, adjust_n: bool=False) -> list:\n \"\"\"Calculate similarities between random pairs. :param n_pairs: Number of pairs. :param adjust_n: Should number of pairs be adjusted to max possible number of pairs if n_pairs is larger than possible number of pairs based on n rows in expression data. :return: list of similarities\"\"\"\n <|body_1|>\n\n def get_random_pairs(self, n_pairs, n_points, adjust_n: bool=False, n_points2: int=None) -> set:\n \"\"\"Get random index pairs from n_points. If n_points2 is none draws pairs of two different elements from n_points-1; else draws one point from n_points-1 and the other from n_points2-1. Indices can be between 0 and n_points-1 (or n_points2-1). No pairs are repeated. :param n_pairs: n of index pairs :param n_points: n of points/indices :param adjust_n: As in similarities method. :param n_points2: n of points for the second element of a pair, draws points from two datasets (with n_points and n_points2 elements), if None draws pairs from a single dataset with n_points :return: index pairs\"\"\"\n <|body_2|>\n\n def generate_index_pair(self, max_index: int, min_index: int=0, max_index2: int=None) -> tuple:\n \"\"\"Make a pair of indices. Pair of indices contains 2 different indices if max_index2 is None - draws from elements between min and max_index; else draws pairs with first element between min and max_index and second element between min and max_index2 elements. 
:param max_index: largest possible index, inclusive :param min_index: smallest possible index, inclusive :param max_index2: Largest possible index for second element of a pair, if None uses max_index for both elements :return: index pair, left index is always smaller if max_index2 is None\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if random_seed is not None:\n self.random_generator = random.Random(random_seed)\n else:\n self.random_generator = random.Random()\n super().__init__(expression_data=expression_data, calculator=calculator, rm_outliers=rm_outliers)\n<|end_body_0|>\n\n<|body_start_1|>\n n_genes = self.expression_data.n_points\n pairs = self.get_random_pairs(n_pairs, n_genes, adjust_n)\n similarities_list = list()\n for pair in pairs:\n index1 = pair[0]\n index2 = pair[1]\n gene1 = self.expression_data.get_genes_data_index(index1)\n gene2 = self.expression_data.get_genes_data_index(index2)\n similarity = self.calculator.similarity(gene1, gene2)\n similarities_list.append(similarity)\n if self.rm_outliers:\n similarities_list = self.remove_outliers(similarities_list)\n return similarities_list\n<|end_body_1|>\n\n<|body_start_2|>\n if n_pairs < 1:\n raise ValueError('Number of pairs must be at least 1.')\n if n_points2 is None:\n n_possible_pairs = possible_pairs(n_points)\n else:\n n_possible_pairs = n_points * n_points2\n if n_pairs > n_possible_pairs:\n if adjust_n:\n n_pairs = n_possible_pairs\n else:\n raise ValueError('Number of pairs is greater than number of possible unique pairs: ', n_possible_pairs)\n pairs = set()\n while len(pairs) < n_pairs:\n max2 = None if n_points2 is None else n_points2 - 1\n pair = self.generate_index_pair(min_index=0, max_index=n_points - 1, max_index2=max2)\n pairs.add(pair)\n return pairs\n<|end_body_2|>\n\n<|body_start_3|>\n index1 = self.random_generator.randint(min_index, max_index)\n max2 = max_index if max_index2 is None else max_index2\n index2 = self.random_generator.randint(min_index, max2)\n if max_index2 is None:\n while index1 == index2:\n index2 = self.random_generator.randint(min_index, max2)\n if index1 > index2:\n index3 = index2\n index2 = index1\n index1 = index3\n return (index1, index2)\n<|end_body_3|>\n", "revision_id": "6d11df5e8ca37e53e048d261ac287f859ba6e9b9", "skeleton": "<|skeleton|>\nclass RandomSimilarityCalculatorNavigator:\n \"\"\"Navigate similarity calculation between random points.\"\"\"\n\n def __init__(self, expression_data: GeneExpression, calculator: SimilarityCalculator, random_seed: int=None, rm_outliers: bool=True):\n \"\"\":param expression_data: Data for all genes :param calculator: SimilarityCalculator used for all calculations :param random_seed: seed to be used for random number generator, used to determine which pairs will be used for distance calculations None sets the default random library seed :param rm_outliers: Should outliers be removed before summary statistics calculation\"\"\"\n <|body_0|>\n\n def similarities(self, n_pairs: int, adjust_n: bool=False) -> list:\n \"\"\"Calculate similarities between random pairs. :param n_pairs: Number of pairs. :param adjust_n: Should number of pairs be adjusted to max possible number of pairs if n_pairs is larger than possible number of pairs based on n rows in expression data. :return: list of similarities\"\"\"\n <|body_1|>\n\n def get_random_pairs(self, n_pairs, n_points, adjust_n: bool=False, n_points2: int=None) -> set:\n \"\"\"Get random index pairs from n_points. 
If n_points2 is none draws pairs of two different elements from n_points-1; else draws one point from n_points-1 and the other from n_points2-1. Indices can be between 0 and n_points-1 (or n_points2-1). No pairs are repeated. :param n_pairs: n of index pairs :param n_points: n of points/indices :param adjust_n: As in similarities method. :param n_points2: n of points for the second element of a pair, draws points from two datasets (with n_points and n_points2 elements), if None draws pairs from a single dataset with n_points :return: index pairs\"\"\"\n <|body_2|>\n\n def generate_index_pair(self, max_index: int, min_index: int=0, max_index2: int=None) -> tuple:\n \"\"\"Make a pair of indices. Pair of indices contains 2 different indices if max_index2 is None - draws from elements between min and max_index; else draws pairs with first element between min and max_index and second element between min and max_index2 elements. :param max_index: largest possible index, inclusive :param min_index: smallest possible index, inclusive :param max_index2: Largest possible index for second element of a pair, if None uses max_index for both elements :return: index pair, left index is always smaller if max_index2 is None\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RandomSimilarityCalculatorNavigator:\n \"\"\"Navigate similarity calculation between random points.\"\"\"\n\n def __init__(self, expression_data: GeneExpression, calculator: SimilarityCalculator, random_seed: int=None, rm_outliers: bool=True):\n \"\"\":param expression_data: Data for all genes :param calculator: SimilarityCalculator used for all calculations :param random_seed: seed to be used for random number generator, used to determine which pairs will be used for distance calculations None sets the default random library seed :param rm_outliers: Should outliers be removed before summary statistics calculation\"\"\"\n if random_seed is not None:\n self.random_generator = random.Random(random_seed)\n else:\n self.random_generator = random.Random()\n super().__init__(expression_data=expression_data, calculator=calculator, rm_outliers=rm_outliers)\n\n def similarities(self, n_pairs: int, adjust_n: bool=False) -> list:\n \"\"\"Calculate similarities between random pairs. :param n_pairs: Number of pairs. :param adjust_n: Should number of pairs be adjusted to max possible number of pairs if n_pairs is larger than possible number of pairs based on n rows in expression data. :return: list of similarities\"\"\"\n n_genes = self.expression_data.n_points\n pairs = self.get_random_pairs(n_pairs, n_genes, adjust_n)\n similarities_list = list()\n for pair in pairs:\n index1 = pair[0]\n index2 = pair[1]\n gene1 = self.expression_data.get_genes_data_index(index1)\n gene2 = self.expression_data.get_genes_data_index(index2)\n similarity = self.calculator.similarity(gene1, gene2)\n similarities_list.append(similarity)\n if self.rm_outliers:\n similarities_list = self.remove_outliers(similarities_list)\n return similarities_list\n\n def get_random_pairs(self, n_pairs, n_points, adjust_n: bool=False, n_points2: int=None) -> set:\n \"\"\"Get random index pairs from n_points. If n_points2 is none draws pairs of two different elements from n_points-1; else draws one point from n_points-1 and the other from n_points2-1. Indices can be between 0 and n_points-1 (or n_points2-1). No pairs are repeated. 
:param n_pairs: n of index pairs :param n_points: n of points/indices :param adjust_n: As in similarities method. :param n_points2: n of points for the second element of a pair, draws points from two datasets (with n_points and n_points2 elements), if None draws pairs from a single dataset with n_points :return: index pairs\"\"\"\n if n_pairs < 1:\n raise ValueError('Number of pairs must be at least 1.')\n if n_points2 is None:\n n_possible_pairs = possible_pairs(n_points)\n else:\n n_possible_pairs = n_points * n_points2\n if n_pairs > n_possible_pairs:\n if adjust_n:\n n_pairs = n_possible_pairs\n else:\n raise ValueError('Number of pairs is greater than number of possible unique pairs: ', n_possible_pairs)\n pairs = set()\n while len(pairs) < n_pairs:\n max2 = None if n_points2 is None else n_points2 - 1\n pair = self.generate_index_pair(min_index=0, max_index=n_points - 1, max_index2=max2)\n pairs.add(pair)\n return pairs\n\n def generate_index_pair(self, max_index: int, min_index: int=0, max_index2: int=None) -> tuple:\n \"\"\"Make a pair of indices. Pair of indices contains 2 different indices if max_index2 is None - draws from elements between min and max_index; else draws pairs with first element between min and max_index and second element between min and max_index2 elements. :param max_index: largest possible index, inclusive :param min_index: smallest possible index, inclusive :param max_index2: Largest possible index for second element of a pair, if None uses max_index for both elements :return: index pair, left index is always smaller if max_index2 is None\"\"\"\n index1 = self.random_generator.randint(min_index, max_index)\n max2 = max_index if max_index2 is None else max_index2\n index2 = self.random_generator.randint(min_index, max2)\n if max_index2 is None:\n while index1 == index2:\n index2 = self.random_generator.randint(min_index, max2)\n if index1 > index2:\n index3 = index2\n index2 = index1\n index1 = index3\n return (index1, index2)\n", "source": "the_stack_v2_python_sparse", "source_path": "correlation_enrichment/library_correlation_enrichment.py", "source_repo": "biolab/baylor-dicty", "split": "val", "star_events_count": 0} {"blob_id": "57c970e2a82aaaaff91bf95c5fd01279f12e18fe", "bodies": ["res = []\nfor each in A:\n tem = []\n for each_char in each[::-1]:\n if each_char == 1:\n tem.append(0)\n else:\n tem.append(1)\n res.append(tem)\nreturn res", "res = []\nfor each in A:\n tem = []\n for each_char in each[::-1]:\n tem.append(1 - each_char)\n res.append(tem)\nreturn res"], "bodies_text": "<|body_start_0|>\n res = []\n for each in A:\n tem = []\n for each_char in each[::-1]:\n if each_char == 1:\n tem.append(0)\n else:\n tem.append(1)\n res.append(tem)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n res = []\n for each in A:\n tem = []\n for each_char in each[::-1]:\n tem.append(1 - each_char)\n res.append(tem)\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def flipAndInvertImage(self, A):\n \"\"\":type A: List[List[int]] :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def flipAndInvertImage2(self, A):\n \"\"\":type A: List[List[int]] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = []\n for each in A:\n tem = []\n for each_char in each[::-1]:\n if each_char == 1:\n tem.append(0)\n else:\n tem.append(1)\n res.append(tem)\n return 
res\n<|end_body_0|>\n\n<|body_start_1|>\n res = []\n for each in A:\n tem = []\n for each_char in each[::-1]:\n tem.append(1 - each_char)\n res.append(tem)\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000239", "length_bytes": 804, "license_type": "no_license", "methods": [{"docstring": ":type A: List[List[int]] :rtype: List[List[int]]", "name": "flipAndInvertImage", "signature": "def flipAndInvertImage(self, A)"}, {"docstring": ":type A: List[List[int]] :rtype: List[List[int]]", "name": "flipAndInvertImage2", "signature": "def flipAndInvertImage2(self, A)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003050", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def flipAndInvertImage(self, A): :type A: List[List[int]] :rtype: List[List[int]]\n- def flipAndInvertImage2(self, A): :type A: List[List[int]] :rtype: List[List[int]]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def flipAndInvertImage(self, A): :type A: List[List[int]] :rtype: List[List[int]]\n- def flipAndInvertImage2(self, A): :type A: List[List[int]] :rtype: List[List[int]]\n\n<|skeleton|>\nclass Solution:\n\n def flipAndInvertImage(self, A):\n \"\"\":type A: List[List[int]] :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def flipAndInvertImage2(self, A):\n \"\"\":type A: List[List[int]] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = []\n for each in A:\n tem = []\n for each_char in each[::-1]:\n if each_char == 1:\n tem.append(0)\n else:\n tem.append(1)\n res.append(tem)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n res = []\n for each in A:\n tem = []\n for each_char in each[::-1]:\n tem.append(1 - each_char)\n res.append(tem)\n return res\n<|end_body_1|>\n", "revision_id": "4105e18050b15fc0409c75353ad31be17187dd34", "skeleton": "<|skeleton|>\nclass Solution:\n\n def flipAndInvertImage(self, A):\n \"\"\":type A: List[List[int]] :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def flipAndInvertImage2(self, A):\n \"\"\":type A: List[List[int]] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def flipAndInvertImage(self, A):\n \"\"\":type A: List[List[int]] :rtype: List[List[int]]\"\"\"\n res = []\n for each in A:\n tem = []\n for each_char in each[::-1]:\n if each_char == 1:\n tem.append(0)\n else:\n tem.append(1)\n res.append(tem)\n return res\n\n def flipAndInvertImage2(self, A):\n \"\"\":type A: List[List[int]] :rtype: List[List[int]]\"\"\"\n res = []\n for each in A:\n tem = []\n for each_char in each[::-1]:\n tem.append(1 - each_char)\n res.append(tem)\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "flipAndInvertImage.py", "source_repo": "NeilWangziyu/Leetcode_py", "split": "val", "star_events_count": 2} {"blob_id": "1497f7442f6063be560803022fa58bda48ed1b2f", "bodies": ["def binary_search(nums, target, comparator):\n left, right = (0, len(nums) - 1)\n while left <= right:\n mid = left + (right - left) // 2\n if comparator(nums[mid], target):\n left = mid + 1\n else:\n right = mid - 1\n return left\nleft = binary_search(nums, target, lambda x, y: x < y)\nright = binary_search(nums, target, lambda x, y: x <= y)\nreturn [left, 
right - 1] if left < len(nums) and nums[left] == target else [-1, -1]", "def binary_search(nums, target, comparator):\n left, right = (0, len(nums) - 1)\n while left <= right:\n mid = left + (right - left) // 2\n if comparator(nums[mid], target):\n right = mid - 1\n else:\n left = mid + 1\n return left\nleft = binary_search(nums, target, lambda x, y: x >= y)\nif left >= len(nums) or nums[left] != target:\n return [-1, -1]\nright = binary_search(nums, target, lambda x, y: x > y)\nreturn [left, right - 1]", "index = [-1, -1]\ni = 0\nwhile i < len(nums):\n if nums[i] == target:\n if index[0] == -1:\n index[0] = index[1] = i\n else:\n index[1] = i\n i += 1\nreturn index"], "bodies_text": "<|body_start_0|>\n def binary_search(nums, target, comparator):\n left, right = (0, len(nums) - 1)\n while left <= right:\n mid = left + (right - left) // 2\n if comparator(nums[mid], target):\n left = mid + 1\n else:\n right = mid - 1\n return left\n left = binary_search(nums, target, lambda x, y: x < y)\n right = binary_search(nums, target, lambda x, y: x <= y)\n return [left, right - 1] if left < len(nums) and nums[left] == target else [-1, -1]\n<|end_body_0|>\n\n<|body_start_1|>\n def binary_search(nums, target, comparator):\n left, right = (0, len(nums) - 1)\n while left <= right:\n mid = left + (right - left) // 2\n if comparator(nums[mid], target):\n right = mid - 1\n else:\n left = mid + 1\n return left\n left = binary_search(nums, target, lambda x, y: x >= y)\n if left >= len(nums) or nums[left] != target:\n return [-1, -1]\n right = binary_search(nums, target, lambda x, y: x > y)\n return [left, right - 1]\n<|end_body_1|>\n\n<|body_start_2|>\n index = [-1, -1]\n i = 0\n while i < len(nums):\n if nums[i] == target:\n if index[0] == -1:\n index[0] = index[1] = i\n else:\n index[1] = i\n i += 1\n return index\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def searchRange(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def searchRange_v2(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_1|>\n\n def searchRange_linear(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def binary_search(nums, target, comparator):\n left, right = (0, len(nums) - 1)\n while left <= right:\n mid = left + (right - left) // 2\n if comparator(nums[mid], target):\n left = mid + 1\n else:\n right = mid - 1\n return left\n left = binary_search(nums, target, lambda x, y: x < y)\n right = binary_search(nums, target, lambda x, y: x <= y)\n return [left, right - 1] if left < len(nums) and nums[left] == target else [-1, -1]\n<|end_body_0|>\n\n<|body_start_1|>\n def binary_search(nums, target, comparator):\n left, right = (0, len(nums) - 1)\n while left <= right:\n mid = left + (right - left) // 2\n if comparator(nums[mid], target):\n right = mid - 1\n else:\n left = mid + 1\n return left\n left = binary_search(nums, target, lambda x, y: x >= y)\n if left >= len(nums) or nums[left] != target:\n return [-1, -1]\n right = binary_search(nums, target, lambda x, y: x > y)\n return [left, right - 1]\n<|end_body_1|>\n\n<|body_start_2|>\n index = [-1, -1]\n i = 0\n while i < len(nums):\n if nums[i] == target:\n if index[0] == -1:\n index[0] = index[1] = i\n else:\n index[1] = i\n i += 1\n 
return index\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000240", "length_bytes": 2883, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :type target: int :rtype: List[int]", "name": "searchRange", "signature": "def searchRange(self, nums, target)"}, {"docstring": ":type nums: List[int] :type target: int :rtype: List[int]", "name": "searchRange_v2", "signature": "def searchRange_v2(self, nums, target)"}, {"docstring": ":type nums: List[int] :type target: int :rtype: List[int]", "name": "searchRange_linear", "signature": "def searchRange_linear(self, nums, target)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004215", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def searchRange(self, nums, target): :type nums: List[int] :type target: int :rtype: List[int]\n- def searchRange_v2(self, nums, target): :type nums: List[int] :type target: int :rtype: List[int]\n- def searchRange_linear(self, nums, target): :type nums: List[int] :type target: int :rtype: List[int]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def searchRange(self, nums, target): :type nums: List[int] :type target: int :rtype: List[int]\n- def searchRange_v2(self, nums, target): :type nums: List[int] :type target: int :rtype: List[int]\n- def searchRange_linear(self, nums, target): :type nums: List[int] :type target: int :rtype: List[int]\n\n<|skeleton|>\nclass Solution:\n\n def searchRange(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def searchRange_v2(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_1|>\n\n def searchRange_linear(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def binary_search(nums, target, comparator):\n left, right = (0, len(nums) - 1)\n while left <= right:\n mid = left + (right - left) // 2\n if comparator(nums[mid], target):\n left = mid + 1\n else:\n right = mid - 1\n return left\n left = binary_search(nums, target, lambda x, y: x < y)\n right = binary_search(nums, target, lambda x, y: x <= y)\n return [left, right - 1] if left < len(nums) and nums[left] == target else [-1, -1]\n<|end_body_0|>\n\n<|body_start_1|>\n def binary_search(nums, target, comparator):\n left, right = (0, len(nums) - 1)\n while left <= right:\n mid = left + (right - left) // 2\n if comparator(nums[mid], target):\n right = mid - 1\n else:\n left = mid + 1\n return left\n left = binary_search(nums, target, lambda x, y: x >= y)\n if left >= len(nums) or nums[left] != target:\n return [-1, -1]\n right = binary_search(nums, target, lambda x, y: x > y)\n return [left, right - 1]\n<|end_body_1|>\n\n<|body_start_2|>\n index = [-1, -1]\n i = 0\n while i < len(nums):\n if nums[i] == target:\n if index[0] == -1:\n index[0] = index[1] = i\n else:\n index[1] = i\n i += 1\n return index\n<|end_body_2|>\n", "revision_id": "e60ba45fe2f2e5e3b3abfecec3db76f5ce1fde59", "skeleton": "<|skeleton|>\nclass Solution:\n\n def searchRange(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def searchRange_v2(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: 
List[int]\"\"\"\n <|body_1|>\n\n def searchRange_linear(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def searchRange(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n def binary_search(nums, target, comparator):\n left, right = (0, len(nums) - 1)\n while left <= right:\n mid = left + (right - left) // 2\n if comparator(nums[mid], target):\n left = mid + 1\n else:\n right = mid - 1\n return left\n left = binary_search(nums, target, lambda x, y: x < y)\n right = binary_search(nums, target, lambda x, y: x <= y)\n return [left, right - 1] if left < len(nums) and nums[left] == target else [-1, -1]\n\n def searchRange_v2(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n def binary_search(nums, target, comparator):\n left, right = (0, len(nums) - 1)\n while left <= right:\n mid = left + (right - left) // 2\n if comparator(nums[mid], target):\n right = mid - 1\n else:\n left = mid + 1\n return left\n left = binary_search(nums, target, lambda x, y: x >= y)\n if left >= len(nums) or nums[left] != target:\n return [-1, -1]\n right = binary_search(nums, target, lambda x, y: x > y)\n return [left, right - 1]\n\n def searchRange_linear(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n index = [-1, -1]\n i = 0\n while i < len(nums):\n if nums[i] == target:\n if index[0] == -1:\n index[0] = index[1] = i\n else:\n index[1] = i\n i += 1\n return index\n", "source": "the_stack_v2_python_sparse", "source_path": "src/lt_34.py", "source_repo": "oxhead/CodingYourWay", "split": "val", "star_events_count": 0} {"blob_id": "027b65a1279a658506f7bfb84d73d3d5821c9715", "bodies": ["try:\n import dgl\nexcept:\n raise ImportError('This class requires dgl.')\ntry:\n import dgllife\nexcept:\n raise ImportError('This class requires dgllife.')\nif mode not in ['classification', 'regression']:\n raise ValueError(\"mode must be either 'classification' or 'regression'\")\nsuper(GCN, self).__init__()\nself.n_tasks = n_tasks\nself.mode = mode\nself.n_classes = n_classes\nself.nfeat_name = nfeat_name\nif mode == 'classification':\n out_size = n_tasks * n_classes\nelse:\n out_size = n_tasks\nfrom dgllife.model import GCNPredictor as DGLGCNPredictor\nif graph_conv_layers is None:\n graph_conv_layers = [64, 64]\nnum_gnn_layers = len(graph_conv_layers)\nif activation is not None:\n activation = [activation] * num_gnn_layers\nself.model = DGLGCNPredictor(in_feats=number_atom_features, hidden_feats=graph_conv_layers, activation=activation, residual=[residual] * num_gnn_layers, batchnorm=[batchnorm] * num_gnn_layers, dropout=[dropout] * num_gnn_layers, n_tasks=out_size, predictor_hidden_feats=predictor_hidden_feats, predictor_dropout=predictor_dropout)", "node_feats = g.ndata[self.nfeat_name]\nout = self.model(g, node_feats)\nif self.mode == 'classification':\n if self.n_tasks == 1:\n logits = out.view(-1, self.n_classes)\n softmax_dim = 1\n else:\n logits = out.view(-1, self.n_tasks, self.n_classes)\n softmax_dim = 2\n proba = F.softmax(logits, dim=softmax_dim)\n return (proba, logits)\nelse:\n return out"], "bodies_text": "<|body_start_0|>\n try:\n import dgl\n except:\n raise ImportError('This class requires dgl.')\n try:\n import dgllife\n except:\n raise ImportError('This class 
requires dgllife.')\n if mode not in ['classification', 'regression']:\n raise ValueError(\"mode must be either 'classification' or 'regression'\")\n super(GCN, self).__init__()\n self.n_tasks = n_tasks\n self.mode = mode\n self.n_classes = n_classes\n self.nfeat_name = nfeat_name\n if mode == 'classification':\n out_size = n_tasks * n_classes\n else:\n out_size = n_tasks\n from dgllife.model import GCNPredictor as DGLGCNPredictor\n if graph_conv_layers is None:\n graph_conv_layers = [64, 64]\n num_gnn_layers = len(graph_conv_layers)\n if activation is not None:\n activation = [activation] * num_gnn_layers\n self.model = DGLGCNPredictor(in_feats=number_atom_features, hidden_feats=graph_conv_layers, activation=activation, residual=[residual] * num_gnn_layers, batchnorm=[batchnorm] * num_gnn_layers, dropout=[dropout] * num_gnn_layers, n_tasks=out_size, predictor_hidden_feats=predictor_hidden_feats, predictor_dropout=predictor_dropout)\n<|end_body_0|>\n\n<|body_start_1|>\n node_feats = g.ndata[self.nfeat_name]\n out = self.model(g, node_feats)\n if self.mode == 'classification':\n if self.n_tasks == 1:\n logits = out.view(-1, self.n_classes)\n softmax_dim = 1\n else:\n logits = out.view(-1, self.n_tasks, self.n_classes)\n softmax_dim = 2\n proba = F.softmax(logits, dim=softmax_dim)\n return (proba, logits)\n else:\n return out\n<|end_body_1|>\n", "class_docstring": "Model for Graph Property Prediction Based on Graph Convolution Networks (GCN). This model proceeds as follows: * Update node representations in graphs with a variant of GCN * For each graph, compute its representation by 1) a weighted sum of the node representations in the graph, where the weights are computed by applying a gating function to the node representations 2) a max pooling of the node representations 3) concatenating the output of 1) and 2) * Perform the final prediction using an MLP Examples -------- >>> import deepchem as dc >>> import dgl >>> from deepchem.models import GCN >>> smiles = [\"C1CCC1\", \"C1=CC=CN=C1\"] >>> featurizer = dc.feat.MolGraphConvFeaturizer() >>> graphs = fea", "class_name": "GCN", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GCN:\n \"\"\"Model for Graph Property Prediction Based on Graph Convolution Networks (GCN). This model proceeds as follows: * Update node representations in graphs with a variant of GCN * For each graph, compute its representation by 1) a weighted sum of the node representations in the graph, where the weights are computed by applying a gating function to the node representations 2) a max pooling of the node representations 3) concatenating the output of 1) and 2) * Perform the final prediction using an MLP Examples -------- >>> import deepchem as dc >>> import dgl >>> from deepchem.models import GCN >>> smiles = [\"C1CCC1\", \"C1=CC=CN=C1\"] >>> featurizer = dc.feat.MolGraphConvFeaturizer() >>> graphs = fea\"\"\"\n\n def __init__(self, n_tasks: int, graph_conv_layers: Optional[list]=None, activation=None, residual: bool=True, batchnorm: bool=False, dropout: float=0.0, predictor_hidden_feats: int=128, predictor_dropout: float=0.0, mode: str='regression', number_atom_features: int=30, n_classes: int=2, nfeat_name: str='x'):\n \"\"\"Parameters ---------- n_tasks: int Number of tasks. graph_conv_layers: list of int Width of channels for GCN layers. graph_conv_layers[i] gives the width of channel for the i-th GCN layer. If not specified, the default value will be [64, 64]. 
activation: callable The activation function to apply to the output of each GCN layer. By default, no activation function will be applied. residual: bool Whether to add a residual connection within each GCN layer. Default to True. batchnorm: bool Whether to apply batch normalization to the output of each GCN layer. Default to False. dropout: float The dropout probability for the output of each GCN layer. Default to 0. predictor_hidden_feats: int The siz\"\"\"\n <|body_0|>\n\n def forward(self, g):\n \"\"\"Predict graph labels Parameters ---------- g: DGLGraph A DGLGraph for a batch of graphs. It stores the node features in ``dgl_graph.ndata[self.nfeat_name]``. Returns ------- torch.Tensor The model output. * When self.mode = 'regression', its shape will be ``(dgl_graph.batch_size, self.n_tasks)``. * When self.mode = 'classification', the output consists of probabilities for classes. Its shape will be ``(dgl_graph.batch_size, self.n_tasks, self.n_classes)`` if self.n_tasks > 1; its shape will be ``(dgl_graph.batch_size, self.n_classes)`` if self.n_tasks is 1. torch.Tensor, optional This is only returned when self.mode = 'classification', the output consists of the logits for classes before sof\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n import dgl\n except:\n raise ImportError('This class requires dgl.')\n try:\n import dgllife\n except:\n raise ImportError('This class requires dgllife.')\n if mode not in ['classification', 'regression']:\n raise ValueError(\"mode must be either 'classification' or 'regression'\")\n super(GCN, self).__init__()\n self.n_tasks = n_tasks\n self.mode = mode\n self.n_classes = n_classes\n self.nfeat_name = nfeat_name\n if mode == 'classification':\n out_size = n_tasks * n_classes\n else:\n out_size = n_tasks\n from dgllife.model import GCNPredictor as DGLGCNPredictor\n if graph_conv_layers is None:\n graph_conv_layers = [64, 64]\n num_gnn_layers = len(graph_conv_layers)\n if activation is not None:\n activation = [activation] * num_gnn_layers\n self.model = DGLGCNPredictor(in_feats=number_atom_features, hidden_feats=graph_conv_layers, activation=activation, residual=[residual] * num_gnn_layers, batchnorm=[batchnorm] * num_gnn_layers, dropout=[dropout] * num_gnn_layers, n_tasks=out_size, predictor_hidden_feats=predictor_hidden_feats, predictor_dropout=predictor_dropout)\n<|end_body_0|>\n\n<|body_start_1|>\n node_feats = g.ndata[self.nfeat_name]\n out = self.model(g, node_feats)\n if self.mode == 'classification':\n if self.n_tasks == 1:\n logits = out.view(-1, self.n_classes)\n softmax_dim = 1\n else:\n logits = out.view(-1, self.n_tasks, self.n_classes)\n softmax_dim = 2\n proba = F.softmax(logits, dim=softmax_dim)\n return (proba, logits)\n else:\n return out\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000241", "length_bytes": 14944, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- n_tasks: int Number of tasks. graph_conv_layers: list of int Width of channels for GCN layers. graph_conv_layers[i] gives the width of channel for the i-th GCN layer. If not specified, the default value will be [64, 64]. activation: callable The activation function to apply to the output of each GCN layer. By default, no activation function will be applied. residual: bool Whether to add a residual connection within each GCN layer. Default to True. batchnorm: bool Whether to apply batch normalization to the output of each GCN layer. Default to False. 
dropout: float The dropout probability for the output of each GCN layer. Default to 0. predictor_hidden_feats: int The siz", "name": "__init__", "signature": "def __init__(self, n_tasks: int, graph_conv_layers: Optional[list]=None, activation=None, residual: bool=True, batchnorm: bool=False, dropout: float=0.0, predictor_hidden_feats: int=128, predictor_dropout: float=0.0, mode: str='regression', number_atom_features: int=30, n_classes: int=2, nfeat_name: str='x')"}, {"docstring": "Predict graph labels Parameters ---------- g: DGLGraph A DGLGraph for a batch of graphs. It stores the node features in ``dgl_graph.ndata[self.nfeat_name]``. Returns ------- torch.Tensor The model output. * When self.mode = 'regression', its shape will be ``(dgl_graph.batch_size, self.n_tasks)``. * When self.mode = 'classification', the output consists of probabilities for classes. Its shape will be ``(dgl_graph.batch_size, self.n_tasks, self.n_classes)`` if self.n_tasks > 1; its shape will be ``(dgl_graph.batch_size, self.n_classes)`` if self.n_tasks is 1. torch.Tensor, optional This is only returned when self.mode = 'classification', the output consists of the logits for classes before sof", "name": "forward", "signature": "def forward(self, g)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000144", "prompt": "Implement the Python class `GCN` described below.\n\nClass description:\nModel for Graph Property Prediction Based on Graph Convolution Networks (GCN). This model proceeds as follows: * Update node representations in graphs with a variant of GCN * For each graph, compute its representation by 1) a weighted sum of the node representations in the graph, where the weights are computed by applying a gating function to the node representations 2) a max pooling of the node representations 3) concatenating the output of 1) and 2) * Perform the final prediction using an MLP Examples -------- >>> import deepchem as dc >>> import dgl >>> from deepchem.models import GCN >>> smiles = [\"C1CCC1\", \"C1=CC=CN=C1\"] >>> featurizer = dc.feat.MolGraphConvFeaturizer() >>> graphs = fea\n\nMethod signatures and docstrings:\n- def __init__(self, n_tasks: int, graph_conv_layers: Optional[list]=None, activation=None, residual: bool=True, batchnorm: bool=False, dropout: float=0.0, predictor_hidden_feats: int=128, predictor_dropout: float=0.0, mode: str='regression', number_atom_features: int=30, n_classes: int=2, nfeat_name: str='x'): Parameters ---------- n_tasks: int Number of tasks. graph_conv_layers: list of int Width of channels for GCN layers. graph_conv_layers[i] gives the width of channel for the i-th GCN layer. If not specified, the default value will be [64, 64]. activation: callable The activation function to apply to the output of each GCN layer. By default, no activation function will be applied. residual: bool Whether to add a residual connection within each GCN layer. Default to True. batchnorm: bool Whether to apply batch normalization to the output of each GCN layer. Default to False. dropout: float The dropout probability for the output of each GCN layer. Default to 0. predictor_hidden_feats: int The siz\n- def forward(self, g): Predict graph labels Parameters ---------- g: DGLGraph A DGLGraph for a batch of graphs. It stores the node features in ``dgl_graph.ndata[self.nfeat_name]``. Returns ------- torch.Tensor The model output. * When self.mode = 'regression', its shape will be ``(dgl_graph.batch_size, self.n_tasks)``. 
* When self.mode = 'classification', the output consists of probabilities for classes. Its shape will be ``(dgl_graph.batch_size, self.n_tasks, self.n_classes)`` if self.n_tasks > 1; its shape will be ``(dgl_graph.batch_size, self.n_classes)`` if self.n_tasks is 1. torch.Tensor, optional This is only returned when self.mode = 'classification', the output consists of the logits for classes before sof", "prompted_full_text": "Implement the Python class `GCN` described below.\n\nClass description:\nModel for Graph Property Prediction Based on Graph Convolution Networks (GCN). This model proceeds as follows: * Update node representations in graphs with a variant of GCN * For each graph, compute its representation by 1) a weighted sum of the node representations in the graph, where the weights are computed by applying a gating function to the node representations 2) a max pooling of the node representations 3) concatenating the output of 1) and 2) * Perform the final prediction using an MLP Examples -------- >>> import deepchem as dc >>> import dgl >>> from deepchem.models import GCN >>> smiles = [\"C1CCC1\", \"C1=CC=CN=C1\"] >>> featurizer = dc.feat.MolGraphConvFeaturizer() >>> graphs = fea\n\nMethod signatures and docstrings:\n- def __init__(self, n_tasks: int, graph_conv_layers: Optional[list]=None, activation=None, residual: bool=True, batchnorm: bool=False, dropout: float=0.0, predictor_hidden_feats: int=128, predictor_dropout: float=0.0, mode: str='regression', number_atom_features: int=30, n_classes: int=2, nfeat_name: str='x'): Parameters ---------- n_tasks: int Number of tasks. graph_conv_layers: list of int Width of channels for GCN layers. graph_conv_layers[i] gives the width of channel for the i-th GCN layer. If not specified, the default value will be [64, 64]. activation: callable The activation function to apply to the output of each GCN layer. By default, no activation function will be applied. residual: bool Whether to add a residual connection within each GCN layer. Default to True. batchnorm: bool Whether to apply batch normalization to the output of each GCN layer. Default to False. dropout: float The dropout probability for the output of each GCN layer. Default to 0. predictor_hidden_feats: int The siz\n- def forward(self, g): Predict graph labels Parameters ---------- g: DGLGraph A DGLGraph for a batch of graphs. It stores the node features in ``dgl_graph.ndata[self.nfeat_name]``. Returns ------- torch.Tensor The model output. * When self.mode = 'regression', its shape will be ``(dgl_graph.batch_size, self.n_tasks)``. * When self.mode = 'classification', the output consists of probabilities for classes. Its shape will be ``(dgl_graph.batch_size, self.n_tasks, self.n_classes)`` if self.n_tasks > 1; its shape will be ``(dgl_graph.batch_size, self.n_classes)`` if self.n_tasks is 1. torch.Tensor, optional This is only returned when self.mode = 'classification', the output consists of the logits for classes before sof\n\n<|skeleton|>\nclass GCN:\n \"\"\"Model for Graph Property Prediction Based on Graph Convolution Networks (GCN). 
This model proceeds as follows: * Update node representations in graphs with a variant of GCN * For each graph, compute its representation by 1) a weighted sum of the node representations in the graph, where the weights are computed by applying a gating function to the node representations 2) a max pooling of the node representations 3) concatenating the output of 1) and 2) * Perform the final prediction using an MLP Examples -------- >>> import deepchem as dc >>> import dgl >>> from deepchem.models import GCN >>> smiles = [\"C1CCC1\", \"C1=CC=CN=C1\"] >>> featurizer = dc.feat.MolGraphConvFeaturizer() >>> graphs = fea\"\"\"\n\n def __init__(self, n_tasks: int, graph_conv_layers: Optional[list]=None, activation=None, residual: bool=True, batchnorm: bool=False, dropout: float=0.0, predictor_hidden_feats: int=128, predictor_dropout: float=0.0, mode: str='regression', number_atom_features: int=30, n_classes: int=2, nfeat_name: str='x'):\n \"\"\"Parameters ---------- n_tasks: int Number of tasks. graph_conv_layers: list of int Width of channels for GCN layers. graph_conv_layers[i] gives the width of channel for the i-th GCN layer. If not specified, the default value will be [64, 64]. activation: callable The activation function to apply to the output of each GCN layer. By default, no activation function will be applied. residual: bool Whether to add a residual connection within each GCN layer. Default to True. batchnorm: bool Whether to apply batch normalization to the output of each GCN layer. Default to False. dropout: float The dropout probability for the output of each GCN layer. Default to 0. predictor_hidden_feats: int The siz\"\"\"\n <|body_0|>\n\n def forward(self, g):\n \"\"\"Predict graph labels Parameters ---------- g: DGLGraph A DGLGraph for a batch of graphs. It stores the node features in ``dgl_graph.ndata[self.nfeat_name]``. Returns ------- torch.Tensor The model output. * When self.mode = 'regression', its shape will be ``(dgl_graph.batch_size, self.n_tasks)``. * When self.mode = 'classification', the output consists of probabilities for classes. Its shape will be ``(dgl_graph.batch_size, self.n_tasks, self.n_classes)`` if self.n_tasks > 1; its shape will be ``(dgl_graph.batch_size, self.n_classes)`` if self.n_tasks is 1. 
torch.Tensor, optional This is only returned when self.mode = 'classification', the output consists of the logits for classes before sof\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n import dgl\n except:\n raise ImportError('This class requires dgl.')\n try:\n import dgllife\n except:\n raise ImportError('This class requires dgllife.')\n if mode not in ['classification', 'regression']:\n raise ValueError(\"mode must be either 'classification' or 'regression'\")\n super(GCN, self).__init__()\n self.n_tasks = n_tasks\n self.mode = mode\n self.n_classes = n_classes\n self.nfeat_name = nfeat_name\n if mode == 'classification':\n out_size = n_tasks * n_classes\n else:\n out_size = n_tasks\n from dgllife.model import GCNPredictor as DGLGCNPredictor\n if graph_conv_layers is None:\n graph_conv_layers = [64, 64]\n num_gnn_layers = len(graph_conv_layers)\n if activation is not None:\n activation = [activation] * num_gnn_layers\n self.model = DGLGCNPredictor(in_feats=number_atom_features, hidden_feats=graph_conv_layers, activation=activation, residual=[residual] * num_gnn_layers, batchnorm=[batchnorm] * num_gnn_layers, dropout=[dropout] * num_gnn_layers, n_tasks=out_size, predictor_hidden_feats=predictor_hidden_feats, predictor_dropout=predictor_dropout)\n<|end_body_0|>\n\n<|body_start_1|>\n node_feats = g.ndata[self.nfeat_name]\n out = self.model(g, node_feats)\n if self.mode == 'classification':\n if self.n_tasks == 1:\n logits = out.view(-1, self.n_classes)\n softmax_dim = 1\n else:\n logits = out.view(-1, self.n_tasks, self.n_classes)\n softmax_dim = 2\n proba = F.softmax(logits, dim=softmax_dim)\n return (proba, logits)\n else:\n return out\n<|end_body_1|>\n", "revision_id": "ee6e67ebcf7bf04259cf13aff6388e2b791fea3d", "skeleton": "<|skeleton|>\nclass GCN:\n \"\"\"Model for Graph Property Prediction Based on Graph Convolution Networks (GCN). This model proceeds as follows: * Update node representations in graphs with a variant of GCN * For each graph, compute its representation by 1) a weighted sum of the node representations in the graph, where the weights are computed by applying a gating function to the node representations 2) a max pooling of the node representations 3) concatenating the output of 1) and 2) * Perform the final prediction using an MLP Examples -------- >>> import deepchem as dc >>> import dgl >>> from deepchem.models import GCN >>> smiles = [\"C1CCC1\", \"C1=CC=CN=C1\"] >>> featurizer = dc.feat.MolGraphConvFeaturizer() >>> graphs = fea\"\"\"\n\n def __init__(self, n_tasks: int, graph_conv_layers: Optional[list]=None, activation=None, residual: bool=True, batchnorm: bool=False, dropout: float=0.0, predictor_hidden_feats: int=128, predictor_dropout: float=0.0, mode: str='regression', number_atom_features: int=30, n_classes: int=2, nfeat_name: str='x'):\n \"\"\"Parameters ---------- n_tasks: int Number of tasks. graph_conv_layers: list of int Width of channels for GCN layers. graph_conv_layers[i] gives the width of channel for the i-th GCN layer. If not specified, the default value will be [64, 64]. activation: callable The activation function to apply to the output of each GCN layer. By default, no activation function will be applied. residual: bool Whether to add a residual connection within each GCN layer. Default to True. batchnorm: bool Whether to apply batch normalization to the output of each GCN layer. Default to False. dropout: float The dropout probability for the output of each GCN layer. Default to 0. 
predictor_hidden_feats: int The siz\"\"\"\n <|body_0|>\n\n def forward(self, g):\n \"\"\"Predict graph labels Parameters ---------- g: DGLGraph A DGLGraph for a batch of graphs. It stores the node features in ``dgl_graph.ndata[self.nfeat_name]``. Returns ------- torch.Tensor The model output. * When self.mode = 'regression', its shape will be ``(dgl_graph.batch_size, self.n_tasks)``. * When self.mode = 'classification', the output consists of probabilities for classes. Its shape will be ``(dgl_graph.batch_size, self.n_tasks, self.n_classes)`` if self.n_tasks > 1; its shape will be ``(dgl_graph.batch_size, self.n_classes)`` if self.n_tasks is 1. torch.Tensor, optional This is only returned when self.mode = 'classification', the output consists of the logits for classes before sof\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GCN:\n \"\"\"Model for Graph Property Prediction Based on Graph Convolution Networks (GCN). This model proceeds as follows: * Update node representations in graphs with a variant of GCN * For each graph, compute its representation by 1) a weighted sum of the node representations in the graph, where the weights are computed by applying a gating function to the node representations 2) a max pooling of the node representations 3) concatenating the output of 1) and 2) * Perform the final prediction using an MLP Examples -------- >>> import deepchem as dc >>> import dgl >>> from deepchem.models import GCN >>> smiles = [\"C1CCC1\", \"C1=CC=CN=C1\"] >>> featurizer = dc.feat.MolGraphConvFeaturizer() >>> graphs = fea\"\"\"\n\n def __init__(self, n_tasks: int, graph_conv_layers: Optional[list]=None, activation=None, residual: bool=True, batchnorm: bool=False, dropout: float=0.0, predictor_hidden_feats: int=128, predictor_dropout: float=0.0, mode: str='regression', number_atom_features: int=30, n_classes: int=2, nfeat_name: str='x'):\n \"\"\"Parameters ---------- n_tasks: int Number of tasks. graph_conv_layers: list of int Width of channels for GCN layers. graph_conv_layers[i] gives the width of channel for the i-th GCN layer. If not specified, the default value will be [64, 64]. activation: callable The activation function to apply to the output of each GCN layer. By default, no activation function will be applied. residual: bool Whether to add a residual connection within each GCN layer. Default to True. batchnorm: bool Whether to apply batch normalization to the output of each GCN layer. Default to False. dropout: float The dropout probability for the output of each GCN layer. Default to 0. 
predictor_hidden_feats: int The siz\"\"\"\n try:\n import dgl\n except:\n raise ImportError('This class requires dgl.')\n try:\n import dgllife\n except:\n raise ImportError('This class requires dgllife.')\n if mode not in ['classification', 'regression']:\n raise ValueError(\"mode must be either 'classification' or 'regression'\")\n super(GCN, self).__init__()\n self.n_tasks = n_tasks\n self.mode = mode\n self.n_classes = n_classes\n self.nfeat_name = nfeat_name\n if mode == 'classification':\n out_size = n_tasks * n_classes\n else:\n out_size = n_tasks\n from dgllife.model import GCNPredictor as DGLGCNPredictor\n if graph_conv_layers is None:\n graph_conv_layers = [64, 64]\n num_gnn_layers = len(graph_conv_layers)\n if activation is not None:\n activation = [activation] * num_gnn_layers\n self.model = DGLGCNPredictor(in_feats=number_atom_features, hidden_feats=graph_conv_layers, activation=activation, residual=[residual] * num_gnn_layers, batchnorm=[batchnorm] * num_gnn_layers, dropout=[dropout] * num_gnn_layers, n_tasks=out_size, predictor_hidden_feats=predictor_hidden_feats, predictor_dropout=predictor_dropout)\n\n def forward(self, g):\n \"\"\"Predict graph labels Parameters ---------- g: DGLGraph A DGLGraph for a batch of graphs. It stores the node features in ``dgl_graph.ndata[self.nfeat_name]``. Returns ------- torch.Tensor The model output. * When self.mode = 'regression', its shape will be ``(dgl_graph.batch_size, self.n_tasks)``. * When self.mode = 'classification', the output consists of probabilities for classes. Its shape will be ``(dgl_graph.batch_size, self.n_tasks, self.n_classes)`` if self.n_tasks > 1; its shape will be ``(dgl_graph.batch_size, self.n_classes)`` if self.n_tasks is 1. torch.Tensor, optional This is only returned when self.mode = 'classification', the output consists of the logits for classes before sof\"\"\"\n node_feats = g.ndata[self.nfeat_name]\n out = self.model(g, node_feats)\n if self.mode == 'classification':\n if self.n_tasks == 1:\n logits = out.view(-1, self.n_classes)\n softmax_dim = 1\n else:\n logits = out.view(-1, self.n_tasks, self.n_classes)\n softmax_dim = 2\n proba = F.softmax(logits, dim=softmax_dim)\n return (proba, logits)\n else:\n return out\n", "source": "the_stack_v2_python_sparse", "source_path": "deepchem/models/torch_models/gcn.py", "source_repo": "deepchem/deepchem", "split": "val", "star_events_count": 4876} {"blob_id": "b687a082f7bdae588d2931076c735d695c8e0477", "bodies": ["self.name = name\nself.failed = failed\nself.trimmed_messages = trimmed_messages\nself.messages = messages", "all_messages = self.messages[:]\nstatus_message = '%s %s check %s' % ((FAILED_MESSAGE_PREFIX, self.name, 'failed') if self.failed else (SUCCESS_MESSAGE_PREFIX, self.name, 'passed'))\nall_messages.append(status_message)\nreturn all_messages"], "bodies_text": "<|body_start_0|>\n self.name = name\n self.failed = failed\n self.trimmed_messages = trimmed_messages\n self.messages = messages\n<|end_body_0|>\n\n<|body_start_1|>\n all_messages = self.messages[:]\n status_message = '%s %s check %s' % ((FAILED_MESSAGE_PREFIX, self.name, 'failed') if self.failed else (SUCCESS_MESSAGE_PREFIX, self.name, 'passed'))\n all_messages.append(status_message)\n return all_messages\n<|end_body_1|>\n", "class_docstring": "Task result for concurrent_task_utils.", "class_name": "TaskResult", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TaskResult:\n \"\"\"Task result for 
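The GCN record that closes above delegates graph convolution to `dgllife`'s `GCNPredictor` and keeps only the output reshaping in its own `forward`. A minimal sketch of just that reshape-and-softmax step, assuming only PyTorch is installed; `flat_out` stands in for the predictor output `self.model(g, node_feats)`, and `split_logits` is a hypothetical free function, not part of the record.

```python
# Sketch of the classification head in the GCN record's `forward` body:
# a flat (batch, n_tasks * n_classes) tensor is reshaped per task, then
# softmaxed over the class dimension. Shapes follow the record's docstring.
import torch
import torch.nn.functional as F

def split_logits(flat_out: torch.Tensor, n_tasks: int, n_classes: int):
    """Reshape flat logits and return (probabilities, logits) per task."""
    if n_tasks == 1:
        logits = flat_out.view(-1, n_classes)           # (batch, n_classes)
        proba = F.softmax(logits, dim=1)
    else:
        logits = flat_out.view(-1, n_tasks, n_classes)  # (batch, n_tasks, n_classes)
        proba = F.softmax(logits, dim=2)
    return proba, logits

# Example: a batch of 4 graphs, 3 tasks, 2 classes each.
flat = torch.randn(4, 3 * 2)
proba, logits = split_logits(flat, n_tasks=3, n_classes=2)
assert proba.shape == (4, 3, 2)
assert torch.allclose(proba.sum(-1), torch.ones(4, 3))  # rows sum to 1
```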
concurrent_task_utils.\"\"\"\n\n def __init__(self, name: str, failed: bool, trimmed_messages: List[str], messages: List[str]) -> None:\n \"\"\"Constructs a TaskResult object. Args: name: str. The name of the task. failed: bool. The boolean value representing whether the task failed. trimmed_messages: list(str). List of error messages that are trimmed to keep main part of messages. messages: list(str). List of full messages returned by the objects.\"\"\"\n <|body_0|>\n\n def get_report(self) -> List[str]:\n \"\"\"Returns a list of message with pass or fail status for the current check. Returns: list(str). List of full messages corresponding to the given task.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = name\n self.failed = failed\n self.trimmed_messages = trimmed_messages\n self.messages = messages\n<|end_body_0|>\n\n<|body_start_1|>\n all_messages = self.messages[:]\n status_message = '%s %s check %s' % ((FAILED_MESSAGE_PREFIX, self.name, 'failed') if self.failed else (SUCCESS_MESSAGE_PREFIX, self.name, 'passed'))\n all_messages.append(status_message)\n return all_messages\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000242", "length_bytes": 7841, "license_type": "permissive", "methods": [{"docstring": "Constructs a TaskResult object. Args: name: str. The name of the task. failed: bool. The boolean value representing whether the task failed. trimmed_messages: list(str). List of error messages that are trimmed to keep main part of messages. messages: list(str). List of full messages returned by the objects.", "name": "__init__", "signature": "def __init__(self, name: str, failed: bool, trimmed_messages: List[str], messages: List[str]) -> None"}, {"docstring": "Returns a list of message with pass or fail status for the current check. Returns: list(str). List of full messages corresponding to the given task.", "name": "get_report", "signature": "def get_report(self) -> List[str]"}], "n_methods": 2, "prompt": "Implement the Python class `TaskResult` described below.\n\nClass description:\nTask result for concurrent_task_utils.\n\nMethod signatures and docstrings:\n- def __init__(self, name: str, failed: bool, trimmed_messages: List[str], messages: List[str]) -> None: Constructs a TaskResult object. Args: name: str. The name of the task. failed: bool. The boolean value representing whether the task failed. trimmed_messages: list(str). List of error messages that are trimmed to keep main part of messages. messages: list(str). List of full messages returned by the objects.\n- def get_report(self) -> List[str]: Returns a list of message with pass or fail status for the current check. Returns: list(str). List of full messages corresponding to the given task.", "prompted_full_text": "Implement the Python class `TaskResult` described below.\n\nClass description:\nTask result for concurrent_task_utils.\n\nMethod signatures and docstrings:\n- def __init__(self, name: str, failed: bool, trimmed_messages: List[str], messages: List[str]) -> None: Constructs a TaskResult object. Args: name: str. The name of the task. failed: bool. The boolean value representing whether the task failed. trimmed_messages: list(str). List of error messages that are trimmed to keep main part of messages. messages: list(str). List of full messages returned by the objects.\n- def get_report(self) -> List[str]: Returns a list of message with pass or fail status for the current check. Returns: list(str). 
List of full messages corresponding to the given task.\n\n<|skeleton|>\nclass TaskResult:\n \"\"\"Task result for concurrent_task_utils.\"\"\"\n\n def __init__(self, name: str, failed: bool, trimmed_messages: List[str], messages: List[str]) -> None:\n \"\"\"Constructs a TaskResult object. Args: name: str. The name of the task. failed: bool. The boolean value representing whether the task failed. trimmed_messages: list(str). List of error messages that are trimmed to keep main part of messages. messages: list(str). List of full messages returned by the objects.\"\"\"\n <|body_0|>\n\n def get_report(self) -> List[str]:\n \"\"\"Returns a list of message with pass or fail status for the current check. Returns: list(str). List of full messages corresponding to the given task.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = name\n self.failed = failed\n self.trimmed_messages = trimmed_messages\n self.messages = messages\n<|end_body_0|>\n\n<|body_start_1|>\n all_messages = self.messages[:]\n status_message = '%s %s check %s' % ((FAILED_MESSAGE_PREFIX, self.name, 'failed') if self.failed else (SUCCESS_MESSAGE_PREFIX, self.name, 'passed'))\n all_messages.append(status_message)\n return all_messages\n<|end_body_1|>\n", "revision_id": "d16fdf23d790eafd63812bd7239532256e30a21d", "skeleton": "<|skeleton|>\nclass TaskResult:\n \"\"\"Task result for concurrent_task_utils.\"\"\"\n\n def __init__(self, name: str, failed: bool, trimmed_messages: List[str], messages: List[str]) -> None:\n \"\"\"Constructs a TaskResult object. Args: name: str. The name of the task. failed: bool. The boolean value representing whether the task failed. trimmed_messages: list(str). List of error messages that are trimmed to keep main part of messages. messages: list(str). List of full messages returned by the objects.\"\"\"\n <|body_0|>\n\n def get_report(self) -> List[str]:\n \"\"\"Returns a list of message with pass or fail status for the current check. Returns: list(str). List of full messages corresponding to the given task.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TaskResult:\n \"\"\"Task result for concurrent_task_utils.\"\"\"\n\n def __init__(self, name: str, failed: bool, trimmed_messages: List[str], messages: List[str]) -> None:\n \"\"\"Constructs a TaskResult object. Args: name: str. The name of the task. failed: bool. The boolean value representing whether the task failed. trimmed_messages: list(str). List of error messages that are trimmed to keep main part of messages. messages: list(str). List of full messages returned by the objects.\"\"\"\n self.name = name\n self.failed = failed\n self.trimmed_messages = trimmed_messages\n self.messages = messages\n\n def get_report(self) -> List[str]:\n \"\"\"Returns a list of message with pass or fail status for the current check. Returns: list(str). 
List of full messages corresponding to the given task.\"\"\"\n all_messages = self.messages[:]\n status_message = '%s %s check %s' % ((FAILED_MESSAGE_PREFIX, self.name, 'failed') if self.failed else (SUCCESS_MESSAGE_PREFIX, self.name, 'passed'))\n all_messages.append(status_message)\n return all_messages\n", "source": "the_stack_v2_python_sparse", "source_path": "scripts/concurrent_task_utils.py", "source_repo": "oppia/oppia", "split": "val", "star_events_count": 6172} {"blob_id": "5c8858afdae7d40b663d90f047ba8184acf0ef90", "bodies": ["try:\n registration_profile = self.get(activation_key=activation_key)\nexcept self.model.DoesNotExist:\n return None\nif not registration_profile.is_expired():\n user = registration_profile.user\n user.is_active = True\n user.save()\n registration_profile.delete()\n return user\nelse:\n return None", "new_user = User.objects.create_user(username, email, password)\nnew_user.is_active = False\nnew_user.first_name = first_name\nnew_user.last_name = last_name\nnew_user.save()\nregistration_profile = self.create_registration_profile(new_user)\nregistration_profile.send_activation_email()\nif not registration_profile:\n return None\nreturn new_user", "salt = hashlib.sha1(str(random.random())).hexdigest()[:10]\nactivation_key = hashlib.sha1(salt + user.username).hexdigest()\nreturn self.create(user=user, activation_key=activation_key)"], "bodies_text": "<|body_start_0|>\n try:\n registration_profile = self.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return None\n if not registration_profile.is_expired():\n user = registration_profile.user\n user.is_active = True\n user.save()\n registration_profile.delete()\n return user\n else:\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n new_user = User.objects.create_user(username, email, password)\n new_user.is_active = False\n new_user.first_name = first_name\n new_user.last_name = last_name\n new_user.save()\n registration_profile = self.create_registration_profile(new_user)\n registration_profile.send_activation_email()\n if not registration_profile:\n return None\n return new_user\n<|end_body_1|>\n\n<|body_start_2|>\n salt = hashlib.sha1(str(random.random())).hexdigest()[:10]\n activation_key = hashlib.sha1(salt + user.username).hexdigest()\n return self.create(user=user, activation_key=activation_key)\n<|end_body_2|>\n", "class_docstring": "RegistrationManager Methods: activate_account create_inactive_user create_registration_profile", "class_name": "RegistrationManager", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RegistrationManager:\n \"\"\"RegistrationManager Methods: activate_account create_inactive_user create_registration_profile\"\"\"\n\n def activate_account(self, activation_key):\n \"\"\"Activate Account Given an activation key, this function queries the DB to see if there is a matching registration profile. If there is, the associated user is activated.\"\"\"\n <|body_0|>\n\n def create_inactive_user(self, username, email, password, first_name=None, last_name=None):\n \"\"\"Creates and inactive user Creates inactive user and an associated registration profile which contains an activation key. 
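The TaskResult record that closes above is self-contained apart from two module-level constants the snippet never defines. A runnable sketch with placeholder prefix values (oppia's actual strings are not shown in the record, so the values below are assumptions):

```python
# Exercise the TaskResult class from the record above. FAILED_MESSAGE_PREFIX
# and SUCCESS_MESSAGE_PREFIX are placeholders for constants defined elsewhere
# in scripts/concurrent_task_utils.py.
from typing import List

FAILED_MESSAGE_PREFIX = 'FAILED'     # placeholder value
SUCCESS_MESSAGE_PREFIX = 'SUCCESS'   # placeholder value

class TaskResult:
    def __init__(self, name: str, failed: bool,
                 trimmed_messages: List[str], messages: List[str]) -> None:
        self.name = name
        self.failed = failed
        self.trimmed_messages = trimmed_messages
        self.messages = messages

    def get_report(self) -> List[str]:
        all_messages = self.messages[:]  # copy so callers cannot mutate state
        status_message = '%s %s check %s' % (
            (FAILED_MESSAGE_PREFIX, self.name, 'failed') if self.failed
            else (SUCCESS_MESSAGE_PREFIX, self.name, 'passed'))
        all_messages.append(status_message)
        return all_messages

result = TaskResult('lint', failed=False, trimmed_messages=[], messages=['ok'])
print(result.get_report())  # ['ok', 'SUCCESS lint check passed']
```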
An email is then sent to the user with the key so they can activate their account.\"\"\"\n <|body_1|>\n\n def create_registration_profile(self, user):\n \"\"\"Create profile for registration Generates a hashed activation key to create a registration profile\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n registration_profile = self.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return None\n if not registration_profile.is_expired():\n user = registration_profile.user\n user.is_active = True\n user.save()\n registration_profile.delete()\n return user\n else:\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n new_user = User.objects.create_user(username, email, password)\n new_user.is_active = False\n new_user.first_name = first_name\n new_user.last_name = last_name\n new_user.save()\n registration_profile = self.create_registration_profile(new_user)\n registration_profile.send_activation_email()\n if not registration_profile:\n return None\n return new_user\n<|end_body_1|>\n\n<|body_start_2|>\n salt = hashlib.sha1(str(random.random())).hexdigest()[:10]\n activation_key = hashlib.sha1(salt + user.username).hexdigest()\n return self.create(user=user, activation_key=activation_key)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000243", "length_bytes": 4537, "license_type": "no_license", "methods": [{"docstring": "Activate Account Given an activation key, this function queries the DB to see if there is a matching registration profile. If there is, the associated user is activated.", "name": "activate_account", "signature": "def activate_account(self, activation_key)"}, {"docstring": "Creates and inactive user Creates inactive user and an associated registration profile which contains an activation key. An email is then sent to the user with the key so they can activate their account.", "name": "create_inactive_user", "signature": "def create_inactive_user(self, username, email, password, first_name=None, last_name=None)"}, {"docstring": "Create profile for registration Generates a hashed activation key to create a registration profile", "name": "create_registration_profile", "signature": "def create_registration_profile(self, user)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005417", "prompt": "Implement the Python class `RegistrationManager` described below.\n\nClass description:\nRegistrationManager Methods: activate_account create_inactive_user create_registration_profile\n\nMethod signatures and docstrings:\n- def activate_account(self, activation_key): Activate Account Given an activation key, this function queries the DB to see if there is a matching registration profile. If there is, the associated user is activated.\n- def create_inactive_user(self, username, email, password, first_name=None, last_name=None): Creates and inactive user Creates inactive user and an associated registration profile which contains an activation key. 
An email is then sent to the user with the key so they can activate their account.\n- def create_registration_profile(self, user): Create profile for registration Generates a hashed activation key to create a registration profile", "prompted_full_text": "Implement the Python class `RegistrationManager` described below.\n\nClass description:\nRegistrationManager Methods: activate_account create_inactive_user create_registration_profile\n\nMethod signatures and docstrings:\n- def activate_account(self, activation_key): Activate Account Given an activation key, this function queries the DB to see if there is a matching registration profile. If there is, the associated user is activated.\n- def create_inactive_user(self, username, email, password, first_name=None, last_name=None): Creates and inactive user Creates inactive user and an associated registration profile which contains an activation key. An email is then sent to the user with the key so they can activate their account.\n- def create_registration_profile(self, user): Create profile for registration Generates a hashed activation key to create a registration profile\n\n<|skeleton|>\nclass RegistrationManager:\n \"\"\"RegistrationManager Methods: activate_account create_inactive_user create_registration_profile\"\"\"\n\n def activate_account(self, activation_key):\n \"\"\"Activate Account Given an activation key, this function queries the DB to see if there is a matching registration profile. If there is, the associated user is activated.\"\"\"\n <|body_0|>\n\n def create_inactive_user(self, username, email, password, first_name=None, last_name=None):\n \"\"\"Creates and inactive user Creates inactive user and an associated registration profile which contains an activation key. An email is then sent to the user with the key so they can activate their account.\"\"\"\n <|body_1|>\n\n def create_registration_profile(self, user):\n \"\"\"Create profile for registration Generates a hashed activation key to create a registration profile\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n registration_profile = self.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return None\n if not registration_profile.is_expired():\n user = registration_profile.user\n user.is_active = True\n user.save()\n registration_profile.delete()\n return user\n else:\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n new_user = User.objects.create_user(username, email, password)\n new_user.is_active = False\n new_user.first_name = first_name\n new_user.last_name = last_name\n new_user.save()\n registration_profile = self.create_registration_profile(new_user)\n registration_profile.send_activation_email()\n if not registration_profile:\n return None\n return new_user\n<|end_body_1|>\n\n<|body_start_2|>\n salt = hashlib.sha1(str(random.random())).hexdigest()[:10]\n activation_key = hashlib.sha1(salt + user.username).hexdigest()\n return self.create(user=user, activation_key=activation_key)\n<|end_body_2|>\n", "revision_id": "e04aae54afb6ba6c138f4253ca7be32faea0da81", "skeleton": "<|skeleton|>\nclass RegistrationManager:\n \"\"\"RegistrationManager Methods: activate_account create_inactive_user create_registration_profile\"\"\"\n\n def activate_account(self, activation_key):\n \"\"\"Activate Account Given an activation key, this function queries the DB to see if there is a matching registration profile. 
If there is, the associated user is activated.\"\"\"\n <|body_0|>\n\n def create_inactive_user(self, username, email, password, first_name=None, last_name=None):\n \"\"\"Creates and inactive user Creates inactive user and an associated registration profile which contains an activation key. An email is then sent to the user with the key so they can activate their account.\"\"\"\n <|body_1|>\n\n def create_registration_profile(self, user):\n \"\"\"Create profile for registration Generates a hashed activation key to create a registration profile\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RegistrationManager:\n \"\"\"RegistrationManager Methods: activate_account create_inactive_user create_registration_profile\"\"\"\n\n def activate_account(self, activation_key):\n \"\"\"Activate Account Given an activation key, this function queries the DB to see if there is a matching registration profile. If there is, the associated user is activated.\"\"\"\n try:\n registration_profile = self.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return None\n if not registration_profile.is_expired():\n user = registration_profile.user\n user.is_active = True\n user.save()\n registration_profile.delete()\n return user\n else:\n return None\n\n def create_inactive_user(self, username, email, password, first_name=None, last_name=None):\n \"\"\"Creates and inactive user Creates inactive user and an associated registration profile which contains an activation key. An email is then sent to the user with the key so they can activate their account.\"\"\"\n new_user = User.objects.create_user(username, email, password)\n new_user.is_active = False\n new_user.first_name = first_name\n new_user.last_name = last_name\n new_user.save()\n registration_profile = self.create_registration_profile(new_user)\n registration_profile.send_activation_email()\n if not registration_profile:\n return None\n return new_user\n\n def create_registration_profile(self, user):\n \"\"\"Create profile for registration Generates a hashed activation key to create a registration profile\"\"\"\n salt = hashlib.sha1(str(random.random())).hexdigest()[:10]\n activation_key = hashlib.sha1(salt + user.username).hexdigest()\n return self.create(user=user, activation_key=activation_key)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/debitum/accounts/models.py", "source_repo": "keunhong/oweapp", "split": "val", "star_events_count": 0} {"blob_id": "f269327cb123dafd350fbf7b171333a5edd45f6d", "bodies": ["import collections\nimport itertools\nfrom __builtin__ import xrange\nf = collections.defaultdict(list)\nfor a, b, c in allowed:\n f[a + b].append(c)\nmemo = {}\n\ndef pyramid(bottom):\n if bottom not in memo:\n memo[bottom] = len(bottom) == 1 or any((pyramid(''.join(i)) for i in itertools.product(*(f[bottom[x:x + 2]] for x in xrange(len(bottom) - 1)))))\n return memo[bottom]\nreturn pyramid(bottom)", "from itertools import product\nfrom collections import defaultdict\nfrom __builtin__ import xrange\ndmap = defaultdict(list)\nfor a in allowed:\n dmap[a[:2]].append(a[-1])\nmemo = {}\n\ndef mm(key):\n if len(key) == 1:\n memo[key] = True\n return memo[key]\n if key not in memo:\n bot = [key[i:i + 2] for i in xrange(len(key) - 1)]\n next_keys = [dmap[b] for b in bot]\n next_keys = [''.join(nk) for nk in product(*next_keys)]\n result = any((mm(n) for n in next_keys))\n memo[key] = result\n return 
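Two things are worth flagging in the RegistrationManager record that closes above. First, `create_registration_profile` is Python 2 code: on Python 3, `hashlib.sha1(str(random.random()))` raises TypeError because sha1 requires bytes. Second, the `if not registration_profile` guard in `create_inactive_user` runs after `send_activation_email()` and after `self.create(...)` has already returned an object, so it can never take effect. A Python 3 sketch of the key generation only, using `secrets` rather than `random` (which is not suitable for security tokens); `make_activation_key` is a hypothetical standalone helper, not Django-bound:

```python
# Python 3 restatement of the activation-key derivation in the record above.
import hashlib
import secrets

def make_activation_key(username: str) -> str:
    salt = secrets.token_hex(5)  # 10 hex chars, matching the sha1-salt slice
    return hashlib.sha1((salt + username).encode('utf-8')).hexdigest()

print(make_activation_key('alice'))  # 40-char hex digest
```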
memo[key]\nreturn mm(bottom)"], "bodies_text": "<|body_start_0|>\n import collections\n import itertools\n from __builtin__ import xrange\n f = collections.defaultdict(list)\n for a, b, c in allowed:\n f[a + b].append(c)\n memo = {}\n\n def pyramid(bottom):\n if bottom not in memo:\n memo[bottom] = len(bottom) == 1 or any((pyramid(''.join(i)) for i in itertools.product(*(f[bottom[x:x + 2]] for x in xrange(len(bottom) - 1)))))\n return memo[bottom]\n return pyramid(bottom)\n<|end_body_0|>\n\n<|body_start_1|>\n from itertools import product\n from collections import defaultdict\n from __builtin__ import xrange\n dmap = defaultdict(list)\n for a in allowed:\n dmap[a[:2]].append(a[-1])\n memo = {}\n\n def mm(key):\n if len(key) == 1:\n memo[key] = True\n return memo[key]\n if key not in memo:\n bot = [key[i:i + 2] for i in xrange(len(key) - 1)]\n next_keys = [dmap[b] for b in bot]\n next_keys = [''.join(nk) for nk in product(*next_keys)]\n result = any((mm(n) for n in next_keys))\n memo[key] = result\n return memo[key]\n return mm(bottom)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def pyramidTransition(self, bottom, allowed):\n \"\"\":type bottom: str :type allowed: List[str] :rtype: bool\"\"\"\n <|body_0|>\n\n def rewrite(self, bottom, allowed):\n \"\"\":type bottom: str :type allowed: List[str] :rtype: bool DP, try them all!\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n import collections\n import itertools\n from __builtin__ import xrange\n f = collections.defaultdict(list)\n for a, b, c in allowed:\n f[a + b].append(c)\n memo = {}\n\n def pyramid(bottom):\n if bottom not in memo:\n memo[bottom] = len(bottom) == 1 or any((pyramid(''.join(i)) for i in itertools.product(*(f[bottom[x:x + 2]] for x in xrange(len(bottom) - 1)))))\n return memo[bottom]\n return pyramid(bottom)\n<|end_body_0|>\n\n<|body_start_1|>\n from itertools import product\n from collections import defaultdict\n from __builtin__ import xrange\n dmap = defaultdict(list)\n for a in allowed:\n dmap[a[:2]].append(a[-1])\n memo = {}\n\n def mm(key):\n if len(key) == 1:\n memo[key] = True\n return memo[key]\n if key not in memo:\n bot = [key[i:i + 2] for i in xrange(len(key) - 1)]\n next_keys = [dmap[b] for b in bot]\n next_keys = [''.join(nk) for nk in product(*next_keys)]\n result = any((mm(n) for n in next_keys))\n memo[key] = result\n return memo[key]\n return mm(bottom)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000244", "length_bytes": 3290, "license_type": "no_license", "methods": [{"docstring": ":type bottom: str :type allowed: List[str] :rtype: bool", "name": "pyramidTransition", "signature": "def pyramidTransition(self, bottom, allowed)"}, {"docstring": ":type bottom: str :type allowed: List[str] :rtype: bool DP, try them all!", "name": "rewrite", "signature": "def rewrite(self, bottom, allowed)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001845", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def pyramidTransition(self, bottom, allowed): :type bottom: str :type allowed: List[str] :rtype: bool\n- def rewrite(self, bottom, allowed): :type bottom: str :type allowed: List[str] :rtype: bool DP, try them all!", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass 
description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def pyramidTransition(self, bottom, allowed): :type bottom: str :type allowed: List[str] :rtype: bool\n- def rewrite(self, bottom, allowed): :type bottom: str :type allowed: List[str] :rtype: bool DP, try them all!\n\n<|skeleton|>\nclass Solution:\n\n def pyramidTransition(self, bottom, allowed):\n \"\"\":type bottom: str :type allowed: List[str] :rtype: bool\"\"\"\n <|body_0|>\n\n def rewrite(self, bottom, allowed):\n \"\"\":type bottom: str :type allowed: List[str] :rtype: bool DP, try them all!\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n import collections\n import itertools\n from __builtin__ import xrange\n f = collections.defaultdict(list)\n for a, b, c in allowed:\n f[a + b].append(c)\n memo = {}\n\n def pyramid(bottom):\n if bottom not in memo:\n memo[bottom] = len(bottom) == 1 or any((pyramid(''.join(i)) for i in itertools.product(*(f[bottom[x:x + 2]] for x in xrange(len(bottom) - 1)))))\n return memo[bottom]\n return pyramid(bottom)\n<|end_body_0|>\n\n<|body_start_1|>\n from itertools import product\n from collections import defaultdict\n from __builtin__ import xrange\n dmap = defaultdict(list)\n for a in allowed:\n dmap[a[:2]].append(a[-1])\n memo = {}\n\n def mm(key):\n if len(key) == 1:\n memo[key] = True\n return memo[key]\n if key not in memo:\n bot = [key[i:i + 2] for i in xrange(len(key) - 1)]\n next_keys = [dmap[b] for b in bot]\n next_keys = [''.join(nk) for nk in product(*next_keys)]\n result = any((mm(n) for n in next_keys))\n memo[key] = result\n return memo[key]\n return mm(bottom)\n<|end_body_1|>\n", "revision_id": "6350568d16b0f8c49a020f055bb6d72e2705ea56", "skeleton": "<|skeleton|>\nclass Solution:\n\n def pyramidTransition(self, bottom, allowed):\n \"\"\":type bottom: str :type allowed: List[str] :rtype: bool\"\"\"\n <|body_0|>\n\n def rewrite(self, bottom, allowed):\n \"\"\":type bottom: str :type allowed: List[str] :rtype: bool DP, try them all!\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def pyramidTransition(self, bottom, allowed):\n \"\"\":type bottom: str :type allowed: List[str] :rtype: bool\"\"\"\n import collections\n import itertools\n from __builtin__ import xrange\n f = collections.defaultdict(list)\n for a, b, c in allowed:\n f[a + b].append(c)\n memo = {}\n\n def pyramid(bottom):\n if bottom not in memo:\n memo[bottom] = len(bottom) == 1 or any((pyramid(''.join(i)) for i in itertools.product(*(f[bottom[x:x + 2]] for x in xrange(len(bottom) - 1)))))\n return memo[bottom]\n return pyramid(bottom)\n\n def rewrite(self, bottom, allowed):\n \"\"\":type bottom: str :type allowed: List[str] :rtype: bool DP, try them all!\"\"\"\n from itertools import product\n from collections import defaultdict\n from __builtin__ import xrange\n dmap = defaultdict(list)\n for a in allowed:\n dmap[a[:2]].append(a[-1])\n memo = {}\n\n def mm(key):\n if len(key) == 1:\n memo[key] = True\n return memo[key]\n if key not in memo:\n bot = [key[i:i + 2] for i in xrange(len(key) - 1)]\n next_keys = [dmap[b] for b in bot]\n next_keys = [''.join(nk) for nk in product(*next_keys)]\n result = any((mm(n) for n in next_keys))\n memo[key] = result\n return memo[key]\n return mm(bottom)\n", "source": "the_stack_v2_python_sparse", "source_path": "depth-first-search/756_Pyramid_Transition_Matrix.py", "source_repo": "vsdrun/lc_public", "split": "val", 
"star_events_count": 6} {"blob_id": "4a0bab33da7154d9fe2820b3ab96d4e0b7380a68", "bodies": ["self.prot_attr = prot_attr\nself.estimator = estimator\nself.constraints = constraints\nself.constraint_weight = constraint_weight\nself.grid_size = grid_size\nself.grid_limit = grid_limit\nself.grid = grid\nself.drop_prot_attr = drop_prot_attr\nself.loss = loss\nself.min_val = min_val\nself.max_val = max_val", "self.estimator_ = clone(self.estimator)\nmoments = {'DemographicParity': red.DemographicParity, 'EqualizedOdds': red.EqualizedOdds, 'TruePositiveRateParity': red.TruePositiveRateParity, 'FalsePositiveRateParity': red.FalsePositiveRateParity, 'ErrorRateParity': red.ErrorRateParity, 'BoundedGroupLoss': red.BoundedGroupLoss}\nif isinstance(self.constraints, str):\n if self.constraints not in moments:\n raise ValueError(f'Constraint not recognized: {self.constraints}')\n if self.constraints == 'BoundedGroupLoss':\n losses = {'ZeroOne': red.ZeroOneLoss, 'Square': red.SquareLoss, 'Absolute': red.AbsoluteLoss}\n if self.loss == 'ZeroOne':\n self.loss_ = losses[self.loss]()\n else:\n self.loss_ = losses[self.loss](self.min_val, self.max_val)\n self.moment_ = moments[self.constraints](loss=self.loss_)\n else:\n self.moment_ = moments[self.constraints]()\nelif isinstance(self.constraints, red.Moment):\n self.moment_ = self.constraints\nelse:\n raise ValueError('constraints must be a string or Moment object.')\nself.model_ = red.GridSearch(estimator=self.estimator_, constraints=self.moment_, constraint_weight=self.constraint_weight, grid_size=self.grid_size, grid_limit=self.grid_limit, grid=self.grid)\nA = X[self.prot_attr]\nif self.drop_prot_attr:\n X = X.drop(self.prot_attr, axis=1)\nif isinstance(self.model_.constraints, red.ClassificationMoment):\n le = LabelEncoder()\n y = le.fit_transform(y)\n self.classes_ = le.classes_\nself.model_.fit(X, y, sensitive_features=A)\nreturn self", "if self.drop_prot_attr:\n X = X.drop(self.prot_attr, axis=1)\nreturn self.model_.predict(X)", "if self.drop_prot_attr:\n X = X.drop(self.prot_attr, axis=1)\nif isinstance(self.model_.constraints, red.ClassificationMoment):\n return self.model_.predict_proba(X)\nraise NotImplementedError('Underlying model does not support predict_proba')"], "bodies_text": "<|body_start_0|>\n self.prot_attr = prot_attr\n self.estimator = estimator\n self.constraints = constraints\n self.constraint_weight = constraint_weight\n self.grid_size = grid_size\n self.grid_limit = grid_limit\n self.grid = grid\n self.drop_prot_attr = drop_prot_attr\n self.loss = loss\n self.min_val = min_val\n self.max_val = max_val\n<|end_body_0|>\n\n<|body_start_1|>\n self.estimator_ = clone(self.estimator)\n moments = {'DemographicParity': red.DemographicParity, 'EqualizedOdds': red.EqualizedOdds, 'TruePositiveRateParity': red.TruePositiveRateParity, 'FalsePositiveRateParity': red.FalsePositiveRateParity, 'ErrorRateParity': red.ErrorRateParity, 'BoundedGroupLoss': red.BoundedGroupLoss}\n if isinstance(self.constraints, str):\n if self.constraints not in moments:\n raise ValueError(f'Constraint not recognized: {self.constraints}')\n if self.constraints == 'BoundedGroupLoss':\n losses = {'ZeroOne': red.ZeroOneLoss, 'Square': red.SquareLoss, 'Absolute': red.AbsoluteLoss}\n if self.loss == 'ZeroOne':\n self.loss_ = losses[self.loss]()\n else:\n self.loss_ = losses[self.loss](self.min_val, self.max_val)\n self.moment_ = moments[self.constraints](loss=self.loss_)\n else:\n self.moment_ = moments[self.constraints]()\n elif isinstance(self.constraints, red.Moment):\n 
self.moment_ = self.constraints\n else:\n raise ValueError('constraints must be a string or Moment object.')\n self.model_ = red.GridSearch(estimator=self.estimator_, constraints=self.moment_, constraint_weight=self.constraint_weight, grid_size=self.grid_size, grid_limit=self.grid_limit, grid=self.grid)\n A = X[self.prot_attr]\n if self.drop_prot_attr:\n X = X.drop(self.prot_attr, axis=1)\n if isinstance(self.model_.constraints, red.ClassificationMoment):\n le = LabelEncoder()\n y = le.fit_transform(y)\n self.classes_ = le.classes_\n self.model_.fit(X, y, sensitive_features=A)\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n if self.drop_prot_attr:\n X = X.drop(self.prot_attr, axis=1)\n return self.model_.predict(X)\n<|end_body_2|>\n\n<|body_start_3|>\n if self.drop_prot_attr:\n X = X.drop(self.prot_attr, axis=1)\n if isinstance(self.model_.constraints, red.ClassificationMoment):\n return self.model_.predict_proba(X)\n raise NotImplementedError('Underlying model does not support predict_proba')\n<|end_body_3|>\n", "class_docstring": "Grid search reduction for fair classification or regression. Grid search is an in-processing technique that can be used for fair classification or fair regression. For classification it reduces fair classification to a sequence of cost-sensitive classification problems, returning the deterministic classifier with the lowest empirical error subject to fair classification constraints [#agarwal18]_ among the candidates searched. For regression it uses the same priniciple to return a deterministic regressor with the lowest empirical error subject to the constraint of bounded group loss [#agarwal19]_. References: .. [#agarwal18] `A. Agarwal, A. Beygelzimer, M. Dudik, J. Langford, and H. Wallach,", "class_name": "GridSearchReduction", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GridSearchReduction:\n \"\"\"Grid search reduction for fair classification or regression. Grid search is an in-processing technique that can be used for fair classification or fair regression. For classification it reduces fair classification to a sequence of cost-sensitive classification problems, returning the deterministic classifier with the lowest empirical error subject to fair classification constraints [#agarwal18]_ among the candidates searched. For regression it uses the same priniciple to return a deterministic regressor with the lowest empirical error subject to the constraint of bounded group loss [#agarwal19]_. References: .. [#agarwal18] `A. Agarwal, A. Beygelzimer, M. Dudik, J. Langford, and H. Wallach,\"\"\"\n\n def __init__(self, prot_attr, estimator, constraints, constraint_weight=0.5, grid_size=10, grid_limit=2.0, grid=None, drop_prot_attr=True, loss='ZeroOne', min_val=None, max_val=None):\n \"\"\"Args: prot_attr: String or array-like column indices or column names of protected attributes. estimator: An estimator implementing methods ``fit(X, y, sample_weight)`` and ``predict(X)``, where ``X`` is the matrix of features, ``y`` is the vector of labels, and ``sample_weight`` is a vector of weights; labels ``y`` and predictions returned by ``predict(X)`` are either 0 or 1 -- e.g. scikit-learn classifiers/regressors. constraints (str or fairlearn.reductions.Moment): If string, keyword denoting the :class:`fairlearn.reductions.Moment` object defining the disparity constraints -- e.g., \"DemographicParity\" or \"EqualizedOdds\". For a full list of possible options see `self.model.moments`. 
Other\"\"\"\n <|body_0|>\n\n def fit(self, X, y):\n \"\"\"Train a less biased classifier or regressor with the given training data. Args: X (pandas.DataFrame): Training samples. y (array-like): Training output. Returns: self\"\"\"\n <|body_1|>\n\n def predict(self, X):\n \"\"\"Predict output for the given samples. Args: X (pandas.DataFrame): Test samples. Returns: numpy.ndarray: Predicted output per sample.\"\"\"\n <|body_2|>\n\n def predict_proba(self, X):\n \"\"\"Probability estimates. The returned estimates for all classes are ordered by the label of classes for classification. Args: X (pandas.DataFrame): Test samples. Returns: numpy.ndarray: returns the probability of the sample for each class in the model, where classes are ordered as they are in ``self.classes_``.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.prot_attr = prot_attr\n self.estimator = estimator\n self.constraints = constraints\n self.constraint_weight = constraint_weight\n self.grid_size = grid_size\n self.grid_limit = grid_limit\n self.grid = grid\n self.drop_prot_attr = drop_prot_attr\n self.loss = loss\n self.min_val = min_val\n self.max_val = max_val\n<|end_body_0|>\n\n<|body_start_1|>\n self.estimator_ = clone(self.estimator)\n moments = {'DemographicParity': red.DemographicParity, 'EqualizedOdds': red.EqualizedOdds, 'TruePositiveRateParity': red.TruePositiveRateParity, 'FalsePositiveRateParity': red.FalsePositiveRateParity, 'ErrorRateParity': red.ErrorRateParity, 'BoundedGroupLoss': red.BoundedGroupLoss}\n if isinstance(self.constraints, str):\n if self.constraints not in moments:\n raise ValueError(f'Constraint not recognized: {self.constraints}')\n if self.constraints == 'BoundedGroupLoss':\n losses = {'ZeroOne': red.ZeroOneLoss, 'Square': red.SquareLoss, 'Absolute': red.AbsoluteLoss}\n if self.loss == 'ZeroOne':\n self.loss_ = losses[self.loss]()\n else:\n self.loss_ = losses[self.loss](self.min_val, self.max_val)\n self.moment_ = moments[self.constraints](loss=self.loss_)\n else:\n self.moment_ = moments[self.constraints]()\n elif isinstance(self.constraints, red.Moment):\n self.moment_ = self.constraints\n else:\n raise ValueError('constraints must be a string or Moment object.')\n self.model_ = red.GridSearch(estimator=self.estimator_, constraints=self.moment_, constraint_weight=self.constraint_weight, grid_size=self.grid_size, grid_limit=self.grid_limit, grid=self.grid)\n A = X[self.prot_attr]\n if self.drop_prot_attr:\n X = X.drop(self.prot_attr, axis=1)\n if isinstance(self.model_.constraints, red.ClassificationMoment):\n le = LabelEncoder()\n y = le.fit_transform(y)\n self.classes_ = le.classes_\n self.model_.fit(X, y, sensitive_features=A)\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n if self.drop_prot_attr:\n X = X.drop(self.prot_attr, axis=1)\n return self.model_.predict(X)\n<|end_body_2|>\n\n<|body_start_3|>\n if self.drop_prot_attr:\n X = X.drop(self.prot_attr, axis=1)\n if isinstance(self.model_.constraints, red.ClassificationMoment):\n return self.model_.predict_proba(X)\n raise NotImplementedError('Underlying model does not support predict_proba')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000245", "length_bytes": 8519, "license_type": "permissive", "methods": [{"docstring": "Args: prot_attr: String or array-like column indices or column names of protected attributes. 
estimator: An estimator implementing methods ``fit(X, y, sample_weight)`` and ``predict(X)``, where ``X`` is the matrix of features, ``y`` is the vector of labels, and ``sample_weight`` is a vector of weights; labels ``y`` and predictions returned by ``predict(X)`` are either 0 or 1 -- e.g. scikit-learn classifiers/regressors. constraints (str or fairlearn.reductions.Moment): If string, keyword denoting the :class:`fairlearn.reductions.Moment` object defining the disparity constraints -- e.g., \"DemographicParity\" or \"EqualizedOdds\". For a full list of possible options see `self.model.moments`. Other", "name": "__init__", "signature": "def __init__(self, prot_attr, estimator, constraints, constraint_weight=0.5, grid_size=10, grid_limit=2.0, grid=None, drop_prot_attr=True, loss='ZeroOne', min_val=None, max_val=None)"}, {"docstring": "Train a less biased classifier or regressor with the given training data. Args: X (pandas.DataFrame): Training samples. y (array-like): Training output. Returns: self", "name": "fit", "signature": "def fit(self, X, y)"}, {"docstring": "Predict output for the given samples. Args: X (pandas.DataFrame): Test samples. Returns: numpy.ndarray: Predicted output per sample.", "name": "predict", "signature": "def predict(self, X)"}, {"docstring": "Probability estimates. The returned estimates for all classes are ordered by the label of classes for classification. Args: X (pandas.DataFrame): Test samples. Returns: numpy.ndarray: returns the probability of the sample for each class in the model, where classes are ordered as they are in ``self.classes_``.", "name": "predict_proba", "signature": "def predict_proba(self, X)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_003159", "prompt": "Implement the Python class `GridSearchReduction` described below.\n\nClass description:\nGrid search reduction for fair classification or regression. Grid search is an in-processing technique that can be used for fair classification or fair regression. For classification it reduces fair classification to a sequence of cost-sensitive classification problems, returning the deterministic classifier with the lowest empirical error subject to fair classification constraints [#agarwal18]_ among the candidates searched. For regression it uses the same priniciple to return a deterministic regressor with the lowest empirical error subject to the constraint of bounded group loss [#agarwal19]_. References: .. [#agarwal18] `A. Agarwal, A. Beygelzimer, M. Dudik, J. Langford, and H. Wallach,\n\nMethod signatures and docstrings:\n- def __init__(self, prot_attr, estimator, constraints, constraint_weight=0.5, grid_size=10, grid_limit=2.0, grid=None, drop_prot_attr=True, loss='ZeroOne', min_val=None, max_val=None): Args: prot_attr: String or array-like column indices or column names of protected attributes. estimator: An estimator implementing methods ``fit(X, y, sample_weight)`` and ``predict(X)``, where ``X`` is the matrix of features, ``y`` is the vector of labels, and ``sample_weight`` is a vector of weights; labels ``y`` and predictions returned by ``predict(X)`` are either 0 or 1 -- e.g. scikit-learn classifiers/regressors. constraints (str or fairlearn.reductions.Moment): If string, keyword denoting the :class:`fairlearn.reductions.Moment` object defining the disparity constraints -- e.g., \"DemographicParity\" or \"EqualizedOdds\". For a full list of possible options see `self.model.moments`. 
Other\n- def fit(self, X, y): Train a less biased classifier or regressor with the given training data. Args: X (pandas.DataFrame): Training samples. y (array-like): Training output. Returns: self\n- def predict(self, X): Predict output for the given samples. Args: X (pandas.DataFrame): Test samples. Returns: numpy.ndarray: Predicted output per sample.\n- def predict_proba(self, X): Probability estimates. The returned estimates for all classes are ordered by the label of classes for classification. Args: X (pandas.DataFrame): Test samples. Returns: numpy.ndarray: returns the probability of the sample for each class in the model, where classes are ordered as they are in ``self.classes_``.", "prompted_full_text": "Implement the Python class `GridSearchReduction` described below.\n\nClass description:\nGrid search reduction for fair classification or regression. Grid search is an in-processing technique that can be used for fair classification or fair regression. For classification it reduces fair classification to a sequence of cost-sensitive classification problems, returning the deterministic classifier with the lowest empirical error subject to fair classification constraints [#agarwal18]_ among the candidates searched. For regression it uses the same priniciple to return a deterministic regressor with the lowest empirical error subject to the constraint of bounded group loss [#agarwal19]_. References: .. [#agarwal18] `A. Agarwal, A. Beygelzimer, M. Dudik, J. Langford, and H. Wallach,\n\nMethod signatures and docstrings:\n- def __init__(self, prot_attr, estimator, constraints, constraint_weight=0.5, grid_size=10, grid_limit=2.0, grid=None, drop_prot_attr=True, loss='ZeroOne', min_val=None, max_val=None): Args: prot_attr: String or array-like column indices or column names of protected attributes. estimator: An estimator implementing methods ``fit(X, y, sample_weight)`` and ``predict(X)``, where ``X`` is the matrix of features, ``y`` is the vector of labels, and ``sample_weight`` is a vector of weights; labels ``y`` and predictions returned by ``predict(X)`` are either 0 or 1 -- e.g. scikit-learn classifiers/regressors. constraints (str or fairlearn.reductions.Moment): If string, keyword denoting the :class:`fairlearn.reductions.Moment` object defining the disparity constraints -- e.g., \"DemographicParity\" or \"EqualizedOdds\". For a full list of possible options see `self.model.moments`. Other\n- def fit(self, X, y): Train a less biased classifier or regressor with the given training data. Args: X (pandas.DataFrame): Training samples. y (array-like): Training output. Returns: self\n- def predict(self, X): Predict output for the given samples. Args: X (pandas.DataFrame): Test samples. Returns: numpy.ndarray: Predicted output per sample.\n- def predict_proba(self, X): Probability estimates. The returned estimates for all classes are ordered by the label of classes for classification. Args: X (pandas.DataFrame): Test samples. Returns: numpy.ndarray: returns the probability of the sample for each class in the model, where classes are ordered as they are in ``self.classes_``.\n\n<|skeleton|>\nclass GridSearchReduction:\n \"\"\"Grid search reduction for fair classification or regression. Grid search is an in-processing technique that can be used for fair classification or fair regression. 
For classification it reduces fair classification to a sequence of cost-sensitive classification problems, returning the deterministic classifier with the lowest empirical error subject to fair classification constraints [#agarwal18]_ among the candidates searched. For regression it uses the same priniciple to return a deterministic regressor with the lowest empirical error subject to the constraint of bounded group loss [#agarwal19]_. References: .. [#agarwal18] `A. Agarwal, A. Beygelzimer, M. Dudik, J. Langford, and H. Wallach,\"\"\"\n\n def __init__(self, prot_attr, estimator, constraints, constraint_weight=0.5, grid_size=10, grid_limit=2.0, grid=None, drop_prot_attr=True, loss='ZeroOne', min_val=None, max_val=None):\n \"\"\"Args: prot_attr: String or array-like column indices or column names of protected attributes. estimator: An estimator implementing methods ``fit(X, y, sample_weight)`` and ``predict(X)``, where ``X`` is the matrix of features, ``y`` is the vector of labels, and ``sample_weight`` is a vector of weights; labels ``y`` and predictions returned by ``predict(X)`` are either 0 or 1 -- e.g. scikit-learn classifiers/regressors. constraints (str or fairlearn.reductions.Moment): If string, keyword denoting the :class:`fairlearn.reductions.Moment` object defining the disparity constraints -- e.g., \"DemographicParity\" or \"EqualizedOdds\". For a full list of possible options see `self.model.moments`. Other\"\"\"\n <|body_0|>\n\n def fit(self, X, y):\n \"\"\"Train a less biased classifier or regressor with the given training data. Args: X (pandas.DataFrame): Training samples. y (array-like): Training output. Returns: self\"\"\"\n <|body_1|>\n\n def predict(self, X):\n \"\"\"Predict output for the given samples. Args: X (pandas.DataFrame): Test samples. Returns: numpy.ndarray: Predicted output per sample.\"\"\"\n <|body_2|>\n\n def predict_proba(self, X):\n \"\"\"Probability estimates. The returned estimates for all classes are ordered by the label of classes for classification. Args: X (pandas.DataFrame): Test samples. 
Returns: numpy.ndarray: returns the probability of the sample for each class in the model, where classes are ordered as they are in ``self.classes_``.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.prot_attr = prot_attr\n self.estimator = estimator\n self.constraints = constraints\n self.constraint_weight = constraint_weight\n self.grid_size = grid_size\n self.grid_limit = grid_limit\n self.grid = grid\n self.drop_prot_attr = drop_prot_attr\n self.loss = loss\n self.min_val = min_val\n self.max_val = max_val\n<|end_body_0|>\n\n<|body_start_1|>\n self.estimator_ = clone(self.estimator)\n moments = {'DemographicParity': red.DemographicParity, 'EqualizedOdds': red.EqualizedOdds, 'TruePositiveRateParity': red.TruePositiveRateParity, 'FalsePositiveRateParity': red.FalsePositiveRateParity, 'ErrorRateParity': red.ErrorRateParity, 'BoundedGroupLoss': red.BoundedGroupLoss}\n if isinstance(self.constraints, str):\n if self.constraints not in moments:\n raise ValueError(f'Constraint not recognized: {self.constraints}')\n if self.constraints == 'BoundedGroupLoss':\n losses = {'ZeroOne': red.ZeroOneLoss, 'Square': red.SquareLoss, 'Absolute': red.AbsoluteLoss}\n if self.loss == 'ZeroOne':\n self.loss_ = losses[self.loss]()\n else:\n self.loss_ = losses[self.loss](self.min_val, self.max_val)\n self.moment_ = moments[self.constraints](loss=self.loss_)\n else:\n self.moment_ = moments[self.constraints]()\n elif isinstance(self.constraints, red.Moment):\n self.moment_ = self.constraints\n else:\n raise ValueError('constraints must be a string or Moment object.')\n self.model_ = red.GridSearch(estimator=self.estimator_, constraints=self.moment_, constraint_weight=self.constraint_weight, grid_size=self.grid_size, grid_limit=self.grid_limit, grid=self.grid)\n A = X[self.prot_attr]\n if self.drop_prot_attr:\n X = X.drop(self.prot_attr, axis=1)\n if isinstance(self.model_.constraints, red.ClassificationMoment):\n le = LabelEncoder()\n y = le.fit_transform(y)\n self.classes_ = le.classes_\n self.model_.fit(X, y, sensitive_features=A)\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n if self.drop_prot_attr:\n X = X.drop(self.prot_attr, axis=1)\n return self.model_.predict(X)\n<|end_body_2|>\n\n<|body_start_3|>\n if self.drop_prot_attr:\n X = X.drop(self.prot_attr, axis=1)\n if isinstance(self.model_.constraints, red.ClassificationMoment):\n return self.model_.predict_proba(X)\n raise NotImplementedError('Underlying model does not support predict_proba')\n<|end_body_3|>\n", "revision_id": "6f9972e4a7dbca2402f29b86ea67889143dbeb3e", "skeleton": "<|skeleton|>\nclass GridSearchReduction:\n \"\"\"Grid search reduction for fair classification or regression. Grid search is an in-processing technique that can be used for fair classification or fair regression. For classification it reduces fair classification to a sequence of cost-sensitive classification problems, returning the deterministic classifier with the lowest empirical error subject to fair classification constraints [#agarwal18]_ among the candidates searched. For regression it uses the same priniciple to return a deterministic regressor with the lowest empirical error subject to the constraint of bounded group loss [#agarwal19]_. References: .. [#agarwal18] `A. Agarwal, A. Beygelzimer, M. Dudik, J. Langford, and H. 
Wallach,\"\"\"\n\n def __init__(self, prot_attr, estimator, constraints, constraint_weight=0.5, grid_size=10, grid_limit=2.0, grid=None, drop_prot_attr=True, loss='ZeroOne', min_val=None, max_val=None):\n \"\"\"Args: prot_attr: String or array-like column indices or column names of protected attributes. estimator: An estimator implementing methods ``fit(X, y, sample_weight)`` and ``predict(X)``, where ``X`` is the matrix of features, ``y`` is the vector of labels, and ``sample_weight`` is a vector of weights; labels ``y`` and predictions returned by ``predict(X)`` are either 0 or 1 -- e.g. scikit-learn classifiers/regressors. constraints (str or fairlearn.reductions.Moment): If string, keyword denoting the :class:`fairlearn.reductions.Moment` object defining the disparity constraints -- e.g., \"DemographicParity\" or \"EqualizedOdds\". For a full list of possible options see `self.model.moments`. Other\"\"\"\n <|body_0|>\n\n def fit(self, X, y):\n \"\"\"Train a less biased classifier or regressor with the given training data. Args: X (pandas.DataFrame): Training samples. y (array-like): Training output. Returns: self\"\"\"\n <|body_1|>\n\n def predict(self, X):\n \"\"\"Predict output for the given samples. Args: X (pandas.DataFrame): Test samples. Returns: numpy.ndarray: Predicted output per sample.\"\"\"\n <|body_2|>\n\n def predict_proba(self, X):\n \"\"\"Probability estimates. The returned estimates for all classes are ordered by the label of classes for classification. Args: X (pandas.DataFrame): Test samples. Returns: numpy.ndarray: returns the probability of the sample for each class in the model, where classes are ordered as they are in ``self.classes_``.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GridSearchReduction:\n \"\"\"Grid search reduction for fair classification or regression. Grid search is an in-processing technique that can be used for fair classification or fair regression. For classification it reduces fair classification to a sequence of cost-sensitive classification problems, returning the deterministic classifier with the lowest empirical error subject to fair classification constraints [#agarwal18]_ among the candidates searched. For regression it uses the same priniciple to return a deterministic regressor with the lowest empirical error subject to the constraint of bounded group loss [#agarwal19]_. References: .. [#agarwal18] `A. Agarwal, A. Beygelzimer, M. Dudik, J. Langford, and H. Wallach,\"\"\"\n\n def __init__(self, prot_attr, estimator, constraints, constraint_weight=0.5, grid_size=10, grid_limit=2.0, grid=None, drop_prot_attr=True, loss='ZeroOne', min_val=None, max_val=None):\n \"\"\"Args: prot_attr: String or array-like column indices or column names of protected attributes. estimator: An estimator implementing methods ``fit(X, y, sample_weight)`` and ``predict(X)``, where ``X`` is the matrix of features, ``y`` is the vector of labels, and ``sample_weight`` is a vector of weights; labels ``y`` and predictions returned by ``predict(X)`` are either 0 or 1 -- e.g. scikit-learn classifiers/regressors. constraints (str or fairlearn.reductions.Moment): If string, keyword denoting the :class:`fairlearn.reductions.Moment` object defining the disparity constraints -- e.g., \"DemographicParity\" or \"EqualizedOdds\". For a full list of possible options see `self.model.moments`. 
Other\"\"\"\n self.prot_attr = prot_attr\n self.estimator = estimator\n self.constraints = constraints\n self.constraint_weight = constraint_weight\n self.grid_size = grid_size\n self.grid_limit = grid_limit\n self.grid = grid\n self.drop_prot_attr = drop_prot_attr\n self.loss = loss\n self.min_val = min_val\n self.max_val = max_val\n\n def fit(self, X, y):\n \"\"\"Train a less biased classifier or regressor with the given training data. Args: X (pandas.DataFrame): Training samples. y (array-like): Training output. Returns: self\"\"\"\n self.estimator_ = clone(self.estimator)\n moments = {'DemographicParity': red.DemographicParity, 'EqualizedOdds': red.EqualizedOdds, 'TruePositiveRateParity': red.TruePositiveRateParity, 'FalsePositiveRateParity': red.FalsePositiveRateParity, 'ErrorRateParity': red.ErrorRateParity, 'BoundedGroupLoss': red.BoundedGroupLoss}\n if isinstance(self.constraints, str):\n if self.constraints not in moments:\n raise ValueError(f'Constraint not recognized: {self.constraints}')\n if self.constraints == 'BoundedGroupLoss':\n losses = {'ZeroOne': red.ZeroOneLoss, 'Square': red.SquareLoss, 'Absolute': red.AbsoluteLoss}\n if self.loss == 'ZeroOne':\n self.loss_ = losses[self.loss]()\n else:\n self.loss_ = losses[self.loss](self.min_val, self.max_val)\n self.moment_ = moments[self.constraints](loss=self.loss_)\n else:\n self.moment_ = moments[self.constraints]()\n elif isinstance(self.constraints, red.Moment):\n self.moment_ = self.constraints\n else:\n raise ValueError('constraints must be a string or Moment object.')\n self.model_ = red.GridSearch(estimator=self.estimator_, constraints=self.moment_, constraint_weight=self.constraint_weight, grid_size=self.grid_size, grid_limit=self.grid_limit, grid=self.grid)\n A = X[self.prot_attr]\n if self.drop_prot_attr:\n X = X.drop(self.prot_attr, axis=1)\n if isinstance(self.model_.constraints, red.ClassificationMoment):\n le = LabelEncoder()\n y = le.fit_transform(y)\n self.classes_ = le.classes_\n self.model_.fit(X, y, sensitive_features=A)\n return self\n\n def predict(self, X):\n \"\"\"Predict output for the given samples. Args: X (pandas.DataFrame): Test samples. Returns: numpy.ndarray: Predicted output per sample.\"\"\"\n if self.drop_prot_attr:\n X = X.drop(self.prot_attr, axis=1)\n return self.model_.predict(X)\n\n def predict_proba(self, X):\n \"\"\"Probability estimates. The returned estimates for all classes are ordered by the label of classes for classification. Args: X (pandas.DataFrame): Test samples. 
Returns: numpy.ndarray: returns the probability of the sample for each class in the model, where classes are ordered as they are in ``self.classes_``.\"\"\"\n if self.drop_prot_attr:\n X = X.drop(self.prot_attr, axis=1)\n if isinstance(self.model_.constraints, red.ClassificationMoment):\n return self.model_.predict_proba(X)\n raise NotImplementedError('Underlying model does not support predict_proba')\n", "source": "the_stack_v2_python_sparse", "source_path": "aif360/sklearn/inprocessing/grid_search_reduction.py", "source_repo": "Trusted-AI/AIF360", "split": "val", "star_events_count": 1157} {"blob_id": "14952a22d4f5f65b2ad348b79c17ce790093b629", "bodies": ["re = cloudparking_service().mockCarInOut(send_data['carNum'], 0, send_data['inClientID'])\nresult = re\nAssertions().assert_in_text(result, expect['mockCarInMessage'])", "re = cloudparking_service(centerMonitorLogin).mockCarInOut(send_data['carNum'], 1, send_data['outClientID'])\nresult = re\nAssertions().assert_in_text(result, expect['mockCarOutMessage'])", "re = CarInOutHandle(centerMonitorLogin).sendVoiceMessage(send_data['carNum'], send_data['voiceMsg'])\nresult = re['status']\nAssertions().assert_text(result, expect['sendVoiceMsg'])", "re = PersonalInfo(centerMonitorLogin).logList()\nresult = re[0]\nAssertions().assert_in_text(result['handleMessage'], expect['operatorLogMsg'])"], "bodies_text": "<|body_start_0|>\n re = cloudparking_service().mockCarInOut(send_data['carNum'], 0, send_data['inClientID'])\n result = re\n Assertions().assert_in_text(result, expect['mockCarInMessage'])\n<|end_body_0|>\n\n<|body_start_1|>\n re = cloudparking_service(centerMonitorLogin).mockCarInOut(send_data['carNum'], 1, send_data['outClientID'])\n result = re\n Assertions().assert_in_text(result, expect['mockCarOutMessage'])\n<|end_body_1|>\n\n<|body_start_2|>\n re = CarInOutHandle(centerMonitorLogin).sendVoiceMessage(send_data['carNum'], send_data['voiceMsg'])\n result = re['status']\n Assertions().assert_text(result, expect['sendVoiceMsg'])\n<|end_body_2|>\n\n<|body_start_3|>\n re = PersonalInfo(centerMonitorLogin).logList()\n result = re[0]\n Assertions().assert_in_text(result['handleMessage'], expect['operatorLogMsg'])\n<|end_body_3|>\n", "class_docstring": "", "class_name": "TestOperatorLog", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestOperatorLog:\n\n def test_mockCarIn(self, send_data, expect):\n \"\"\"模拟车辆进场\"\"\"\n <|body_0|>\n\n def test_mockCarOut(self, centerMonitorLogin, send_data, expect):\n \"\"\"模拟离场\"\"\"\n <|body_1|>\n\n def test_sendVoiceMsg(self, centerMonitorLogin, send_data, expect):\n \"\"\"发送语音\"\"\"\n <|body_2|>\n\n def test_operatorLog(self, centerMonitorLogin, send_data, expect):\n \"\"\"检查操作日志-是否有登记放行-未完成\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n re = cloudparking_service().mockCarInOut(send_data['carNum'], 0, send_data['inClientID'])\n result = re\n Assertions().assert_in_text(result, expect['mockCarInMessage'])\n<|end_body_0|>\n\n<|body_start_1|>\n re = cloudparking_service(centerMonitorLogin).mockCarInOut(send_data['carNum'], 1, send_data['outClientID'])\n result = re\n Assertions().assert_in_text(result, expect['mockCarOutMessage'])\n<|end_body_1|>\n\n<|body_start_2|>\n re = CarInOutHandle(centerMonitorLogin).sendVoiceMessage(send_data['carNum'], send_data['voiceMsg'])\n result = re['status']\n Assertions().assert_text(result, expect['sendVoiceMsg'])\n<|end_body_2|>\n\n<|body_start_3|>\n re = 
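The GridSearchReduction record above is a thin wrapper over fairlearn's reduction API. As a minimal sketch of the path its fit() takes when constraints="DemographicParity" and drop_prot_attr=True -- assuming fairlearn and scikit-learn are installed, with a synthetic DataFrame whose column names are illustrative only:

import numpy as np
import pandas as pd
import fairlearn.reductions as red
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
X = pd.DataFrame({'x1': rng.normal(size=200),
                  'sex': rng.integers(0, 2, size=200)})   # 'sex' plays prot_attr
y = ((X['x1'] + 0.5 * X['sex']) > 0).astype(int)

A = X['sex']                    # extracted exactly as fit() does: A = X[self.prot_attr]
X_fit = X.drop('sex', axis=1)   # the drop_prot_attr=True branch

model = red.GridSearch(estimator=LogisticRegression(),
                       constraints=red.DemographicParity(),  # target of the string keyword
                       constraint_weight=0.5, grid_size=10, grid_limit=2.0)
model.fit(X_fit, y, sensitive_features=A)
print(model.predict(X_fit)[:10])

The record's string-to-Moment dispatch performs exactly this mapping before delegating to red.GridSearch.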
PersonalInfo(centerMonitorLogin).logList()\n result = re[0]\n Assertions().assert_in_text(result['handleMessage'], expect['operatorLogMsg'])\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000246", "length_bytes": 1831, "license_type": "no_license", "methods": [{"docstring": "模拟车辆进场", "name": "test_mockCarIn", "signature": "def test_mockCarIn(self, send_data, expect)"}, {"docstring": "模拟离场", "name": "test_mockCarOut", "signature": "def test_mockCarOut(self, centerMonitorLogin, send_data, expect)"}, {"docstring": "发送语音", "name": "test_sendVoiceMsg", "signature": "def test_sendVoiceMsg(self, centerMonitorLogin, send_data, expect)"}, {"docstring": "检查操作日志-是否有登记放行-未完成", "name": "test_operatorLog", "signature": "def test_operatorLog(self, centerMonitorLogin, send_data, expect)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_004470", "prompt": "Implement the Python class `TestOperatorLog` described below.\n\nClass description:\nImplement the TestOperatorLog class.\n\nMethod signatures and docstrings:\n- def test_mockCarIn(self, send_data, expect): 模拟车辆进场\n- def test_mockCarOut(self, centerMonitorLogin, send_data, expect): 模拟离场\n- def test_sendVoiceMsg(self, centerMonitorLogin, send_data, expect): 发送语音\n- def test_operatorLog(self, centerMonitorLogin, send_data, expect): 检查操作日志-是否有登记放行-未完成", "prompted_full_text": "Implement the Python class `TestOperatorLog` described below.\n\nClass description:\nImplement the TestOperatorLog class.\n\nMethod signatures and docstrings:\n- def test_mockCarIn(self, send_data, expect): 模拟车辆进场\n- def test_mockCarOut(self, centerMonitorLogin, send_data, expect): 模拟离场\n- def test_sendVoiceMsg(self, centerMonitorLogin, send_data, expect): 发送语音\n- def test_operatorLog(self, centerMonitorLogin, send_data, expect): 检查操作日志-是否有登记放行-未完成\n\n<|skeleton|>\nclass TestOperatorLog:\n\n def test_mockCarIn(self, send_data, expect):\n \"\"\"模拟车辆进场\"\"\"\n <|body_0|>\n\n def test_mockCarOut(self, centerMonitorLogin, send_data, expect):\n \"\"\"模拟离场\"\"\"\n <|body_1|>\n\n def test_sendVoiceMsg(self, centerMonitorLogin, send_data, expect):\n \"\"\"发送语音\"\"\"\n <|body_2|>\n\n def test_operatorLog(self, centerMonitorLogin, send_data, expect):\n \"\"\"检查操作日志-是否有登记放行-未完成\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n re = cloudparking_service().mockCarInOut(send_data['carNum'], 0, send_data['inClientID'])\n result = re\n Assertions().assert_in_text(result, expect['mockCarInMessage'])\n<|end_body_0|>\n\n<|body_start_1|>\n re = cloudparking_service(centerMonitorLogin).mockCarInOut(send_data['carNum'], 1, send_data['outClientID'])\n result = re\n Assertions().assert_in_text(result, expect['mockCarOutMessage'])\n<|end_body_1|>\n\n<|body_start_2|>\n re = CarInOutHandle(centerMonitorLogin).sendVoiceMessage(send_data['carNum'], send_data['voiceMsg'])\n result = re['status']\n Assertions().assert_text(result, expect['sendVoiceMsg'])\n<|end_body_2|>\n\n<|body_start_3|>\n re = PersonalInfo(centerMonitorLogin).logList()\n result = re[0]\n Assertions().assert_in_text(result['handleMessage'], expect['operatorLogMsg'])\n<|end_body_3|>\n", "revision_id": "34c368c109867da26d9256bca85f872b0fac2ea7", "skeleton": "<|skeleton|>\nclass TestOperatorLog:\n\n def test_mockCarIn(self, send_data, expect):\n \"\"\"模拟车辆进场\"\"\"\n <|body_0|>\n\n def test_mockCarOut(self, centerMonitorLogin, send_data, expect):\n \"\"\"模拟离场\"\"\"\n <|body_1|>\n\n def test_sendVoiceMsg(self, centerMonitorLogin, send_data, expect):\n \"\"\"发送语音\"\"\"\n <|body_2|>\n\n def 
test_operatorLog(self, centerMonitorLogin, send_data, expect):\n \"\"\"检查操作日志-是否有登记放行-未完成\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestOperatorLog:\n def test_mockCarIn(self, send_data, expect):\n \"\"\"模拟车辆进场\"\"\"\n re = cloudparking_service().mockCarInOut(send_data['carNum'], 0, send_data['inClientID'])\n result = re\n Assertions().assert_in_text(result, expect['mockCarInMessage'])\n\n def test_mockCarOut(self, centerMonitorLogin, send_data, expect):\n \"\"\"模拟离场\"\"\"\n re = cloudparking_service(centerMonitorLogin).mockCarInOut(send_data['carNum'], 1, send_data['outClientID'])\n result = re\n Assertions().assert_in_text(result, expect['mockCarOutMessage'])\n\n def test_sendVoiceMsg(self, centerMonitorLogin, send_data, expect):\n \"\"\"发送语音\"\"\"\n re = CarInOutHandle(centerMonitorLogin).sendVoiceMessage(send_data['carNum'], send_data['voiceMsg'])\n result = re['status']\n Assertions().assert_text(result, expect['sendVoiceMsg'])\n\n def test_operatorLog(self, centerMonitorLogin, send_data, expect):\n \"\"\"检查操作日志-是否有登记放行-未完成\"\"\"\n re = PersonalInfo(centerMonitorLogin).logList()\n result = re[0]\n Assertions().assert_in_text(result['handleMessage'], expect['operatorLogMsg'])\n", "source": "the_stack_v2_python_sparse", "source_path": "test_suite/centerMonitorRoom/personalInfo/test_operatorLog.py", "source_repo": "oyebino/pomp_api", "split": "val", "star_events_count": 1} {"blob_id": "951508147bd5ffdaf7799bdd1625eaf5a5d81dad", "bodies": ["def flip(k):\n arr[:] = arr[:k][::-1] + arr[k:]\n\ndef find_max_num_index(top_index):\n max = float('inf')\n idx = -1\n for i in range(top_index):\n if arr[i] > max:\n max = arr[i]\n idx = i\n return idx\nfor i in range(len(arr), 0, -1):\n biggest = find_max_num_index(i)\n flip(biggest)\n print(arr)\n flip(i)\nreturn arr", "print(arr)\n\ndef flip(k):\n arr[:] = arr[:k][::-1] + arr[k:]\ni = 0\nwhile i < len(arr) and arr[i] < arr[i + 1]:\n i += 1\nif i == len(arr) - 1:\n return arr\nelse:\n k = i\n return self.pancakeSort(arr[:k][::-1] + arr[k:])"], "bodies_text": "<|body_start_0|>\n def flip(k):\n arr[:] = arr[:k][::-1] + arr[k:]\n\n def find_max_num_index(top_index):\n max = float('inf')\n idx = -1\n for i in range(top_index):\n if arr[i] > max:\n max = arr[i]\n idx = i\n return idx\n for i in range(len(arr), 0, -1):\n biggest = find_max_num_index(i)\n flip(biggest)\n print(arr)\n flip(i)\n return arr\n<|end_body_0|>\n\n<|body_start_1|>\n print(arr)\n\n def flip(k):\n arr[:] = arr[:k][::-1] + arr[k:]\n i = 0\n while i < len(arr) and arr[i] < arr[i + 1]:\n i += 1\n if i == len(arr) - 1:\n return arr\n else:\n k = i\n return self.pancakeSort(arr[:k][::-1] + arr[k:])\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def pancakeSort(self, arr):\n \"\"\":type A: List[int] :rtype: List[int]\"\"\"\n <|body_0|>\n\n def pancakeSort(self, arr):\n \"\"\":type A: List[int] :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def flip(k):\n arr[:] = arr[:k][::-1] + arr[k:]\n\n def find_max_num_index(top_index):\n max = float('inf')\n idx = -1\n for i in range(top_index):\n if arr[i] > max:\n max = arr[i]\n idx = i\n return idx\n for i in range(len(arr), 0, -1):\n biggest = find_max_num_index(i)\n flip(biggest)\n print(arr)\n flip(i)\n return 
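The TestOperatorLog record's docstrings translate as: simulate car entry (模拟车辆进场), simulate car exit (模拟离场), send a voice message (发送语音), and check the operator log for a register-and-release entry, marked unfinished (检查操作日志-是否有登记放行-未完成). Its tests depend on pytest fixtures the record never defines; a hypothetical conftest.py sketch of send_data, expect, and centerMonitorLogin -- every name and value below is an assumption, not project code:

import pytest

@pytest.fixture
def send_data():
    # Illustrative payload only; real values come from the project's test data.
    return {'carNum': '粤B12345', 'inClientID': 'gate-in-01',
            'outClientID': 'gate-out-01', 'voiceMsg': 'please proceed'}

@pytest.fixture
def expect():
    # Fragments the assertions search for; again, purely illustrative.
    return {'mockCarInMessage': 'success', 'mockCarOutMessage': 'success',
            'sendVoiceMsg': 200, 'operatorLogMsg': '登记放行'}

@pytest.fixture
def centerMonitorLogin():
    # Would normally log in to the center-monitor service and return a session.
    return {'token': 'dummy-token'}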
arr\n<|end_body_0|>\n\n<|body_start_1|>\n print(arr)\n\n def flip(k):\n arr[:] = arr[:k][::-1] + arr[k:]\n i = 0\n while i < len(arr) and arr[i] < arr[i + 1]:\n i += 1\n if i == len(arr) - 1:\n return arr\n else:\n k = i\n return self.pancakeSort(arr[:k][::-1] + arr[k:])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000247", "length_bytes": 2159, "license_type": "no_license", "methods": [{"docstring": ":type A: List[int] :rtype: List[int]", "name": "pancakeSort", "signature": "def pancakeSort(self, arr)"}, {"docstring": ":type A: List[int] :rtype: List[int]", "name": "pancakeSort", "signature": "def pancakeSort(self, arr)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def pancakeSort(self, arr): :type A: List[int] :rtype: List[int]\n- def pancakeSort(self, arr): :type A: List[int] :rtype: List[int]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def pancakeSort(self, arr): :type A: List[int] :rtype: List[int]\n- def pancakeSort(self, arr): :type A: List[int] :rtype: List[int]\n\n<|skeleton|>\nclass Solution:\n\n def pancakeSort(self, arr):\n \"\"\":type A: List[int] :rtype: List[int]\"\"\"\n <|body_0|>\n\n def pancakeSort(self, arr):\n \"\"\":type A: List[int] :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def flip(k):\n arr[:] = arr[:k][::-1] + arr[k:]\n\n def find_max_num_index(top_index):\n max = float('inf')\n idx = -1\n for i in range(top_index):\n if arr[i] > max:\n max = arr[i]\n idx = i\n return idx\n for i in range(len(arr), 0, -1):\n biggest = find_max_num_index(i)\n flip(biggest)\n print(arr)\n flip(i)\n return arr\n<|end_body_0|>\n\n<|body_start_1|>\n print(arr)\n\n def flip(k):\n arr[:] = arr[:k][::-1] + arr[k:]\n i = 0\n while i < len(arr) and arr[i] < arr[i + 1]:\n i += 1\n if i == len(arr) - 1:\n return arr\n else:\n k = i\n return self.pancakeSort(arr[:k][::-1] + arr[k:])\n<|end_body_1|>\n", "revision_id": "844f502da4d6fb9cd69cf0a1ef71da3385a4d2b4", "skeleton": "<|skeleton|>\nclass Solution:\n\n def pancakeSort(self, arr):\n \"\"\":type A: List[int] :rtype: List[int]\"\"\"\n <|body_0|>\n\n def pancakeSort(self, arr):\n \"\"\":type A: List[int] :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def pancakeSort(self, arr):\n \"\"\":type A: List[int] :rtype: List[int]\"\"\"\n def flip(k):\n arr[:] = arr[:k][::-1] + arr[k:]\n\n def find_max_num_index(top_index):\n max = float('inf')\n idx = -1\n for i in range(top_index):\n if arr[i] > max:\n max = arr[i]\n idx = i\n return idx\n for i in range(len(arr), 0, -1):\n biggest = find_max_num_index(i)\n flip(biggest)\n print(arr)\n flip(i)\n return arr\n\n def pancakeSort(self, arr):\n \"\"\":type A: List[int] :rtype: List[int]\"\"\"\n print(arr)\n\n def flip(k):\n arr[:] = arr[:k][::-1] + arr[k:]\n i = 0\n while i < len(arr) and arr[i] < arr[i + 1]:\n i += 1\n if i == len(arr) - 1:\n return arr\n else:\n k = i\n return self.pancakeSort(arr[:k][::-1] + arr[k:])\n", "source": "the_stack_v2_python_sparse", "source_path": "969-pancake_sorting.py", "source_repo": "stevestar888/leetcode-problems", "split": "val", "star_events_count": 2} {"blob_id": "305bfa2f1c1e52d2756dfc6d3efe1d9ab79228cf", 
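One bug worth flagging in the pancake-sort record that just closed: find_max_num_index seeds max with float('inf'), so arr[i] > max is never true and it always returns -1; the second pancakeSort definition then shadows the first inside the class. A self-contained, corrected sketch of the same prefix-reversal idea, returning the sequence of flip sizes as LeetCode 969 expects:

def pancake_sort(arr):
    """Sort arr in place with prefix reversals; return the flip sizes used."""
    flips = []
    for size in range(len(arr), 1, -1):
        biggest = max(range(size), key=lambda i: arr[i])  # index of the prefix max
        if biggest == size - 1:
            continue                        # already in its final position
        if biggest > 0:                     # flip the max to the front...
            arr[:biggest + 1] = arr[:biggest + 1][::-1]
            flips.append(biggest + 1)
        arr[:size] = arr[:size][::-1]       # ...then flip it into place
        flips.append(size)
    return flips

nums = [3, 2, 4, 1]
print(pancake_sort(nums), nums)  # [3, 4, 2, 3, 2] [1, 2, 3, 4]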
"bodies": ["if rowIndex == 0:\n return [1]\nif rowIndex == 1:\n return [1, 1]\nres = [1] * (rowIndex + 1)\nfor i in range(1, rowIndex):\n res[i] = self.getRow(rowIndex - 1)[i - 1] + self.getRow(rowIndex - 1)[i]\nreturn res", "if rowIndex == 0:\n return [1]\nif rowIndex == 1:\n return [1, 1]\nres = [[1] * x for x in range(1, rowIndex + 2)]\nfor i in range(2, rowIndex + 1):\n for j in range(1, len(res[i]) - 1):\n res[i][j] = res[i - 1][j - 1] + res[i - 1][j]\nreturn res[-1]", "if rowIndex == 0:\n return [1]\nres = []\ni = j = 1\nh = rowIndex\nwhile i < rowIndex:\n res.append(h // j)\n h *= rowIndex - i\n j *= i + 1\n i += 1\nres.append(1)\nres.insert(0, 1)\nreturn res"], "bodies_text": "<|body_start_0|>\n if rowIndex == 0:\n return [1]\n if rowIndex == 1:\n return [1, 1]\n res = [1] * (rowIndex + 1)\n for i in range(1, rowIndex):\n res[i] = self.getRow(rowIndex - 1)[i - 1] + self.getRow(rowIndex - 1)[i]\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if rowIndex == 0:\n return [1]\n if rowIndex == 1:\n return [1, 1]\n res = [[1] * x for x in range(1, rowIndex + 2)]\n for i in range(2, rowIndex + 1):\n for j in range(1, len(res[i]) - 1):\n res[i][j] = res[i - 1][j - 1] + res[i - 1][j]\n return res[-1]\n<|end_body_1|>\n\n<|body_start_2|>\n if rowIndex == 0:\n return [1]\n res = []\n i = j = 1\n h = rowIndex\n while i < rowIndex:\n res.append(h // j)\n h *= rowIndex - i\n j *= i + 1\n i += 1\n res.append(1)\n res.insert(0, 1)\n return res\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def getRow(self, rowIndex):\n \"\"\"递归 :param rowIndex: :return:\"\"\"\n <|body_0|>\n\n def getRow2(self, rowIndex):\n \"\"\"非递归 打印出所有结果 :param rowIndex: :return:\"\"\"\n <|body_1|>\n\n def getRow3(self, rowIndex):\n \"\"\"杨辉三角的数学性质:第n行的m个数可表示为 C(n-1,m-1),即为从n-1个不同元素中取m-1个元素的组合数 :param rowIndex: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if rowIndex == 0:\n return [1]\n if rowIndex == 1:\n return [1, 1]\n res = [1] * (rowIndex + 1)\n for i in range(1, rowIndex):\n res[i] = self.getRow(rowIndex - 1)[i - 1] + self.getRow(rowIndex - 1)[i]\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if rowIndex == 0:\n return [1]\n if rowIndex == 1:\n return [1, 1]\n res = [[1] * x for x in range(1, rowIndex + 2)]\n for i in range(2, rowIndex + 1):\n for j in range(1, len(res[i]) - 1):\n res[i][j] = res[i - 1][j - 1] + res[i - 1][j]\n return res[-1]\n<|end_body_1|>\n\n<|body_start_2|>\n if rowIndex == 0:\n return [1]\n res = []\n i = j = 1\n h = rowIndex\n while i < rowIndex:\n res.append(h // j)\n h *= rowIndex - i\n j *= i + 1\n i += 1\n res.append(1)\n res.insert(0, 1)\n return res\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000248", "length_bytes": 1689, "license_type": "no_license", "methods": [{"docstring": "递归 :param rowIndex: :return:", "name": "getRow", "signature": "def getRow(self, rowIndex)"}, {"docstring": "非递归 打印出所有结果 :param rowIndex: :return:", "name": "getRow2", "signature": "def getRow2(self, rowIndex)"}, {"docstring": "杨辉三角的数学性质:第n行的m个数可表示为 C(n-1,m-1),即为从n-1个不同元素中取m-1个元素的组合数 :param rowIndex: :return:", "name": "getRow3", "signature": "def getRow3(self, rowIndex)"}], "n_methods": 3, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def getRow(self, rowIndex): 递归 :param rowIndex: :return:\n- def getRow2(self, 
rowIndex): 非递归 打印出所有结果 :param rowIndex: :return:\n- def getRow3(self, rowIndex): 杨辉三角的数学性质:第n行的m个数可表示为 C(n-1,m-1),即为从n-1个不同元素中取m-1个元素的组合数 :param rowIndex: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def getRow(self, rowIndex): 递归 :param rowIndex: :return:\n- def getRow2(self, rowIndex): 非递归 打印出所有结果 :param rowIndex: :return:\n- def getRow3(self, rowIndex): 杨辉三角的数学性质:第n行的m个数可表示为 C(n-1,m-1),即为从n-1个不同元素中取m-1个元素的组合数 :param rowIndex: :return:\n\n<|skeleton|>\nclass Solution:\n\n def getRow(self, rowIndex):\n \"\"\"递归 :param rowIndex: :return:\"\"\"\n <|body_0|>\n\n def getRow2(self, rowIndex):\n \"\"\"非递归 打印出所有结果 :param rowIndex: :return:\"\"\"\n <|body_1|>\n\n def getRow3(self, rowIndex):\n \"\"\"杨辉三角的数学性质:第n行的m个数可表示为 C(n-1,m-1),即为从n-1个不同元素中取m-1个元素的组合数 :param rowIndex: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if rowIndex == 0:\n return [1]\n if rowIndex == 1:\n return [1, 1]\n res = [1] * (rowIndex + 1)\n for i in range(1, rowIndex):\n res[i] = self.getRow(rowIndex - 1)[i - 1] + self.getRow(rowIndex - 1)[i]\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if rowIndex == 0:\n return [1]\n if rowIndex == 1:\n return [1, 1]\n res = [[1] * x for x in range(1, rowIndex + 2)]\n for i in range(2, rowIndex + 1):\n for j in range(1, len(res[i]) - 1):\n res[i][j] = res[i - 1][j - 1] + res[i - 1][j]\n return res[-1]\n<|end_body_1|>\n\n<|body_start_2|>\n if rowIndex == 0:\n return [1]\n res = []\n i = j = 1\n h = rowIndex\n while i < rowIndex:\n res.append(h // j)\n h *= rowIndex - i\n j *= i + 1\n i += 1\n res.append(1)\n res.insert(0, 1)\n return res\n<|end_body_2|>\n", "revision_id": "5d3574ccd282d0146c83c286ae28d8baaabd4910", "skeleton": "<|skeleton|>\nclass Solution:\n\n def getRow(self, rowIndex):\n \"\"\"递归 :param rowIndex: :return:\"\"\"\n <|body_0|>\n\n def getRow2(self, rowIndex):\n \"\"\"非递归 打印出所有结果 :param rowIndex: :return:\"\"\"\n <|body_1|>\n\n def getRow3(self, rowIndex):\n \"\"\"杨辉三角的数学性质:第n行的m个数可表示为 C(n-1,m-1),即为从n-1个不同元素中取m-1个元素的组合数 :param rowIndex: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def getRow(self, rowIndex):\n \"\"\"递归 :param rowIndex: :return:\"\"\"\n if rowIndex == 0:\n return [1]\n if rowIndex == 1:\n return [1, 1]\n res = [1] * (rowIndex + 1)\n for i in range(1, rowIndex):\n res[i] = self.getRow(rowIndex - 1)[i - 1] + self.getRow(rowIndex - 1)[i]\n return res\n\n def getRow2(self, rowIndex):\n \"\"\"非递归 打印出所有结果 :param rowIndex: :return:\"\"\"\n if rowIndex == 0:\n return [1]\n if rowIndex == 1:\n return [1, 1]\n res = [[1] * x for x in range(1, rowIndex + 2)]\n for i in range(2, rowIndex + 1):\n for j in range(1, len(res[i]) - 1):\n res[i][j] = res[i - 1][j - 1] + res[i - 1][j]\n return res[-1]\n\n def getRow3(self, rowIndex):\n \"\"\"杨辉三角的数学性质:第n行的m个数可表示为 C(n-1,m-1),即为从n-1个不同元素中取m-1个元素的组合数 :param rowIndex: :return:\"\"\"\n if rowIndex == 0:\n return [1]\n res = []\n i = j = 1\n h = rowIndex\n while i < rowIndex:\n res.append(h // j)\n h *= rowIndex - i\n j *= i + 1\n i += 1\n res.append(1)\n res.insert(0, 1)\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "119_杨辉三角 II.py", "source_repo": "lovehhf/LeetCode", "split": "val", "star_events_count": 0} {"blob_id": "6c952ce6ef498b3213542d60cb26c72a2df90e6d", "bodies": ["self.X = X\nself.fs = 
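In the Pascal's-triangle record above, the docstrings translate as: recursive (递归); non-recursive, printing all rows (非递归 打印出所有结果); and the mathematical property that the m-th entry of row n is C(n-1, m-1) (杨辉三角的数学性质). Note the recursive getRow calls getRow(rowIndex - 1) twice per element, which makes it exponential. The getRow3 identity -- row[k] = C(rowIndex, k) in 0-based terms -- admits a compact, integer-exact sketch via C(n, k) = C(n, k-1) * (n - k + 1) / k:

def get_row(row_index):
    """Row `row_index` of Pascal's triangle via a running binomial coefficient."""
    row = [1]
    c = 1
    for k in range(1, row_index + 1):
        c = c * (row_index - k + 1) // k   # exact: k always divides the product
        row.append(c)
    return row

assert get_row(0) == [1]
assert get_row(4) == [1, 4, 6, 4, 1]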
fs\nself.N = N\nself.K = K\nif self.K == None:\n self.K = int(len(X) / 2) - 1", "x = np.zeros(self.N)\nfor n in range(self.N):\n x[n] = 1 / np.sqrt(self.N) * self.X[0] * np.exp(1j * 2 * cmath.pi * 0 * n / self.N)\n for k in range(1, self.K + 1):\n x[n] = x[n] + 1 / np.sqrt(self.N) * self.X[k] * np.exp(1j * 2 * cmath.pi * k * n / self.N)\n x[n] = x[n] + 1 / np.sqrt(self.N) * np.conj(self.X[k]) * np.exp(-1j * 2 * cmath.pi * k * n / self.N)\nTs = 1 / self.fs\nTreal = np.arange(self.N) * Ts\nreturn (x, Treal)", "x = np.fft.ifft(self.X, self.N) * np.sqrt(self.N)\nTs = 1 / self.fs\nTreal = np.arange(self.N) * Ts\nreturn (x, Treal)"], "bodies_text": "<|body_start_0|>\n self.X = X\n self.fs = fs\n self.N = N\n self.K = K\n if self.K == None:\n self.K = int(len(X) / 2) - 1\n<|end_body_0|>\n\n<|body_start_1|>\n x = np.zeros(self.N)\n for n in range(self.N):\n x[n] = 1 / np.sqrt(self.N) * self.X[0] * np.exp(1j * 2 * cmath.pi * 0 * n / self.N)\n for k in range(1, self.K + 1):\n x[n] = x[n] + 1 / np.sqrt(self.N) * self.X[k] * np.exp(1j * 2 * cmath.pi * k * n / self.N)\n x[n] = x[n] + 1 / np.sqrt(self.N) * np.conj(self.X[k]) * np.exp(-1j * 2 * cmath.pi * k * n / self.N)\n Ts = 1 / self.fs\n Treal = np.arange(self.N) * Ts\n return (x, Treal)\n<|end_body_1|>\n\n<|body_start_2|>\n x = np.fft.ifft(self.X, self.N) * np.sqrt(self.N)\n Ts = 1 / self.fs\n Treal = np.arange(self.N) * Ts\n return (x, Treal)\n<|end_body_2|>\n", "class_docstring": "idft Inverse Discrete Fourier transform.", "class_name": "idft", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass idft:\n \"\"\"idft Inverse Discrete Fourier transform.\"\"\"\n\n def __init__(self, X, fs, N, K=None):\n \"\"\":param X: Input DFT X :param fs: Input integer fs contains the sample frequency :param N: The number of total signal samples N :param K: Input positive integer that determines the number of coeffients used to calculate the iDFT.\"\"\"\n <|body_0|>\n\n def solve_K(self):\n \"\"\"\\\\\\\\\\\\ METHOD: Compute the iDFT with truncated K coefficients (Due to computation complexity, we will not use it in Section 2) :return iDFT x of duration N from partial DFT X, i.e., X[0], ..., X[K] with K < N/2 :Treal the realt time vector of size N\"\"\"\n <|body_1|>\n\n def solve_ifft(self):\n \"\"\"\\\\\\\\\\\\ METHOD: Compute the iDFT with provided function np.fft.ifft (Computationally efficient) :Treal the realt time vector of size N\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.X = X\n self.fs = fs\n self.N = N\n self.K = K\n if self.K == None:\n self.K = int(len(X) / 2) - 1\n<|end_body_0|>\n\n<|body_start_1|>\n x = np.zeros(self.N)\n for n in range(self.N):\n x[n] = 1 / np.sqrt(self.N) * self.X[0] * np.exp(1j * 2 * cmath.pi * 0 * n / self.N)\n for k in range(1, self.K + 1):\n x[n] = x[n] + 1 / np.sqrt(self.N) * self.X[k] * np.exp(1j * 2 * cmath.pi * k * n / self.N)\n x[n] = x[n] + 1 / np.sqrt(self.N) * np.conj(self.X[k]) * np.exp(-1j * 2 * cmath.pi * k * n / self.N)\n Ts = 1 / self.fs\n Treal = np.arange(self.N) * Ts\n return (x, Treal)\n<|end_body_1|>\n\n<|body_start_2|>\n x = np.fft.ifft(self.X, self.N) * np.sqrt(self.N)\n Ts = 1 / self.fs\n Treal = np.arange(self.N) * Ts\n return (x, Treal)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000249", "length_bytes": 25417, "license_type": "no_license", "methods": [{"docstring": ":param X: Input DFT X :param fs: Input integer fs contains the sample frequency :param N: The number of total signal samples N :param K: Input 
positive integer that determines the number of coeffients used to calculate the iDFT.", "name": "__init__", "signature": "def __init__(self, X, fs, N, K=None)"}, {"docstring": "\\\\\\\\\\\\ METHOD: Compute the iDFT with truncated K coefficients (Due to computation complexity, we will not use it in Section 2) :return iDFT x of duration N from partial DFT X, i.e., X[0], ..., X[K] with K < N/2 :Treal the realt time vector of size N", "name": "solve_K", "signature": "def solve_K(self)"}, {"docstring": "\\\\\\\\\\\\ METHOD: Compute the iDFT with provided function np.fft.ifft (Computationally efficient) :Treal the realt time vector of size N", "name": "solve_ifft", "signature": "def solve_ifft(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002198", "prompt": "Implement the Python class `idft` described below.\n\nClass description:\nidft Inverse Discrete Fourier transform.\n\nMethod signatures and docstrings:\n- def __init__(self, X, fs, N, K=None): :param X: Input DFT X :param fs: Input integer fs contains the sample frequency :param N: The number of total signal samples N :param K: Input positive integer that determines the number of coeffients used to calculate the iDFT.\n- def solve_K(self): \\\\\\\\\\\\ METHOD: Compute the iDFT with truncated K coefficients (Due to computation complexity, we will not use it in Section 2) :return iDFT x of duration N from partial DFT X, i.e., X[0], ..., X[K] with K < N/2 :Treal the realt time vector of size N\n- def solve_ifft(self): \\\\\\\\\\\\ METHOD: Compute the iDFT with provided function np.fft.ifft (Computationally efficient) :Treal the realt time vector of size N", "prompted_full_text": "Implement the Python class `idft` described below.\n\nClass description:\nidft Inverse Discrete Fourier transform.\n\nMethod signatures and docstrings:\n- def __init__(self, X, fs, N, K=None): :param X: Input DFT X :param fs: Input integer fs contains the sample frequency :param N: The number of total signal samples N :param K: Input positive integer that determines the number of coeffients used to calculate the iDFT.\n- def solve_K(self): \\\\\\\\\\\\ METHOD: Compute the iDFT with truncated K coefficients (Due to computation complexity, we will not use it in Section 2) :return iDFT x of duration N from partial DFT X, i.e., X[0], ..., X[K] with K < N/2 :Treal the realt time vector of size N\n- def solve_ifft(self): \\\\\\\\\\\\ METHOD: Compute the iDFT with provided function np.fft.ifft (Computationally efficient) :Treal the realt time vector of size N\n\n<|skeleton|>\nclass idft:\n \"\"\"idft Inverse Discrete Fourier transform.\"\"\"\n\n def __init__(self, X, fs, N, K=None):\n \"\"\":param X: Input DFT X :param fs: Input integer fs contains the sample frequency :param N: The number of total signal samples N :param K: Input positive integer that determines the number of coeffients used to calculate the iDFT.\"\"\"\n <|body_0|>\n\n def solve_K(self):\n \"\"\"\\\\\\\\\\\\ METHOD: Compute the iDFT with truncated K coefficients (Due to computation complexity, we will not use it in Section 2) :return iDFT x of duration N from partial DFT X, i.e., X[0], ..., X[K] with K < N/2 :Treal the realt time vector of size N\"\"\"\n <|body_1|>\n\n def solve_ifft(self):\n \"\"\"\\\\\\\\\\\\ METHOD: Compute the iDFT with provided function np.fft.ifft (Computationally efficient) :Treal the realt time vector of size N\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.X = X\n self.fs = fs\n self.N = N\n self.K = K\n if self.K == None:\n self.K = 
int(len(X) / 2) - 1\n<|end_body_0|>\n\n<|body_start_1|>\n x = np.zeros(self.N)\n for n in range(self.N):\n x[n] = 1 / np.sqrt(self.N) * self.X[0] * np.exp(1j * 2 * cmath.pi * 0 * n / self.N)\n for k in range(1, self.K + 1):\n x[n] = x[n] + 1 / np.sqrt(self.N) * self.X[k] * np.exp(1j * 2 * cmath.pi * k * n / self.N)\n x[n] = x[n] + 1 / np.sqrt(self.N) * np.conj(self.X[k]) * np.exp(-1j * 2 * cmath.pi * k * n / self.N)\n Ts = 1 / self.fs\n Treal = np.arange(self.N) * Ts\n return (x, Treal)\n<|end_body_1|>\n\n<|body_start_2|>\n x = np.fft.ifft(self.X, self.N) * np.sqrt(self.N)\n Ts = 1 / self.fs\n Treal = np.arange(self.N) * Ts\n return (x, Treal)\n<|end_body_2|>\n", "revision_id": "b72322cfc6d81c996117cea2160ee312da62d3ed", "skeleton": "<|skeleton|>\nclass idft:\n \"\"\"idft Inverse Discrete Fourier transform.\"\"\"\n\n def __init__(self, X, fs, N, K=None):\n \"\"\":param X: Input DFT X :param fs: Input integer fs contains the sample frequency :param N: The number of total signal samples N :param K: Input positive integer that determines the number of coeffients used to calculate the iDFT.\"\"\"\n <|body_0|>\n\n def solve_K(self):\n \"\"\"\\\\\\\\\\\\ METHOD: Compute the iDFT with truncated K coefficients (Due to computation complexity, we will not use it in Section 2) :return iDFT x of duration N from partial DFT X, i.e., X[0], ..., X[K] with K < N/2 :Treal the realt time vector of size N\"\"\"\n <|body_1|>\n\n def solve_ifft(self):\n \"\"\"\\\\\\\\\\\\ METHOD: Compute the iDFT with provided function np.fft.ifft (Computationally efficient) :Treal the realt time vector of size N\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class idft:\n \"\"\"idft Inverse Discrete Fourier transform.\"\"\"\n\n def __init__(self, X, fs, N, K=None):\n \"\"\":param X: Input DFT X :param fs: Input integer fs contains the sample frequency :param N: The number of total signal samples N :param K: Input positive integer that determines the number of coeffients used to calculate the iDFT.\"\"\"\n self.X = X\n self.fs = fs\n self.N = N\n self.K = K\n if self.K == None:\n self.K = int(len(X) / 2) - 1\n\n def solve_K(self):\n \"\"\"\\\\\\\\\\\\ METHOD: Compute the iDFT with truncated K coefficients (Due to computation complexity, we will not use it in Section 2) :return iDFT x of duration N from partial DFT X, i.e., X[0], ..., X[K] with K < N/2 :Treal the realt time vector of size N\"\"\"\n x = np.zeros(self.N)\n for n in range(self.N):\n x[n] = 1 / np.sqrt(self.N) * self.X[0] * np.exp(1j * 2 * cmath.pi * 0 * n / self.N)\n for k in range(1, self.K + 1):\n x[n] = x[n] + 1 / np.sqrt(self.N) * self.X[k] * np.exp(1j * 2 * cmath.pi * k * n / self.N)\n x[n] = x[n] + 1 / np.sqrt(self.N) * np.conj(self.X[k]) * np.exp(-1j * 2 * cmath.pi * k * n / self.N)\n Ts = 1 / self.fs\n Treal = np.arange(self.N) * Ts\n return (x, Treal)\n\n def solve_ifft(self):\n \"\"\"\\\\\\\\\\\\ METHOD: Compute the iDFT with provided function np.fft.ifft (Computationally efficient) :Treal the realt time vector of size N\"\"\"\n x = np.fft.ifft(self.X, self.N) * np.sqrt(self.N)\n Ts = 1 / self.fs\n Treal = np.arange(self.N) * Ts\n return (x, Treal)\n", "source": "the_stack_v2_python_sparse", "source_path": "Inverse Discrete Fourier Transform/iDFT_main.py", "source_repo": "FG-14/Signals-and-Information-Processing-DSP-", "split": "val", "star_events_count": 0} {"blob_id": "45f9a14b40a5267d1d89f337bfd4211fda452139", "bodies": ["all_mpis = 
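Two caveats in the idft record that just ended: solve_K accumulates complex exponentials into a real np.zeros(self.N) buffer, which NumPy at best warns about (the k / -k conjugate pairing makes the imaginary parts cancel, so the real result is still correct), and the np.sqrt(N) factor in solve_ifft implies the unitary DFT convention X = fft(x) / sqrt(N). A quick round-trip check of that convention:

import numpy as np

fs, N = 8000, 64
x = np.cos(2 * np.pi * 440 * np.arange(N) / fs)   # any real test signal

X = np.fft.fft(x) / np.sqrt(N)            # forward DFT under the unitary convention
x_rec = np.fft.ifft(X, N) * np.sqrt(N)    # exactly what solve_ifft computes
Treal = np.arange(N) / fs                 # the record's time vector (Ts = 1/fs)

assert np.allclose(x, x_rec.real)
assert Treal.shape == (N,)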
[]\nall_modules = []\nall_purposes = []\nfor sub_class in monitor.common.Monitor.__subclasses__():\n all_mpis.append((sub_class.module(sub_class), sub_class.purpose(sub_class)))\n all_modules.append(sub_class.module(sub_class))\n all_purposes.append(sub_class.purpose(sub_class))\nself.get_monitors.__func__.__doc__ = self.get_monitors.__func__.__doc__ % (set(all_modules), set(all_purposes))\nself.get_monitor.__func__.__doc__ = self.get_monitor.__func__.__doc__ % all_mpis", "mpis = []\nfor sub_class in monitor.common.Monitor.__subclasses__():\n if module is not None and sub_class.module(sub_class) != module:\n continue\n if purpose is not None and sub_class.purpose(sub_class) != purpose:\n continue\n m_class = sub_class()\n mpis.append(m_class)\nreturn mpis", "mpis = MPI.get_monitors(module, purpose)\nif len(mpis) != 1:\n err = LookupError('Find {} {}-{} monitors'.format(len(mpis), module, purpose))\n LOGGER.error('MPI.%s: %s', inspect.stack()[0][3], str(err))\n raise err\nreturn mpis[0]", "mpis = []\nfor sub_class in pool:\n if module is not None and sub_class.module() != module:\n continue\n if purpose is not None and sub_class.purpose() != purpose:\n continue\n mpis.append(sub_class)\nif len(mpis) != 1:\n err = LookupError('Find {} {}-{} monitors in pool'.format(len(mpis), module, purpose))\n LOGGER.error('MPI.%s: %s', inspect.stack()[0][3], str(err))\n raise err\nreturn mpis[0]", "mts = []\nfor m_mpi in monitors:\n if pool is None:\n mon = MPI.get_monitor(m_mpi[0], m_mpi[1])\n else:\n mon = MPI.get_monitor_pooled(m_mpi[0], m_mpi[1], pool)\n m_thread = ThreadedCall(mon.report, ('data', None, m_mpi[2]))\n mts.append(m_thread)\n m_thread.start()\nrets = []\nfor m_thread in mts:\n start = time.time()\n ret = m_thread.get_result()\n end = time.time()\n LOGGER.debug('MPI.%s: Cost %s s to call %s, ret=%s', inspect.stack()[0][3], end - start, m_thread.func, str(ret))\n if isinstance(ret, Exception):\n return ret\n rets += ret\nreturn rets"], "bodies_text": "<|body_start_0|>\n all_mpis = []\n all_modules = []\n all_purposes = []\n for sub_class in monitor.common.Monitor.__subclasses__():\n all_mpis.append((sub_class.module(sub_class), sub_class.purpose(sub_class)))\n all_modules.append(sub_class.module(sub_class))\n all_purposes.append(sub_class.purpose(sub_class))\n self.get_monitors.__func__.__doc__ = self.get_monitors.__func__.__doc__ % (set(all_modules), set(all_purposes))\n self.get_monitor.__func__.__doc__ = self.get_monitor.__func__.__doc__ % all_mpis\n<|end_body_0|>\n\n<|body_start_1|>\n mpis = []\n for sub_class in monitor.common.Monitor.__subclasses__():\n if module is not None and sub_class.module(sub_class) != module:\n continue\n if purpose is not None and sub_class.purpose(sub_class) != purpose:\n continue\n m_class = sub_class()\n mpis.append(m_class)\n return mpis\n<|end_body_1|>\n\n<|body_start_2|>\n mpis = MPI.get_monitors(module, purpose)\n if len(mpis) != 1:\n err = LookupError('Find {} {}-{} monitors'.format(len(mpis), module, purpose))\n LOGGER.error('MPI.%s: %s', inspect.stack()[0][3], str(err))\n raise err\n return mpis[0]\n<|end_body_2|>\n\n<|body_start_3|>\n mpis = []\n for sub_class in pool:\n if module is not None and sub_class.module() != module:\n continue\n if purpose is not None and sub_class.purpose() != purpose:\n continue\n mpis.append(sub_class)\n if len(mpis) != 1:\n err = LookupError('Find {} {}-{} monitors in pool'.format(len(mpis), module, purpose))\n LOGGER.error('MPI.%s: %s', inspect.stack()[0][3], str(err))\n raise err\n return 
mpis[0]\n<|end_body_3|>\n\n<|body_start_4|>\n mts = []\n for m_mpi in monitors:\n if pool is None:\n mon = MPI.get_monitor(m_mpi[0], m_mpi[1])\n else:\n mon = MPI.get_monitor_pooled(m_mpi[0], m_mpi[1], pool)\n m_thread = ThreadedCall(mon.report, ('data', None, m_mpi[2]))\n mts.append(m_thread)\n m_thread.start()\n rets = []\n for m_thread in mts:\n start = time.time()\n ret = m_thread.get_result()\n end = time.time()\n LOGGER.debug('MPI.%s: Cost %s s to call %s, ret=%s', inspect.stack()[0][3], end - start, m_thread.func, str(ret))\n if isinstance(ret, Exception):\n return ret\n rets += ret\n return rets\n<|end_body_4|>\n", "class_docstring": "The monitor plugin", "class_name": "MPI", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MPI:\n \"\"\"The monitor plugin\"\"\"\n\n def __init__(self):\n \"\"\"Initialize. :param: None :returns: None :raises: None\"\"\"\n <|body_0|>\n\n def get_monitors(cls, module=None, purpose=None):\n \"\"\"Get monitors of 'module' for 'purpose'. :param module(optional): %s :param purpose(optional): %s :returns list: Success, all found monitors or null :raises: None\"\"\"\n <|body_1|>\n\n def get_monitor(cls, module, purpose):\n \"\"\"Get monitor of 'module' for 'purpose'. :param module & purpose: %s :returns mpi: Success, the found monitor :raises LookupError: Fail, find monitor error\"\"\"\n <|body_2|>\n\n def get_monitor_pooled(cls, module, purpose, pool):\n \"\"\"Get monitor of 'module' for 'purpose' in pool. :param module & purpose: see get_monitor() :param pool: monitors pool for looking up :returns mpi: Success, the found monitor :raises LookupError: Fail, find monitor error\"\"\"\n <|body_3|>\n\n def get_monitors_data(cls, monitors, pool=None):\n \"\"\"Get given monitors report data in one. :param monitors: ((module, purpose, options), ...) 
options is for report(para) :param pool: monitors pool for looking up :returns list: Success, decoded data strings of all given monitors :returns Exceptions: Success, formatted info :raises LookupError: Fail, find monitor error\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n all_mpis = []\n all_modules = []\n all_purposes = []\n for sub_class in monitor.common.Monitor.__subclasses__():\n all_mpis.append((sub_class.module(sub_class), sub_class.purpose(sub_class)))\n all_modules.append(sub_class.module(sub_class))\n all_purposes.append(sub_class.purpose(sub_class))\n self.get_monitors.__func__.__doc__ = self.get_monitors.__func__.__doc__ % (set(all_modules), set(all_purposes))\n self.get_monitor.__func__.__doc__ = self.get_monitor.__func__.__doc__ % all_mpis\n<|end_body_0|>\n\n<|body_start_1|>\n mpis = []\n for sub_class in monitor.common.Monitor.__subclasses__():\n if module is not None and sub_class.module(sub_class) != module:\n continue\n if purpose is not None and sub_class.purpose(sub_class) != purpose:\n continue\n m_class = sub_class()\n mpis.append(m_class)\n return mpis\n<|end_body_1|>\n\n<|body_start_2|>\n mpis = MPI.get_monitors(module, purpose)\n if len(mpis) != 1:\n err = LookupError('Find {} {}-{} monitors'.format(len(mpis), module, purpose))\n LOGGER.error('MPI.%s: %s', inspect.stack()[0][3], str(err))\n raise err\n return mpis[0]\n<|end_body_2|>\n\n<|body_start_3|>\n mpis = []\n for sub_class in pool:\n if module is not None and sub_class.module() != module:\n continue\n if purpose is not None and sub_class.purpose() != purpose:\n continue\n mpis.append(sub_class)\n if len(mpis) != 1:\n err = LookupError('Find {} {}-{} monitors in pool'.format(len(mpis), module, purpose))\n LOGGER.error('MPI.%s: %s', inspect.stack()[0][3], str(err))\n raise err\n return mpis[0]\n<|end_body_3|>\n\n<|body_start_4|>\n mts = []\n for m_mpi in monitors:\n if pool is None:\n mon = MPI.get_monitor(m_mpi[0], m_mpi[1])\n else:\n mon = MPI.get_monitor_pooled(m_mpi[0], m_mpi[1], pool)\n m_thread = ThreadedCall(mon.report, ('data', None, m_mpi[2]))\n mts.append(m_thread)\n m_thread.start()\n rets = []\n for m_thread in mts:\n start = time.time()\n ret = m_thread.get_result()\n end = time.time()\n LOGGER.debug('MPI.%s: Cost %s s to call %s, ret=%s', inspect.stack()[0][3], end - start, m_thread.func, str(ret))\n if isinstance(ret, Exception):\n return ret\n rets += ret\n return rets\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000250", "length_bytes": 7577, "license_type": "no_license", "methods": [{"docstring": "Initialize. :param: None :returns: None :raises: None", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Get monitors of 'module' for 'purpose'. :param module(optional): %s :param purpose(optional): %s :returns list: Success, all found monitors or null :raises: None", "name": "get_monitors", "signature": "def get_monitors(cls, module=None, purpose=None)"}, {"docstring": "Get monitor of 'module' for 'purpose'. :param module & purpose: %s :returns mpi: Success, the found monitor :raises LookupError: Fail, find monitor error", "name": "get_monitor", "signature": "def get_monitor(cls, module, purpose)"}, {"docstring": "Get monitor of 'module' for 'purpose' in pool. 
:param module & purpose: see get_monitor() :param pool: monitors pool for looking up :returns mpi: Success, the found monitor :raises LookupError: Fail, find monitor error", "name": "get_monitor_pooled", "signature": "def get_monitor_pooled(cls, module, purpose, pool)"}, {"docstring": "Get given monitors report data in one. :param monitors: ((module, purpose, options), ...) options is for report(para) :param pool: monitors pool for looking up :returns list: Success, decoded data strings of all given monitors :returns Exceptions: Success, formatted info :raises LookupError: Fail, find monitor error", "name": "get_monitors_data", "signature": "def get_monitors_data(cls, monitors, pool=None)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_test_000308", "prompt": "Implement the Python class `MPI` described below.\n\nClass description:\nThe monitor plugin\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize. :param: None :returns: None :raises: None\n- def get_monitors(cls, module=None, purpose=None): Get monitors of 'module' for 'purpose'. :param module(optional): %s :param purpose(optional): %s :returns list: Success, all found monitors or null :raises: None\n- def get_monitor(cls, module, purpose): Get monitor of 'module' for 'purpose'. :param module & purpose: %s :returns mpi: Success, the found monitor :raises LookupError: Fail, find monitor error\n- def get_monitor_pooled(cls, module, purpose, pool): Get monitor of 'module' for 'purpose' in pool. :param module & purpose: see get_monitor() :param pool: monitors pool for looking up :returns mpi: Success, the found monitor :raises LookupError: Fail, find monitor error\n- def get_monitors_data(cls, monitors, pool=None): Get given monitors report data in one. :param monitors: ((module, purpose, options), ...) options is for report(para) :param pool: monitors pool for looking up :returns list: Success, decoded data strings of all given monitors :returns Exceptions: Success, formatted info :raises LookupError: Fail, find monitor error", "prompted_full_text": "Implement the Python class `MPI` described below.\n\nClass description:\nThe monitor plugin\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize. :param: None :returns: None :raises: None\n- def get_monitors(cls, module=None, purpose=None): Get monitors of 'module' for 'purpose'. :param module(optional): %s :param purpose(optional): %s :returns list: Success, all found monitors or null :raises: None\n- def get_monitor(cls, module, purpose): Get monitor of 'module' for 'purpose'. :param module & purpose: %s :returns mpi: Success, the found monitor :raises LookupError: Fail, find monitor error\n- def get_monitor_pooled(cls, module, purpose, pool): Get monitor of 'module' for 'purpose' in pool. :param module & purpose: see get_monitor() :param pool: monitors pool for looking up :returns mpi: Success, the found monitor :raises LookupError: Fail, find monitor error\n- def get_monitors_data(cls, monitors, pool=None): Get given monitors report data in one. :param monitors: ((module, purpose, options), ...) options is for report(para) :param pool: monitors pool for looking up :returns list: Success, decoded data strings of all given monitors :returns Exceptions: Success, formatted info :raises LookupError: Fail, find monitor error\n\n<|skeleton|>\nclass MPI:\n \"\"\"The monitor plugin\"\"\"\n\n def __init__(self):\n \"\"\"Initialize. 
:param: None :returns: None :raises: None\"\"\"\n <|body_0|>\n\n def get_monitors(cls, module=None, purpose=None):\n \"\"\"Get monitors of 'module' for 'purpose'. :param module(optional): %s :param purpose(optional): %s :returns list: Success, all found monitors or null :raises: None\"\"\"\n <|body_1|>\n\n def get_monitor(cls, module, purpose):\n \"\"\"Get monitor of 'module' for 'purpose'. :param module & purpose: %s :returns mpi: Success, the found monitor :raises LookupError: Fail, find monitor error\"\"\"\n <|body_2|>\n\n def get_monitor_pooled(cls, module, purpose, pool):\n \"\"\"Get monitor of 'module' for 'purpose' in pool. :param module & purpose: see get_monitor() :param pool: monitors pool for looking up :returns mpi: Success, the found monitor :raises LookupError: Fail, find monitor error\"\"\"\n <|body_3|>\n\n def get_monitors_data(cls, monitors, pool=None):\n \"\"\"Get given monitors report data in one. :param monitors: ((module, purpose, options), ...) options is for report(para) :param pool: monitors pool for looking up :returns list: Success, decoded data strings of all given monitors :returns Exceptions: Success, formatted info :raises LookupError: Fail, find monitor error\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n all_mpis = []\n all_modules = []\n all_purposes = []\n for sub_class in monitor.common.Monitor.__subclasses__():\n all_mpis.append((sub_class.module(sub_class), sub_class.purpose(sub_class)))\n all_modules.append(sub_class.module(sub_class))\n all_purposes.append(sub_class.purpose(sub_class))\n self.get_monitors.__func__.__doc__ = self.get_monitors.__func__.__doc__ % (set(all_modules), set(all_purposes))\n self.get_monitor.__func__.__doc__ = self.get_monitor.__func__.__doc__ % all_mpis\n<|end_body_0|>\n\n<|body_start_1|>\n mpis = []\n for sub_class in monitor.common.Monitor.__subclasses__():\n if module is not None and sub_class.module(sub_class) != module:\n continue\n if purpose is not None and sub_class.purpose(sub_class) != purpose:\n continue\n m_class = sub_class()\n mpis.append(m_class)\n return mpis\n<|end_body_1|>\n\n<|body_start_2|>\n mpis = MPI.get_monitors(module, purpose)\n if len(mpis) != 1:\n err = LookupError('Find {} {}-{} monitors'.format(len(mpis), module, purpose))\n LOGGER.error('MPI.%s: %s', inspect.stack()[0][3], str(err))\n raise err\n return mpis[0]\n<|end_body_2|>\n\n<|body_start_3|>\n mpis = []\n for sub_class in pool:\n if module is not None and sub_class.module() != module:\n continue\n if purpose is not None and sub_class.purpose() != purpose:\n continue\n mpis.append(sub_class)\n if len(mpis) != 1:\n err = LookupError('Find {} {}-{} monitors in pool'.format(len(mpis), module, purpose))\n LOGGER.error('MPI.%s: %s', inspect.stack()[0][3], str(err))\n raise err\n return mpis[0]\n<|end_body_3|>\n\n<|body_start_4|>\n mts = []\n for m_mpi in monitors:\n if pool is None:\n mon = MPI.get_monitor(m_mpi[0], m_mpi[1])\n else:\n mon = MPI.get_monitor_pooled(m_mpi[0], m_mpi[1], pool)\n m_thread = ThreadedCall(mon.report, ('data', None, m_mpi[2]))\n mts.append(m_thread)\n m_thread.start()\n rets = []\n for m_thread in mts:\n start = time.time()\n ret = m_thread.get_result()\n end = time.time()\n LOGGER.debug('MPI.%s: Cost %s s to call %s, ret=%s', inspect.stack()[0][3], end - start, m_thread.func, str(ret))\n if isinstance(ret, Exception):\n return ret\n rets += ret\n return rets\n<|end_body_4|>\n", "revision_id": "e4f257d00305849b9a52a033651da09412436785", "skeleton": "<|skeleton|>\nclass MPI:\n \"\"\"The monitor 
plugin\"\"\"\n\n def __init__(self):\n \"\"\"Initialize. :param: None :returns: None :raises: None\"\"\"\n <|body_0|>\n\n def get_monitors(cls, module=None, purpose=None):\n \"\"\"Get monitors of 'module' for 'purpose'. :param module(optional): %s :param purpose(optional): %s :returns list: Success, all found monitors or null :raises: None\"\"\"\n <|body_1|>\n\n def get_monitor(cls, module, purpose):\n \"\"\"Get monitor of 'module' for 'purpose'. :param module & purpose: %s :returns mpi: Success, the found monitor :raises LookupError: Fail, find monitor error\"\"\"\n <|body_2|>\n\n def get_monitor_pooled(cls, module, purpose, pool):\n \"\"\"Get monitor of 'module' for 'purpose' in pool. :param module & purpose: see get_monitor() :param pool: monitors pool for looking up :returns mpi: Success, the found monitor :raises LookupError: Fail, find monitor error\"\"\"\n <|body_3|>\n\n def get_monitors_data(cls, monitors, pool=None):\n \"\"\"Get given monitors report data in one. :param monitors: ((module, purpose, options), ...) options is for report(para) :param pool: monitors pool for looking up :returns list: Success, decoded data strings of all given monitors :returns Exceptions: Success, formatted info :raises LookupError: Fail, find monitor error\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MPI:\n \"\"\"The monitor plugin\"\"\"\n\n def __init__(self):\n \"\"\"Initialize. :param: None :returns: None :raises: None\"\"\"\n all_mpis = []\n all_modules = []\n all_purposes = []\n for sub_class in monitor.common.Monitor.__subclasses__():\n all_mpis.append((sub_class.module(sub_class), sub_class.purpose(sub_class)))\n all_modules.append(sub_class.module(sub_class))\n all_purposes.append(sub_class.purpose(sub_class))\n self.get_monitors.__func__.__doc__ = self.get_monitors.__func__.__doc__ % (set(all_modules), set(all_purposes))\n self.get_monitor.__func__.__doc__ = self.get_monitor.__func__.__doc__ % all_mpis\n\n def get_monitors(cls, module=None, purpose=None):\n \"\"\"Get monitors of 'module' for 'purpose'. :param module(optional): %s :param purpose(optional): %s :returns list: Success, all found monitors or null :raises: None\"\"\"\n mpis = []\n for sub_class in monitor.common.Monitor.__subclasses__():\n if module is not None and sub_class.module(sub_class) != module:\n continue\n if purpose is not None and sub_class.purpose(sub_class) != purpose:\n continue\n m_class = sub_class()\n mpis.append(m_class)\n return mpis\n\n def get_monitor(cls, module, purpose):\n \"\"\"Get monitor of 'module' for 'purpose'. :param module & purpose: %s :returns mpi: Success, the found monitor :raises LookupError: Fail, find monitor error\"\"\"\n mpis = MPI.get_monitors(module, purpose)\n if len(mpis) != 1:\n err = LookupError('Find {} {}-{} monitors'.format(len(mpis), module, purpose))\n LOGGER.error('MPI.%s: %s', inspect.stack()[0][3], str(err))\n raise err\n return mpis[0]\n\n def get_monitor_pooled(cls, module, purpose, pool):\n \"\"\"Get monitor of 'module' for 'purpose' in pool. 
:param module & purpose: see get_monitor() :param pool: monitors pool for looking up :returns mpi: Success, the found monitor :raises LookupError: Fail, find monitor error\"\"\"\n mpis = []\n for sub_class in pool:\n if module is not None and sub_class.module() != module:\n continue\n if purpose is not None and sub_class.purpose() != purpose:\n continue\n mpis.append(sub_class)\n if len(mpis) != 1:\n err = LookupError('Find {} {}-{} monitors in pool'.format(len(mpis), module, purpose))\n LOGGER.error('MPI.%s: %s', inspect.stack()[0][3], str(err))\n raise err\n return mpis[0]\n\n def get_monitors_data(cls, monitors, pool=None):\n \"\"\"Get given monitors report data in one. :param monitors: ((module, purpose, options), ...) options is for report(para) :param pool: monitors pool for looking up :returns list: Success, decoded data strings of all given monitors :returns Exceptions: Success, formatted info :raises LookupError: Fail, find monitor error\"\"\"\n mts = []\n for m_mpi in monitors:\n if pool is None:\n mon = MPI.get_monitor(m_mpi[0], m_mpi[1])\n else:\n mon = MPI.get_monitor_pooled(m_mpi[0], m_mpi[1], pool)\n m_thread = ThreadedCall(mon.report, ('data', None, m_mpi[2]))\n mts.append(m_thread)\n m_thread.start()\n rets = []\n for m_thread in mts:\n start = time.time()\n ret = m_thread.get_result()\n end = time.time()\n LOGGER.debug('MPI.%s: Cost %s s to call %s, ret=%s', inspect.stack()[0][3], end - start, m_thread.func, str(ret))\n if isinstance(ret, Exception):\n return ret\n rets += ret\n return rets\n", "source": "the_stack_v2_python_sparse", "source_path": "analysis/plugin/plugin.py", "source_repo": "hanxinke/A-Tune", "split": "val", "star_events_count": 0} {"blob_id": "81211fd83abd479e819a2c568e9b9b97dd48cfb0", "bodies": ["self.agency = agency\nself.base_url = base_url\nself.client_certificate_password = client_certificate_password\nself.mission = mission\nself.role = role", "if dictionary is None:\n return None\nagency = dictionary.get('agency')\nbase_url = dictionary.get('baseUrl')\nclient_certificate_password = dictionary.get('clientCertificatePassword')\nmission = dictionary.get('mission')\nrole = dictionary.get('role')\nreturn cls(agency, base_url, client_certificate_password, mission, role)"], "bodies_text": "<|body_start_0|>\n self.agency = agency\n self.base_url = base_url\n self.client_certificate_password = client_certificate_password\n self.mission = mission\n self.role = role\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n agency = dictionary.get('agency')\n base_url = dictionary.get('baseUrl')\n client_certificate_password = dictionary.get('clientCertificatePassword')\n mission = dictionary.get('mission')\n role = dictionary.get('role')\n return cls(agency, base_url, client_certificate_password, mission, role)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'C2SAccessPortal' model. Specifies information required to connect to CAP to get AWS credentials. C2SAccessPortal(CAP) is AWS commercial cloud service access portal. Attributes: agency (string): Name of the agency. base_url (string): The base url of C2S CAP server. client_certificate_password (string): Encrypted password of the client private key. mission (string): Name of the mission. role (string): Role type.", "class_name": "C2SAccessPortal", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass C2SAccessPortal:\n \"\"\"Implementation of the 'C2SAccessPortal' model. 
Specifies information required to connect to CAP to get AWS credentials. C2SAccessPortal(CAP) is AWS commercial cloud service access portal. Attributes: agency (string): Name of the agency. base_url (string): The base url of C2S CAP server. client_certificate_password (string): Encrypted password of the client private key. mission (string): Name of the mission. role (string): Role type.\"\"\"\n\n def __init__(self, agency=None, base_url=None, client_certificate_password=None, mission=None, role=None):\n \"\"\"Constructor for the C2SAccessPortal class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.agency = agency\n self.base_url = base_url\n self.client_certificate_password = client_certificate_password\n self.mission = mission\n self.role = role\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n agency = dictionary.get('agency')\n base_url = dictionary.get('baseUrl')\n client_certificate_password = dictionary.get('clientCertificatePassword')\n mission = dictionary.get('mission')\n role = dictionary.get('role')\n return cls(agency, base_url, client_certificate_password, mission, role)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000251", "length_bytes": 2391, "license_type": "permissive", "methods": [{"docstring": "Constructor for the C2SAccessPortal class", "name": "__init__", "signature": "def __init__(self, agency=None, base_url=None, client_certificate_password=None, mission=None, role=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `C2SAccessPortal` described below.\n\nClass description:\nImplementation of the 'C2SAccessPortal' model. Specifies information required to connect to CAP to get AWS credentials. C2SAccessPortal(CAP) is AWS commercial cloud service access portal. Attributes: agency (string): Name of the agency. base_url (string): The base url of C2S CAP server. client_certificate_password (string): Encrypted password of the client private key. mission (string): Name of the mission. role (string): Role type.\n\nMethod signatures and docstrings:\n- def __init__(self, agency=None, base_url=None, client_certificate_password=None, mission=None, role=None): Constructor for the C2SAccessPortal class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `C2SAccessPortal` described below.\n\nClass description:\nImplementation of the 'C2SAccessPortal' model. 
Specifies information required to connect to CAP to get AWS credentials. C2SAccessPortal(CAP) is AWS commercial cloud service access portal. Attributes: agency (string): Name of the agency. base_url (string): The base url of C2S CAP server. client_certificate_password (string): Encrypted password of the client private key. mission (string): Name of the mission. role (string): Role type.\n\nMethod signatures and docstrings:\n- def __init__(self, agency=None, base_url=None, client_certificate_password=None, mission=None, role=None): Constructor for the C2SAccessPortal class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass C2SAccessPortal:\n \"\"\"Implementation of the 'C2SAccessPortal' model. Specifies information required to connect to CAP to get AWS credentials. C2SAccessPortal(CAP) is AWS commercial cloud service access portal. Attributes: agency (string): Name of the agency. base_url (string): The base url of C2S CAP server. client_certificate_password (string): Encrypted password of the client private key. mission (string): Name of the mission. role (string): Role type.\"\"\"\n\n def __init__(self, agency=None, base_url=None, client_certificate_password=None, mission=None, role=None):\n \"\"\"Constructor for the C2SAccessPortal class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.agency = agency\n self.base_url = base_url\n self.client_certificate_password = client_certificate_password\n self.mission = mission\n self.role = role\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n agency = dictionary.get('agency')\n base_url = dictionary.get('baseUrl')\n client_certificate_password = dictionary.get('clientCertificatePassword')\n mission = dictionary.get('mission')\n role = dictionary.get('role')\n return cls(agency, base_url, client_certificate_password, mission, role)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass C2SAccessPortal:\n \"\"\"Implementation of the 'C2SAccessPortal' model. Specifies information required to connect to CAP to get AWS credentials. C2SAccessPortal(CAP) is AWS commercial cloud service access portal. Attributes: agency (string): Name of the agency. base_url (string): The base url of C2S CAP server. client_certificate_password (string): Encrypted password of the client private key. mission (string): Name of the mission. role (string): Role type.\"\"\"\n\n def __init__(self, agency=None, base_url=None, client_certificate_password=None, mission=None, role=None):\n \"\"\"Constructor for the C2SAccessPortal class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. 
The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class C2SAccessPortal:\n \"\"\"Implementation of the 'C2SAccessPortal' model. Specifies information required to connect to CAP to get AWS credentials. C2SAccessPortal(CAP) is AWS commercial cloud service access portal. Attributes: agency (string): Name of the agency. base_url (string): The base url of C2S CAP server. client_certificate_password (string): Encrypted password of the client private key. mission (string): Name of the mission. role (string): Role type.\"\"\"\n\n def __init__(self, agency=None, base_url=None, client_certificate_password=None, mission=None, role=None):\n \"\"\"Constructor for the C2SAccessPortal class\"\"\"\n self.agency = agency\n self.base_url = base_url\n self.client_certificate_password = client_certificate_password\n self.mission = mission\n self.role = role\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n agency = dictionary.get('agency')\n base_url = dictionary.get('baseUrl')\n client_certificate_password = dictionary.get('clientCertificatePassword')\n mission = dictionary.get('mission')\n role = dictionary.get('role')\n return cls(agency, base_url, client_certificate_password, mission, role)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/c2s_access_portal.py", "source_repo": "cohesity/management-sdk-python", "split": "val", "star_events_count": 24} {"blob_id": "57019740b8480f3926887ee06a1cb8e92300a8bf", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn ServiceUpdateMessage()", "from .item_body import ItemBody\nfrom .service_announcement_attachment import ServiceAnnouncementAttachment\nfrom .service_announcement_base import ServiceAnnouncementBase\nfrom .service_update_category import ServiceUpdateCategory\nfrom .service_update_message_viewpoint import ServiceUpdateMessageViewpoint\nfrom .service_update_severity import ServiceUpdateSeverity\nfrom .item_body import ItemBody\nfrom .service_announcement_attachment import ServiceAnnouncementAttachment\nfrom .service_announcement_base import ServiceAnnouncementBase\nfrom .service_update_category import ServiceUpdateCategory\nfrom .service_update_message_viewpoint import ServiceUpdateMessageViewpoint\nfrom .service_update_severity import ServiceUpdateSeverity\nfields: Dict[str, Callable[[Any], None]] = {'actionRequiredByDateTime': lambda n: setattr(self, 'action_required_by_date_time', n.get_datetime_value()), 'attachments': lambda n: setattr(self, 'attachments', n.get_collection_of_object_values(ServiceAnnouncementAttachment)), 'attachmentsArchive': lambda n: setattr(self, 'attachments_archive', n.get_bytes_value()), 'body': lambda n: setattr(self, 'body', n.get_object_value(ItemBody)), 'category': lambda n: setattr(self, 'category', n.get_enum_value(ServiceUpdateCategory)), 'hasAttachments': lambda n: setattr(self, 'has_attachments', n.get_bool_value()), 'isMajorChange': lambda n: setattr(self, 'is_major_change', 
n.get_bool_value()), 'services': lambda n: setattr(self, 'services', n.get_collection_of_primitive_values(str)), 'severity': lambda n: setattr(self, 'severity', n.get_enum_value(ServiceUpdateSeverity)), 'tags': lambda n: setattr(self, 'tags', n.get_collection_of_primitive_values(str)), 'viewPoint': lambda n: setattr(self, 'view_point', n.get_object_value(ServiceUpdateMessageViewpoint))}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_datetime_value('actionRequiredByDateTime', self.action_required_by_date_time)\nwriter.write_collection_of_object_values('attachments', self.attachments)\nwriter.write_bytes_value('attachmentsArchive', self.attachments_archive)\nwriter.write_object_value('body', self.body)\nwriter.write_enum_value('category', self.category)\nwriter.write_bool_value('hasAttachments', self.has_attachments)\nwriter.write_bool_value('isMajorChange', self.is_major_change)\nwriter.write_collection_of_primitive_values('services', self.services)\nwriter.write_enum_value('severity', self.severity)\nwriter.write_collection_of_primitive_values('tags', self.tags)\nwriter.write_object_value('viewPoint', self.view_point)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return ServiceUpdateMessage()\n<|end_body_0|>\n\n<|body_start_1|>\n from .item_body import ItemBody\n from .service_announcement_attachment import ServiceAnnouncementAttachment\n from .service_announcement_base import ServiceAnnouncementBase\n from .service_update_category import ServiceUpdateCategory\n from .service_update_message_viewpoint import ServiceUpdateMessageViewpoint\n from .service_update_severity import ServiceUpdateSeverity\n from .item_body import ItemBody\n from .service_announcement_attachment import ServiceAnnouncementAttachment\n from .service_announcement_base import ServiceAnnouncementBase\n from .service_update_category import ServiceUpdateCategory\n from .service_update_message_viewpoint import ServiceUpdateMessageViewpoint\n from .service_update_severity import ServiceUpdateSeverity\n fields: Dict[str, Callable[[Any], None]] = {'actionRequiredByDateTime': lambda n: setattr(self, 'action_required_by_date_time', n.get_datetime_value()), 'attachments': lambda n: setattr(self, 'attachments', n.get_collection_of_object_values(ServiceAnnouncementAttachment)), 'attachmentsArchive': lambda n: setattr(self, 'attachments_archive', n.get_bytes_value()), 'body': lambda n: setattr(self, 'body', n.get_object_value(ItemBody)), 'category': lambda n: setattr(self, 'category', n.get_enum_value(ServiceUpdateCategory)), 'hasAttachments': lambda n: setattr(self, 'has_attachments', n.get_bool_value()), 'isMajorChange': lambda n: setattr(self, 'is_major_change', n.get_bool_value()), 'services': lambda n: setattr(self, 'services', n.get_collection_of_primitive_values(str)), 'severity': lambda n: setattr(self, 'severity', n.get_enum_value(ServiceUpdateSeverity)), 'tags': lambda n: setattr(self, 'tags', n.get_collection_of_primitive_values(str)), 'viewPoint': lambda n: setattr(self, 'view_point', n.get_object_value(ServiceUpdateMessageViewpoint))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_datetime_value('actionRequiredByDateTime', 
self.action_required_by_date_time)\n writer.write_collection_of_object_values('attachments', self.attachments)\n writer.write_bytes_value('attachmentsArchive', self.attachments_archive)\n writer.write_object_value('body', self.body)\n writer.write_enum_value('category', self.category)\n writer.write_bool_value('hasAttachments', self.has_attachments)\n writer.write_bool_value('isMajorChange', self.is_major_change)\n writer.write_collection_of_primitive_values('services', self.services)\n writer.write_enum_value('severity', self.severity)\n writer.write_collection_of_primitive_values('tags', self.tags)\n writer.write_object_value('viewPoint', self.view_point)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ServiceUpdateMessage", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ServiceUpdateMessage:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ServiceUpdateMessage:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ServiceUpdateMessage\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return ServiceUpdateMessage()\n<|end_body_0|>\n\n<|body_start_1|>\n from .item_body import ItemBody\n from .service_announcement_attachment import ServiceAnnouncementAttachment\n from .service_announcement_base import ServiceAnnouncementBase\n from .service_update_category import ServiceUpdateCategory\n from .service_update_message_viewpoint import ServiceUpdateMessageViewpoint\n from .service_update_severity import ServiceUpdateSeverity\n from .item_body import ItemBody\n from .service_announcement_attachment import ServiceAnnouncementAttachment\n from .service_announcement_base import ServiceAnnouncementBase\n from .service_update_category import ServiceUpdateCategory\n from .service_update_message_viewpoint import ServiceUpdateMessageViewpoint\n from .service_update_severity import ServiceUpdateSeverity\n fields: Dict[str, Callable[[Any], None]] = {'actionRequiredByDateTime': lambda n: setattr(self, 'action_required_by_date_time', n.get_datetime_value()), 'attachments': lambda n: setattr(self, 'attachments', n.get_collection_of_object_values(ServiceAnnouncementAttachment)), 'attachmentsArchive': lambda n: setattr(self, 'attachments_archive', n.get_bytes_value()), 'body': lambda n: setattr(self, 'body', n.get_object_value(ItemBody)), 'category': lambda n: setattr(self, 'category', n.get_enum_value(ServiceUpdateCategory)), 'hasAttachments': lambda n: setattr(self, 'has_attachments', n.get_bool_value()), 'isMajorChange': lambda n: setattr(self, 'is_major_change', n.get_bool_value()), 'services': lambda n: setattr(self, 'services', n.get_collection_of_primitive_values(str)), 'severity': lambda n: setattr(self, 'severity', n.get_enum_value(ServiceUpdateSeverity)), 'tags': lambda n: setattr(self, 'tags', n.get_collection_of_primitive_values(str)), 'viewPoint': lambda n: setattr(self, 
'view_point', n.get_object_value(ServiceUpdateMessageViewpoint))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_datetime_value('actionRequiredByDateTime', self.action_required_by_date_time)\n writer.write_collection_of_object_values('attachments', self.attachments)\n writer.write_bytes_value('attachmentsArchive', self.attachments_archive)\n writer.write_object_value('body', self.body)\n writer.write_enum_value('category', self.category)\n writer.write_bool_value('hasAttachments', self.has_attachments)\n writer.write_bool_value('isMajorChange', self.is_major_change)\n writer.write_collection_of_primitive_values('services', self.services)\n writer.write_enum_value('severity', self.severity)\n writer.write_collection_of_primitive_values('tags', self.tags)\n writer.write_object_value('viewPoint', self.view_point)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000252", "length_bytes": 6416, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ServiceUpdateMessage", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ServiceUpdateMessage"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "prompt": "Implement the Python class `ServiceUpdateMessage` described below.\n\nClass description:\nImplement the ServiceUpdateMessage class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ServiceUpdateMessage: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ServiceUpdateMessage\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `ServiceUpdateMessage` described below.\n\nClass description:\nImplement the ServiceUpdateMessage class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ServiceUpdateMessage: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ServiceUpdateMessage\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: 
SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass ServiceUpdateMessage:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ServiceUpdateMessage:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ServiceUpdateMessage\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return ServiceUpdateMessage()\n<|end_body_0|>\n\n<|body_start_1|>\n from .item_body import ItemBody\n from .service_announcement_attachment import ServiceAnnouncementAttachment\n from .service_announcement_base import ServiceAnnouncementBase\n from .service_update_category import ServiceUpdateCategory\n from .service_update_message_viewpoint import ServiceUpdateMessageViewpoint\n from .service_update_severity import ServiceUpdateSeverity\n from .item_body import ItemBody\n from .service_announcement_attachment import ServiceAnnouncementAttachment\n from .service_announcement_base import ServiceAnnouncementBase\n from .service_update_category import ServiceUpdateCategory\n from .service_update_message_viewpoint import ServiceUpdateMessageViewpoint\n from .service_update_severity import ServiceUpdateSeverity\n fields: Dict[str, Callable[[Any], None]] = {'actionRequiredByDateTime': lambda n: setattr(self, 'action_required_by_date_time', n.get_datetime_value()), 'attachments': lambda n: setattr(self, 'attachments', n.get_collection_of_object_values(ServiceAnnouncementAttachment)), 'attachmentsArchive': lambda n: setattr(self, 'attachments_archive', n.get_bytes_value()), 'body': lambda n: setattr(self, 'body', n.get_object_value(ItemBody)), 'category': lambda n: setattr(self, 'category', n.get_enum_value(ServiceUpdateCategory)), 'hasAttachments': lambda n: setattr(self, 'has_attachments', n.get_bool_value()), 'isMajorChange': lambda n: setattr(self, 'is_major_change', n.get_bool_value()), 'services': lambda n: setattr(self, 'services', n.get_collection_of_primitive_values(str)), 'severity': lambda n: setattr(self, 'severity', n.get_enum_value(ServiceUpdateSeverity)), 'tags': lambda n: setattr(self, 'tags', n.get_collection_of_primitive_values(str)), 'viewPoint': lambda n: setattr(self, 'view_point', n.get_object_value(ServiceUpdateMessageViewpoint))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_datetime_value('actionRequiredByDateTime', self.action_required_by_date_time)\n writer.write_collection_of_object_values('attachments', self.attachments)\n writer.write_bytes_value('attachmentsArchive', self.attachments_archive)\n writer.write_object_value('body', self.body)\n writer.write_enum_value('category', self.category)\n writer.write_bool_value('hasAttachments', self.has_attachments)\n 
writer.write_bool_value('isMajorChange', self.is_major_change)\n writer.write_collection_of_primitive_values('services', self.services)\n writer.write_enum_value('severity', self.severity)\n writer.write_collection_of_primitive_values('tags', self.tags)\n writer.write_object_value('viewPoint', self.view_point)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass ServiceUpdateMessage:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ServiceUpdateMessage:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ServiceUpdateMessage\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ServiceUpdateMessage:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ServiceUpdateMessage:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ServiceUpdateMessage\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return ServiceUpdateMessage()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .item_body import ItemBody\n from .service_announcement_attachment import ServiceAnnouncementAttachment\n from .service_announcement_base import ServiceAnnouncementBase\n from .service_update_category import ServiceUpdateCategory\n from .service_update_message_viewpoint import ServiceUpdateMessageViewpoint\n from .service_update_severity import ServiceUpdateSeverity\n from .item_body import ItemBody\n from .service_announcement_attachment import ServiceAnnouncementAttachment\n from .service_announcement_base import ServiceAnnouncementBase\n from .service_update_category import ServiceUpdateCategory\n from .service_update_message_viewpoint import ServiceUpdateMessageViewpoint\n from .service_update_severity import ServiceUpdateSeverity\n fields: Dict[str, Callable[[Any], None]] = {'actionRequiredByDateTime': lambda n: setattr(self, 'action_required_by_date_time', n.get_datetime_value()), 'attachments': lambda n: setattr(self, 'attachments', n.get_collection_of_object_values(ServiceAnnouncementAttachment)), 'attachmentsArchive': lambda n: setattr(self, 'attachments_archive', n.get_bytes_value()), 'body': lambda n: setattr(self, 'body', n.get_object_value(ItemBody)), 'category': lambda n: setattr(self, 'category', n.get_enum_value(ServiceUpdateCategory)), 'hasAttachments': lambda n: setattr(self, 'has_attachments', n.get_bool_value()), 'isMajorChange': lambda n: setattr(self, 'is_major_change', n.get_bool_value()), 'services': lambda n: setattr(self, 'services', n.get_collection_of_primitive_values(str)), 'severity': lambda n: setattr(self, 'severity', 
n.get_enum_value(ServiceUpdateSeverity)), 'tags': lambda n: setattr(self, 'tags', n.get_collection_of_primitive_values(str)), 'viewPoint': lambda n: setattr(self, 'view_point', n.get_object_value(ServiceUpdateMessageViewpoint))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_datetime_value('actionRequiredByDateTime', self.action_required_by_date_time)\n writer.write_collection_of_object_values('attachments', self.attachments)\n writer.write_bytes_value('attachmentsArchive', self.attachments_archive)\n writer.write_object_value('body', self.body)\n writer.write_enum_value('category', self.category)\n writer.write_bool_value('hasAttachments', self.has_attachments)\n writer.write_bool_value('isMajorChange', self.is_major_change)\n writer.write_collection_of_primitive_values('services', self.services)\n writer.write_enum_value('severity', self.severity)\n writer.write_collection_of_primitive_values('tags', self.tags)\n writer.write_object_value('viewPoint', self.view_point)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/service_update_message.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "val", "star_events_count": 135} {"blob_id": "ec83cb02476cf7cbc8047119f63cc717e95f51f2", "bodies": ["super().__init__(*args, command=HydraBase(), **kwargs)\nself.service = service\nself.login = login", "args = []\nif self.login:\n args.extend(['-L', cfg['tools.hydra.loginfile']])\nif self._port.is_ipv6:\n args.append('-6')\nargs.extend(['-P', cfg['tools.hydra.passwordfile'], '-s', str(self._port.number), str(self._port.node.ip), self.service])\nreturn args"], "bodies_text": "<|body_start_0|>\n super().__init__(*args, command=HydraBase(), **kwargs)\n self.service = service\n self.login = login\n<|end_body_0|>\n\n<|body_start_1|>\n args = []\n if self.login:\n args.extend(['-L', cfg['tools.hydra.loginfile']])\n if self._port.is_ipv6:\n args.append('-6')\n args.extend(['-P', cfg['tools.hydra.passwordfile'], '-s', str(self._port.number), str(self._port.node.ip), self.service])\n return args\n<|end_body_1|>\n", "class_docstring": "This is task for Hydra tool. Call Hydra and parse output", "class_name": "HydraScriptTask", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HydraScriptTask:\n \"\"\"This is task for Hydra tool. 
Call Hydra and parse output\"\"\"\n\n def __init__(self, service, login=True, *args, **kwargs):\n \"\"\"Initialize variables Args: port (Port): Port for scanning service (str): Service name for scanning login (bool): Define if hydra should use login or not *args: **kwargs:\"\"\"\n <|body_0|>\n\n def prepare_args(self):\n \"\"\"Prepare arguments for command execution Returns: list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(*args, command=HydraBase(), **kwargs)\n self.service = service\n self.login = login\n<|end_body_0|>\n\n<|body_start_1|>\n args = []\n if self.login:\n args.extend(['-L', cfg['tools.hydra.loginfile']])\n if self._port.is_ipv6:\n args.append('-6')\n args.extend(['-P', cfg['tools.hydra.passwordfile'], '-s', str(self._port.number), str(self._port.node.ip), self.service])\n return args\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000253", "length_bytes": 1216, "license_type": "permissive", "methods": [{"docstring": "Initialize variables Args: port (Port): Port for scanning service (str): Service name for scanning login (bool): Define if hydra should use login or not *args: **kwargs:", "name": "__init__", "signature": "def __init__(self, service, login=True, *args, **kwargs)"}, {"docstring": "Prepare arguments for command execution Returns: list", "name": "prepare_args", "signature": "def prepare_args(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000070", "prompt": "Implement the Python class `HydraScriptTask` described below.\n\nClass description:\nThis is task for Hydra tool. Call Hydra and parse output\n\nMethod signatures and docstrings:\n- def __init__(self, service, login=True, *args, **kwargs): Initialize variables Args: port (Port): Port for scanning service (str): Service name for scanning login (bool): Define if hydra should use login or not *args: **kwargs:\n- def prepare_args(self): Prepare arguments for command execution Returns: list", "prompted_full_text": "Implement the Python class `HydraScriptTask` described below.\n\nClass description:\nThis is task for Hydra tool. Call Hydra and parse output\n\nMethod signatures and docstrings:\n- def __init__(self, service, login=True, *args, **kwargs): Initialize variables Args: port (Port): Port for scanning service (str): Service name for scanning login (bool): Define if hydra should use login or not *args: **kwargs:\n- def prepare_args(self): Prepare arguments for command execution Returns: list\n\n<|skeleton|>\nclass HydraScriptTask:\n \"\"\"This is task for Hydra tool. 
Call Hydra and parse output\"\"\"\n\n def __init__(self, service, login=True, *args, **kwargs):\n \"\"\"Initialize variables Args: port (Port): Port for scanning service (str): Service name for scanning login (bool): Define if hydra should use login or not *args: **kwargs:\"\"\"\n <|body_0|>\n\n def prepare_args(self):\n \"\"\"Prepare arguments for command execution Returns: list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(*args, command=HydraBase(), **kwargs)\n self.service = service\n self.login = login\n<|end_body_0|>\n\n<|body_start_1|>\n args = []\n if self.login:\n args.extend(['-L', cfg['tools.hydra.loginfile']])\n if self._port.is_ipv6:\n args.append('-6')\n args.extend(['-P', cfg['tools.hydra.passwordfile'], '-s', str(self._port.number), str(self._port.node.ip), self.service])\n return args\n<|end_body_1|>\n", "revision_id": "bb21ff02965ed0cca5a55ee559eae77856d9866c", "skeleton": "<|skeleton|>\nclass HydraScriptTask:\n \"\"\"This is task for Hydra tool. Call Hydra and parse output\"\"\"\n\n def __init__(self, service, login=True, *args, **kwargs):\n \"\"\"Initialize variables Args: port (Port): Port for scanning service (str): Service name for scanning login (bool): Define if hydra should use login or not *args: **kwargs:\"\"\"\n <|body_0|>\n\n def prepare_args(self):\n \"\"\"Prepare arguments for command execution Returns: list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class HydraScriptTask:\n \"\"\"This is task for Hydra tool. Call Hydra and parse output\"\"\"\n\n def __init__(self, service, login=True, *args, **kwargs):\n \"\"\"Initialize variables Args: port (Port): Port for scanning service (str): Service name for scanning login (bool): Define if hydra should use login or not *args: **kwargs:\"\"\"\n super().__init__(*args, command=HydraBase(), **kwargs)\n self.service = service\n self.login = login\n\n def prepare_args(self):\n \"\"\"Prepare arguments for command execution Returns: list\"\"\"\n args = []\n if self.login:\n args.extend(['-L', cfg['tools.hydra.loginfile']])\n if self._port.is_ipv6:\n args.append('-6')\n args.extend(['-P', cfg['tools.hydra.passwordfile'], '-s', str(self._port.number), str(self._port.node.ip), self.service])\n return args\n", "source": "the_stack_v2_python_sparse", "source_path": "tools/hydra/tasks.py", "source_repo": "collectivesense/aucote", "split": "val", "star_events_count": 0} {"blob_id": "43bc742e88650eb5c8cbe3c29336985cf2a8c8c7", "bodies": ["super(FarChamferDist, self).__init__()\nself.num_add = num_add\nself.far_dist = FarthestDist()\nself.chamfer_dist = ChamferDist(method=chamfer_method)\nself.cd_w = chamfer_weight", "B = adv_pc.shape[0]\nchamfer_loss = self.chamfer_dist(adv_pc, ori_pc, weights=weights, batch_avg=batch_avg)\nadv_clusters = adv_pc.view(B, self.num_add, -1, 3)\nfar_loss = self.far_dist(adv_clusters, weights=weights, 
batch_avg=batch_avg)\n loss = far_loss + chamfer_loss * self.cd_w\n return loss\n<|end_body_1|>\n", "class_docstring": "", "class_name": "FarChamferDist", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FarChamferDist:\n\n def __init__(self, num_add, chamfer_method='adv2ori', chamfer_weight=0.1):\n \"\"\"Distance function used in generating adv clusters. Consisting of a Farthest dist and a chamfer dist. Args: num_add (int): number of added clusters. chamfer_method (str, optional): chamfer. Defaults to 'adv2ori'. chamfer_weight (float, optional): weight factor. Defaults to 0.1.\"\"\"\n <|body_0|>\n\n def forward(self, adv_pc, ori_pc, weights=None, batch_avg=True):\n \"\"\"Adversarial constraint function of CVPR'19 paper for adv clusters. Args: adv_pc (torch.FloatTensor): [B, num_add * cl_num_p, 3], the added clusters ori_pc (torch.FloatTensor): [B, K, 3] weights (np.array): weight factors batch_avg: (bool, optional): whether to avg over batch dim\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(FarChamferDist, self).__init__()\n self.num_add = num_add\n self.far_dist = FarthestDist()\n self.chamfer_dist = ChamferDist(method=chamfer_method)\n self.cd_w = chamfer_weight\n<|end_body_0|>\n\n<|body_start_1|>\n B = adv_pc.shape[0]\n chamfer_loss = self.chamfer_dist(adv_pc, ori_pc, weights=weights, batch_avg=batch_avg)\n adv_clusters = adv_pc.view(B, self.num_add, -1, 3)\n far_loss = self.far_dist(adv_clusters, weights=weights, batch_avg=batch_avg)\n loss = far_loss + chamfer_loss * self.cd_w\n return loss\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000254", "length_bytes": 11583, "license_type": "permissive", "methods": [{"docstring": "Distance function used in generating adv clusters. Consisting of a Farthest dist and a chamfer dist. Args: num_add (int): number of added clusters. chamfer_method (str, optional): chamfer. Defaults to 'adv2ori'. chamfer_weight (float, optional): weight factor. Defaults to 0.1.", "name": "__init__", "signature": "def __init__(self, num_add, chamfer_method='adv2ori', chamfer_weight=0.1)"}, {"docstring": "Adversarial constraint function of CVPR'19 paper for adv clusters. Args: adv_pc (torch.FloatTensor): [B, num_add * cl_num_p, 3], the added clusters ori_pc (torch.FloatTensor): [B, K, 3] weights (np.array): weight factors batch_avg: (bool, optional): whether to avg over batch dim", "name": "forward", "signature": "def forward(self, adv_pc, ori_pc, weights=None, batch_avg=True)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004662", "prompt": "Implement the Python class `FarChamferDist` described below.\n\nClass description:\nImplement the FarChamferDist class.\n\nMethod signatures and docstrings:\n- def __init__(self, num_add, chamfer_method='adv2ori', chamfer_weight=0.1): Distance function used in generating adv clusters. Consisting of a Farthest dist and a chamfer dist. Args: num_add (int): number of added clusters. chamfer_method (str, optional): chamfer. Defaults to 'adv2ori'. chamfer_weight (float, optional): weight factor. Defaults to 0.1.\n- def forward(self, adv_pc, ori_pc, weights=None, batch_avg=True): Adversarial constraint function of CVPR'19 paper for adv clusters. 
Args: adv_pc (torch.FloatTensor): [B, num_add * cl_num_p, 3], the added clusters ori_pc (torch.FloatTensor): [B, K, 3] weights (np.array): weight factors batch_avg: (bool, optional): whether to avg over batch dim", "prompted_full_text": "Implement the Python class `FarChamferDist` described below.\n\nClass description:\nImplement the FarChamferDist class.\n\nMethod signatures and docstrings:\n- def __init__(self, num_add, chamfer_method='adv2ori', chamfer_weight=0.1): Distance function used in generating adv clusters. Consisting of a Farthest dist and a chamfer dist. Args: num_add (int): number of added clusters. chamfer_method (str, optional): chamfer. Defaults to 'adv2ori'. chamfer_weight (float, optional): weight factor. Defaults to 0.1.\n- def forward(self, adv_pc, ori_pc, weights=None, batch_avg=True): Adversarial constraint function of CVPR'19 paper for adv clusters. Args: adv_pc (torch.FloatTensor): [B, num_add * cl_num_p, 3], the added clusters ori_pc (torch.FloatTensor): [B, K, 3] weights (np.array): weight factors batch_avg: (bool, optional): whether to avg over batch dim\n\n<|skeleton|>\nclass FarChamferDist:\n\n def __init__(self, num_add, chamfer_method='adv2ori', chamfer_weight=0.1):\n \"\"\"Distance function used in generating adv clusters. Consisting of a Farthest dist and a chamfer dist. Args: num_add (int): number of added clusters. chamfer_method (str, optional): chamfer. Defaults to 'adv2ori'. chamfer_weight (float, optional): weight factor. Defaults to 0.1.\"\"\"\n <|body_0|>\n\n def forward(self, adv_pc, ori_pc, weights=None, batch_avg=True):\n \"\"\"Adversarial constraint function of CVPR'19 paper for adv clusters. Args: adv_pc (torch.FloatTensor): [B, num_add * cl_num_p, 3], the added clusters ori_pc (torch.FloatTensor): [B, K, 3] weights (np.array): weight factors batch_avg: (bool, optional): whether to avg over batch dim\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(FarChamferDist, self).__init__()\n self.num_add = num_add\n self.far_dist = FarthestDist()\n self.chamfer_dist = ChamferDist(method=chamfer_method)\n self.cd_w = chamfer_weight\n<|end_body_0|>\n\n<|body_start_1|>\n B = adv_pc.shape[0]\n chamfer_loss = self.chamfer_dist(adv_pc, ori_pc, weights=weights, batch_avg=batch_avg)\n adv_clusters = adv_pc.view(B, self.num_add, -1, 3)\n far_loss = self.far_dist(adv_clusters, weights=weights, batch_avg=batch_avg)\n loss = far_loss + chamfer_loss * self.cd_w\n return loss\n<|end_body_1|>\n", "revision_id": "4e2462b66fa1eac90cfbf61fa0dc635d223fdf2f", "skeleton": "<|skeleton|>\nclass FarChamferDist:\n\n def __init__(self, num_add, chamfer_method='adv2ori', chamfer_weight=0.1):\n \"\"\"Distance function used in generating adv clusters. Consisting of a Farthest dist and a chamfer dist. Args: num_add (int): number of added clusters. chamfer_method (str, optional): chamfer. Defaults to 'adv2ori'. chamfer_weight (float, optional): weight factor. Defaults to 0.1.\"\"\"\n <|body_0|>\n\n def forward(self, adv_pc, ori_pc, weights=None, batch_avg=True):\n \"\"\"Adversarial constraint function of CVPR'19 paper for adv clusters. 
Args: adv_pc (torch.FloatTensor): [B, num_add * cl_num_p, 3], the added clusters ori_pc (torch.FloatTensor): [B, K, 3] weights (np.array): weight factors batch_avg: (bool, optional): whether to avg over batch dim\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FarChamferDist:\n def __init__(self, num_add, chamfer_method='adv2ori', chamfer_weight=0.1):\n \"\"\"Distance function used in generating adv clusters. Consisting of a Farthest dist and a chamfer dist. Args: num_add (int): number of added clusters. chamfer_method (str, optional): chamfer. Defaults to 'adv2ori'. chamfer_weight (float, optional): weight factor. Defaults to 0.1.\"\"\"\n super(FarChamferDist, self).__init__()\n self.num_add = num_add\n self.far_dist = FarthestDist()\n self.chamfer_dist = ChamferDist(method=chamfer_method)\n self.cd_w = chamfer_weight\n\n def forward(self, adv_pc, ori_pc, weights=None, batch_avg=True):\n \"\"\"Adversarial constraint function of CVPR'19 paper for adv clusters. Args: adv_pc (torch.FloatTensor): [B, num_add * cl_num_p, 3], the added clusters ori_pc (torch.FloatTensor): [B, K, 3] weights (np.array): weight factors batch_avg: (bool, optional): whether to avg over batch dim\"\"\"\n B = adv_pc.shape[0]\n chamfer_loss = self.chamfer_dist(adv_pc, ori_pc, weights=weights, batch_avg=batch_avg)\n adv_clusters = adv_pc.view(B, self.num_add, -1, 3)\n far_loss = self.far_dist(adv_clusters, weights=weights, batch_avg=batch_avg)\n loss = far_loss + chamfer_loss * self.cd_w\n return loss\n", "source": "the_stack_v2_python_sparse", "source_path": "baselines/attack/util/dist_utils.py", "source_repo": "code-roamer/IF-Defense", "split": "val", "star_events_count": 0} {"blob_id": "ac805d02c6376b67f36fb45c03319c709f5cd7da", "bodies": ["self._train_op = optimizer.minimize(loss)\nself._loss = loss\nself._predictions = predictions\nself._ds_train = ds_train\nself._ds_validation = ds_validation\nself._stop_patience = stop_patience\nself._evaluation = evaluation\nself._validation_losses = []\nself._model_inputs = inputs\nself._model_labels = labels\nself._train_loss = []\nself._validation_f1 = []\nwith tf.variable_scope('model', reuse=True):\n self._model_is_training = tf.get_variable('is_training', dtype=tf.bool, trainable=False)", "self._model_is_training = tf.identity(TRUE, name='is_training')\ndsIter = self._ds_train.__iter__()\nmean_loss = 0\nfor i in range(dsIter._len):\n images, labels = next(dsIter)\n _, loss_value, prediction = sess.run([self._train_op, self._loss, self._predictions], feed_dict={self._model_inputs: images, self._model_labels: labels})\n mean_loss = mean_loss + loss_value\n self._evaluation.add_batch(prediction, labels)\nmean_loss = mean_loss / dsIter._len\nself._train_loss.append(mean_loss)\nprint('training epoch:')\nself._evaluation.flush()\nprint('mean loss: ', mean_loss)\npass", "self._model_is_training = tf.identity(FALSE, name='is_validating')\ndsIter = self._ds_validation.__iter__()\nmean_loss = 0\nfor i in range(dsIter._len):\n imgs, labels = next(dsIter)\n prediction, loss_value = sess.run([self._predictions, self._loss], feed_dict={self._model_inputs: imgs, self._model_labels: labels})\n mean_loss = mean_loss + loss_value\n self._evaluation.add_batch(prediction, labels)\nmean_loss = mean_loss / dsIter._len\nself._validation_losses.append(mean_loss)\nf1values = self._evaluation._measures[-1].values()\nf1mean = (f1values[0] + f1values[1]) / 
2\nself._validation_f1.append(f1mean)\nprint('validation step:')\nself._evaluation.flush()\nprint('validation_loss: ', mean_loss)\nprint('F1mean: ', f1mean)\npass", "if len(self._validation_f1) < self._stop_patience:\n return False\nelse:\n query = self._validation_f1[-self._stop_patience:]\n if any((query[i + 1] > query[i] for i in range(0, len(query) - 1))):\n return False\n else:\n return True\npass", "self._valid_step(sess)\ni = 0\nwhile i < num_epochs or num_epochs == -1:\n print('epochs:{:d}'.format(i))\n self._train_epoch(sess)\n self._valid_step(sess)\n i += 1\n if self._should_stop():\n break\nprint('end of run')\nprint('start to plot')"], "bodies_text": "<|body_start_0|>\n self._train_op = optimizer.minimize(loss)\n self._loss = loss\n self._predictions = predictions\n self._ds_train = ds_train\n self._ds_validation = ds_validation\n self._stop_patience = stop_patience\n self._evaluation = evaluation\n self._validation_losses = []\n self._model_inputs = inputs\n self._model_labels = labels\n self._train_loss = []\n self._validation_f1 = []\n with tf.variable_scope('model', reuse=True):\n self._model_is_training = tf.get_variable('is_training', dtype=tf.bool, trainable=False)\n<|end_body_0|>\n\n<|body_start_1|>\n self._model_is_training = tf.identity(TRUE, name='is_training')\n dsIter = self._ds_train.__iter__()\n mean_loss = 0\n for i in range(dsIter._len):\n images, labels = next(dsIter)\n _, loss_value, prediction = sess.run([self._train_op, self._loss, self._predictions], feed_dict={self._model_inputs: images, self._model_labels: labels})\n mean_loss = mean_loss + loss_value\n self._evaluation.add_batch(prediction, labels)\n mean_loss = mean_loss / dsIter._len\n self._train_loss.append(mean_loss)\n print('training epoch:')\n self._evaluation.flush()\n print('mean loss: ', mean_loss)\n pass\n<|end_body_1|>\n\n<|body_start_2|>\n self._model_is_training = tf.identity(FALSE, name='is_validating')\n dsIter = self._ds_validation.__iter__()\n mean_loss = 0\n for i in range(dsIter._len):\n imgs, labels = next(dsIter)\n prediction, loss_value = sess.run([self._predictions, self._loss], feed_dict={self._model_inputs: imgs, self._model_labels: labels})\n mean_loss = mean_loss + loss_value\n self._evaluation.add_batch(prediction, labels)\n mean_loss = mean_loss / dsIter._len\n self._validation_losses.append(mean_loss)\n f1values = self._evaluation._measures[-1].values()\n f1mean = (f1values[0] + f1values[1]) / 2\n self._validation_f1.append(f1mean)\n print('validation step:')\n self._evaluation.flush()\n print('validation_loss: ', mean_loss)\n print('F1mean: ', f1mean)\n pass\n<|end_body_2|>\n\n<|body_start_3|>\n if len(self._validation_f1) < self._stop_patience:\n return False\n else:\n query = self._validation_f1[-self._stop_patience:]\n if any((query[i + 1] > query[i] for i in range(0, len(query) - 1))):\n return False\n else:\n return True\n pass\n<|end_body_3|>\n\n<|body_start_4|>\n self._valid_step(sess)\n i = 0\n while i < num_epochs or num_epochs == -1:\n print('epochs:{:d}'.format(i))\n self._train_epoch(sess)\n self._valid_step(sess)\n i += 1\n if self._should_stop():\n break\n print('end of run')\n print('start to plot')\n<|end_body_4|>\n", "class_docstring": "", "class_name": "Trainer", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Trainer:\n\n def __init__(self, loss, predictions, optimizer, ds_train, ds_validation, stop_patience, evaluation, inputs, labels):\n \"\"\"Initialize the trainer Args: loss an 
operation that computes the loss predictions an operation that computes the predictions for the current optimizer optimizer to use ds_train instance of Dataset that holds the training data ds_validation instance of Dataset that holds the validation data stop_patience the training stops if the validation loss does not decrease for this number of epochs evaluation instance of Evaluation inputs placeholder for model inputs labels placeholder for model labels\"\"\"\n <|body_0|>\n\n def _train_epoch(self, sess):\n \"\"\"trains for one epoch and prints the mean training loss to the commandline args: sess the tensorflow session that should be used\"\"\"\n <|body_1|>\n\n def _valid_step(self, sess):\n \"\"\"run the validation and print evalution + mean validation loss to the commandline args: sess the tensorflow session that should be used\"\"\"\n <|body_2|>\n\n def _should_stop(self):\n \"\"\"determine if training should stop according to stop_patience\"\"\"\n <|body_3|>\n\n def run(self, sess, num_epochs=-1):\n \"\"\"run the training until num_epochs exceeds or the validation loss did not decrease for stop_patience epochs args: sess the tensorflow session that should be used num_epochs limit to the number of epochs, -1 means not limit\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._train_op = optimizer.minimize(loss)\n self._loss = loss\n self._predictions = predictions\n self._ds_train = ds_train\n self._ds_validation = ds_validation\n self._stop_patience = stop_patience\n self._evaluation = evaluation\n self._validation_losses = []\n self._model_inputs = inputs\n self._model_labels = labels\n self._train_loss = []\n self._validation_f1 = []\n with tf.variable_scope('model', reuse=True):\n self._model_is_training = tf.get_variable('is_training', dtype=tf.bool, trainable=False)\n<|end_body_0|>\n\n<|body_start_1|>\n self._model_is_training = tf.identity(TRUE, name='is_training')\n dsIter = self._ds_train.__iter__()\n mean_loss = 0\n for i in range(dsIter._len):\n images, labels = next(dsIter)\n _, loss_value, prediction = sess.run([self._train_op, self._loss, self._predictions], feed_dict={self._model_inputs: images, self._model_labels: labels})\n mean_loss = mean_loss + loss_value\n self._evaluation.add_batch(prediction, labels)\n mean_loss = mean_loss / dsIter._len\n self._train_loss.append(mean_loss)\n print('training epoch:')\n self._evaluation.flush()\n print('mean loss: ', mean_loss)\n pass\n<|end_body_1|>\n\n<|body_start_2|>\n self._model_is_training = tf.identity(FALSE, name='is_validating')\n dsIter = self._ds_validation.__iter__()\n mean_loss = 0\n for i in range(dsIter._len):\n imgs, labels = next(dsIter)\n prediction, loss_value = sess.run([self._predictions, self._loss], feed_dict={self._model_inputs: imgs, self._model_labels: labels})\n mean_loss = mean_loss + loss_value\n self._evaluation.add_batch(prediction, labels)\n mean_loss = mean_loss / dsIter._len\n self._validation_losses.append(mean_loss)\n f1values = self._evaluation._measures[-1].values()\n f1mean = (f1values[0] + f1values[1]) / 2\n self._validation_f1.append(f1mean)\n print('validation step:')\n self._evaluation.flush()\n print('validation_loss: ', mean_loss)\n print('F1mean: ', f1mean)\n pass\n<|end_body_2|>\n\n<|body_start_3|>\n if len(self._validation_f1) < self._stop_patience:\n return False\n else:\n query = self._validation_f1[-self._stop_patience:]\n if any((query[i + 1] > query[i] for i in range(0, len(query) - 1))):\n return False\n else:\n return True\n 
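The `_should_stop` body just shown (and repeated verbatim in this record's `bodies_text`, `full_text`, and `solution` fields) encodes a patience rule over validation F1: training continues while any adjacent pair in the last `stop_patience` values still improves. The trailing `pass` after the `return` statements is unreachable and can be dropped. A minimal standalone sketch of the same rule; the function and variable names here are hypothetical:

```python
def should_stop(history, patience):
    """Stop once the last `patience` validation-F1 values contain no
    adjacent improvement, mirroring the _should_stop body above."""
    if len(history) < patience:
        return False                      # too few epochs observed so far
    window = history[-patience:]
    # any pairwise gain inside the window keeps training alive
    return not any(window[i + 1] > window[i] for i in range(len(window) - 1))

print(should_stop([0.70, 0.71, 0.71, 0.70], patience=3))  # True: plateau then drop
```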
pass\n<|end_body_3|>\n\n<|body_start_4|>\n self._valid_step(sess)\n i = 0\n while i < num_epochs or num_epochs == -1:\n print('epochs:{:d}'.format(i))\n self._train_epoch(sess)\n self._valid_step(sess)\n i += 1\n if self._should_stop():\n break\n print('end of run')\n print('start to plot')\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000255", "length_bytes": 6508, "license_type": "permissive", "methods": [{"docstring": "Initialize the trainer Args: loss an operation that computes the loss predictions an operation that computes the predictions for the current optimizer optimizer to use ds_train instance of Dataset that holds the training data ds_validation instance of Dataset that holds the validation data stop_patience the training stops if the validation loss does not decrease for this number of epochs evaluation instance of Evaluation inputs placeholder for model inputs labels placeholder for model labels", "name": "__init__", "signature": "def __init__(self, loss, predictions, optimizer, ds_train, ds_validation, stop_patience, evaluation, inputs, labels)"}, {"docstring": "trains for one epoch and prints the mean training loss to the commandline args: sess the tensorflow session that should be used", "name": "_train_epoch", "signature": "def _train_epoch(self, sess)"}, {"docstring": "run the validation and print evalution + mean validation loss to the commandline args: sess the tensorflow session that should be used", "name": "_valid_step", "signature": "def _valid_step(self, sess)"}, {"docstring": "determine if training should stop according to stop_patience", "name": "_should_stop", "signature": "def _should_stop(self)"}, {"docstring": "run the training until num_epochs exceeds or the validation loss did not decrease for stop_patience epochs args: sess the tensorflow session that should be used num_epochs limit to the number of epochs, -1 means not limit", "name": "run", "signature": "def run(self, sess, num_epochs=-1)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_005489", "prompt": "Implement the Python class `Trainer` described below.\n\nClass description:\nImplement the Trainer class.\n\nMethod signatures and docstrings:\n- def __init__(self, loss, predictions, optimizer, ds_train, ds_validation, stop_patience, evaluation, inputs, labels): Initialize the trainer Args: loss an operation that computes the loss predictions an operation that computes the predictions for the current optimizer optimizer to use ds_train instance of Dataset that holds the training data ds_validation instance of Dataset that holds the validation data stop_patience the training stops if the validation loss does not decrease for this number of epochs evaluation instance of Evaluation inputs placeholder for model inputs labels placeholder for model labels\n- def _train_epoch(self, sess): trains for one epoch and prints the mean training loss to the commandline args: sess the tensorflow session that should be used\n- def _valid_step(self, sess): run the validation and print evalution + mean validation loss to the commandline args: sess the tensorflow session that should be used\n- def _should_stop(self): determine if training should stop according to stop_patience\n- def run(self, sess, num_epochs=-1): run the training until num_epochs exceeds or the validation loss did not decrease for stop_patience epochs args: sess the tensorflow session that should be used num_epochs limit to the number of epochs, -1 means not limit", "prompted_full_text": "Implement the Python class 
`Trainer` described below.\n\nClass description:\nImplement the Trainer class.\n\nMethod signatures and docstrings:\n- def __init__(self, loss, predictions, optimizer, ds_train, ds_validation, stop_patience, evaluation, inputs, labels): Initialize the trainer Args: loss an operation that computes the loss predictions an operation that computes the predictions for the current optimizer optimizer to use ds_train instance of Dataset that holds the training data ds_validation instance of Dataset that holds the validation data stop_patience the training stops if the validation loss does not decrease for this number of epochs evaluation instance of Evaluation inputs placeholder for model inputs labels placeholder for model labels\n- def _train_epoch(self, sess): trains for one epoch and prints the mean training loss to the commandline args: sess the tensorflow session that should be used\n- def _valid_step(self, sess): run the validation and print evalution + mean validation loss to the commandline args: sess the tensorflow session that should be used\n- def _should_stop(self): determine if training should stop according to stop_patience\n- def run(self, sess, num_epochs=-1): run the training until num_epochs exceeds or the validation loss did not decrease for stop_patience epochs args: sess the tensorflow session that should be used num_epochs limit to the number of epochs, -1 means not limit\n\n<|skeleton|>\nclass Trainer:\n\n def __init__(self, loss, predictions, optimizer, ds_train, ds_validation, stop_patience, evaluation, inputs, labels):\n \"\"\"Initialize the trainer Args: loss an operation that computes the loss predictions an operation that computes the predictions for the current optimizer optimizer to use ds_train instance of Dataset that holds the training data ds_validation instance of Dataset that holds the validation data stop_patience the training stops if the validation loss does not decrease for this number of epochs evaluation instance of Evaluation inputs placeholder for model inputs labels placeholder for model labels\"\"\"\n <|body_0|>\n\n def _train_epoch(self, sess):\n \"\"\"trains for one epoch and prints the mean training loss to the commandline args: sess the tensorflow session that should be used\"\"\"\n <|body_1|>\n\n def _valid_step(self, sess):\n \"\"\"run the validation and print evalution + mean validation loss to the commandline args: sess the tensorflow session that should be used\"\"\"\n <|body_2|>\n\n def _should_stop(self):\n \"\"\"determine if training should stop according to stop_patience\"\"\"\n <|body_3|>\n\n def run(self, sess, num_epochs=-1):\n \"\"\"run the training until num_epochs exceeds or the validation loss did not decrease for stop_patience epochs args: sess the tensorflow session that should be used num_epochs limit to the number of epochs, -1 means not limit\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._train_op = optimizer.minimize(loss)\n self._loss = loss\n self._predictions = predictions\n self._ds_train = ds_train\n self._ds_validation = ds_validation\n self._stop_patience = stop_patience\n self._evaluation = evaluation\n self._validation_losses = []\n self._model_inputs = inputs\n self._model_labels = labels\n self._train_loss = []\n self._validation_f1 = []\n with tf.variable_scope('model', reuse=True):\n self._model_is_training = tf.get_variable('is_training', dtype=tf.bool, trainable=False)\n<|end_body_0|>\n\n<|body_start_1|>\n self._model_is_training = tf.identity(TRUE, name='is_training')\n dsIter = 
self._ds_train.__iter__()\n mean_loss = 0\n for i in range(dsIter._len):\n images, labels = next(dsIter)\n _, loss_value, prediction = sess.run([self._train_op, self._loss, self._predictions], feed_dict={self._model_inputs: images, self._model_labels: labels})\n mean_loss = mean_loss + loss_value\n self._evaluation.add_batch(prediction, labels)\n mean_loss = mean_loss / dsIter._len\n self._train_loss.append(mean_loss)\n print('training epoch:')\n self._evaluation.flush()\n print('mean loss: ', mean_loss)\n pass\n<|end_body_1|>\n\n<|body_start_2|>\n self._model_is_training = tf.identity(FALSE, name='is_validating')\n dsIter = self._ds_validation.__iter__()\n mean_loss = 0\n for i in range(dsIter._len):\n imgs, labels = next(dsIter)\n prediction, loss_value = sess.run([self._predictions, self._loss], feed_dict={self._model_inputs: imgs, self._model_labels: labels})\n mean_loss = mean_loss + loss_value\n self._evaluation.add_batch(prediction, labels)\n mean_loss = mean_loss / dsIter._len\n self._validation_losses.append(mean_loss)\n f1values = self._evaluation._measures[-1].values()\n f1mean = (f1values[0] + f1values[1]) / 2\n self._validation_f1.append(f1mean)\n print('validation step:')\n self._evaluation.flush()\n print('validation_loss: ', mean_loss)\n print('F1mean: ', f1mean)\n pass\n<|end_body_2|>\n\n<|body_start_3|>\n if len(self._validation_f1) < self._stop_patience:\n return False\n else:\n query = self._validation_f1[-self._stop_patience:]\n if any((query[i + 1] > query[i] for i in range(0, len(query) - 1))):\n return False\n else:\n return True\n pass\n<|end_body_3|>\n\n<|body_start_4|>\n self._valid_step(sess)\n i = 0\n while i < num_epochs or num_epochs == -1:\n print('epochs:{:d}'.format(i))\n self._train_epoch(sess)\n self._valid_step(sess)\n i += 1\n if self._should_stop():\n break\n print('end of run')\n print('start to plot')\n<|end_body_4|>\n", "revision_id": "e66ca5b33645641426edac4da5aed0cb205a5aeb", "skeleton": "<|skeleton|>\nclass Trainer:\n\n def __init__(self, loss, predictions, optimizer, ds_train, ds_validation, stop_patience, evaluation, inputs, labels):\n \"\"\"Initialize the trainer Args: loss an operation that computes the loss predictions an operation that computes the predictions for the current optimizer optimizer to use ds_train instance of Dataset that holds the training data ds_validation instance of Dataset that holds the validation data stop_patience the training stops if the validation loss does not decrease for this number of epochs evaluation instance of Evaluation inputs placeholder for model inputs labels placeholder for model labels\"\"\"\n <|body_0|>\n\n def _train_epoch(self, sess):\n \"\"\"trains for one epoch and prints the mean training loss to the commandline args: sess the tensorflow session that should be used\"\"\"\n <|body_1|>\n\n def _valid_step(self, sess):\n \"\"\"run the validation and print evalution + mean validation loss to the commandline args: sess the tensorflow session that should be used\"\"\"\n <|body_2|>\n\n def _should_stop(self):\n \"\"\"determine if training should stop according to stop_patience\"\"\"\n <|body_3|>\n\n def run(self, sess, num_epochs=-1):\n \"\"\"run the training until num_epochs exceeds or the validation loss did not decrease for stop_patience epochs args: sess the tensorflow session that should be used num_epochs limit to the number of epochs, -1 means not limit\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": 
"data/stack_v2_sparse_classes_30k", "solution": "class Trainer:\n def __init__(self, loss, predictions, optimizer, ds_train, ds_validation, stop_patience, evaluation, inputs, labels):\n \"\"\"Initialize the trainer Args: loss an operation that computes the loss predictions an operation that computes the predictions for the current optimizer optimizer to use ds_train instance of Dataset that holds the training data ds_validation instance of Dataset that holds the validation data stop_patience the training stops if the validation loss does not decrease for this number of epochs evaluation instance of Evaluation inputs placeholder for model inputs labels placeholder for model labels\"\"\"\n self._train_op = optimizer.minimize(loss)\n self._loss = loss\n self._predictions = predictions\n self._ds_train = ds_train\n self._ds_validation = ds_validation\n self._stop_patience = stop_patience\n self._evaluation = evaluation\n self._validation_losses = []\n self._model_inputs = inputs\n self._model_labels = labels\n self._train_loss = []\n self._validation_f1 = []\n with tf.variable_scope('model', reuse=True):\n self._model_is_training = tf.get_variable('is_training', dtype=tf.bool, trainable=False)\n\n def _train_epoch(self, sess):\n \"\"\"trains for one epoch and prints the mean training loss to the commandline args: sess the tensorflow session that should be used\"\"\"\n self._model_is_training = tf.identity(TRUE, name='is_training')\n dsIter = self._ds_train.__iter__()\n mean_loss = 0\n for i in range(dsIter._len):\n images, labels = next(dsIter)\n _, loss_value, prediction = sess.run([self._train_op, self._loss, self._predictions], feed_dict={self._model_inputs: images, self._model_labels: labels})\n mean_loss = mean_loss + loss_value\n self._evaluation.add_batch(prediction, labels)\n mean_loss = mean_loss / dsIter._len\n self._train_loss.append(mean_loss)\n print('training epoch:')\n self._evaluation.flush()\n print('mean loss: ', mean_loss)\n pass\n\n def _valid_step(self, sess):\n \"\"\"run the validation and print evalution + mean validation loss to the commandline args: sess the tensorflow session that should be used\"\"\"\n self._model_is_training = tf.identity(FALSE, name='is_validating')\n dsIter = self._ds_validation.__iter__()\n mean_loss = 0\n for i in range(dsIter._len):\n imgs, labels = next(dsIter)\n prediction, loss_value = sess.run([self._predictions, self._loss], feed_dict={self._model_inputs: imgs, self._model_labels: labels})\n mean_loss = mean_loss + loss_value\n self._evaluation.add_batch(prediction, labels)\n mean_loss = mean_loss / dsIter._len\n self._validation_losses.append(mean_loss)\n f1values = self._evaluation._measures[-1].values()\n f1mean = (f1values[0] + f1values[1]) / 2\n self._validation_f1.append(f1mean)\n print('validation step:')\n self._evaluation.flush()\n print('validation_loss: ', mean_loss)\n print('F1mean: ', f1mean)\n pass\n\n def _should_stop(self):\n \"\"\"determine if training should stop according to stop_patience\"\"\"\n if len(self._validation_f1) < self._stop_patience:\n return False\n else:\n query = self._validation_f1[-self._stop_patience:]\n if any((query[i + 1] > query[i] for i in range(0, len(query) - 1))):\n return False\n else:\n return True\n pass\n\n def run(self, sess, num_epochs=-1):\n \"\"\"run the training until num_epochs exceeds or the validation loss did not decrease for stop_patience epochs args: sess the tensorflow session that should be used num_epochs limit to the number of epochs, -1 means not limit\"\"\"\n 
self._valid_step(sess)\n i = 0\n while i < num_epochs or num_epochs == -1:\n print('epochs:{:d}'.format(i))\n self._train_epoch(sess)\n self._valid_step(sess)\n i += 1\n if self._should_stop():\n break\n print('end of run')\n print('start to plot')\n", "source": "the_stack_v2_python_sparse", "source_path": "train/trainer.py", "source_repo": "snowskysun/Classification-of-solar-cell-defects", "split": "val", "star_events_count": 0} {"blob_id": "e7936574823b3a54b6fb376de1f70baad89dd8b1", "bodies": ["self.identifiers = None\nself._real_scalers = None\nself._cat_scalers = None\nself._target_scaler = None\nself._num_classes_per_cat_input = None\nself._time_steps = get_fixed_params()['total_time_steps']\nself._num_encoder_steps = get_fixed_params()['num_encoder_steps']", "print_info('Formatting train-valid-test splits.')\nindex = df['days_from_start']\ntrain = df.loc[index < valid_boundary]\nvalid = df.loc[(index >= valid_boundary - 7) & (index < test_boundary)]\ntest = df.loc[index >= test_boundary - 7]\nself.set_scalers(train)\nreturn (self.transform_inputs(data) for data in [train, valid, test])", "print_info('Setting scalers with training data...')\ncolumn_definitions = self.getcolumn_definition()\nid_column = get_single_col_by_input_type(InputTypes.ID, column_definitions)\ntarget_column = get_single_col_by_input_type(InputTypes.TARGET, column_definitions)\nreal_inputs = extract_cols_from_data_type(DataTypes.REAL_VALUED, column_definitions, {InputTypes.ID, InputTypes.TIME})\nself.real_scalers = {}\nself.target_scaler = {}\nidentifiers = []\nfor identifier, sliced in df.groupby(id_column):\n if len(sliced) >= self._time_steps:\n data = sliced[real_inputs].values\n targets = sliced[[target_column]].values\n self.real_scalers[identifier] = sk_preprocessing.StandardScaler().fit(data)\n self.target_scaler[identifier] = sk_preprocessing.StandardScaler().fit(targets)\n identifiers.append(identifier)\ncategorical_inputs = extract_cols_from_data_type(DataTypes.CATEGORICAL, column_definitions, {InputTypes.ID, InputTypes.TIME})\ncategorical_scalers = {}\nnum_classes = []\nfor col in categorical_inputs:\n srs = df[col].apply(str)\n categorical_scalers[col] = sk_preprocessing.LabelEncoder().fit(srs.values)\n num_classes.append(srs.nunique())\nself._cat_scalers = categorical_scalers\nself._num_classes_per_cat_input = num_classes\nself.identifiers = identifiers", "if self.real_scalers is None and self._cat_scalers is None:\n raise ValueError('Scalers have not been set!')\ncolumn_definitions = self.getcolumn_definition()\nid_col = get_single_col_by_input_type(InputTypes.ID, column_definitions)\nreal_inputs = extract_cols_from_data_type(DataTypes.REAL_VALUED, column_definitions, {InputTypes.ID, InputTypes.TIME})\ncategorical_inputs = extract_cols_from_data_type(DataTypes.CATEGORICAL, column_definitions, {InputTypes.ID, InputTypes.TIME})\ndf_list = []\nfor identifier, sliced in df.groupby(id_col):\n if len(sliced) >= self._time_steps:\n sliced_copy = sliced.copy()\n sliced_copy[real_inputs] = self.real_scalers[identifier].transform(sliced_copy[real_inputs].values)\n df_list.append(sliced_copy)\noutput = pd.concat(df_list, axis=0)\nfor col in categorical_inputs:\n string_df = df[col].apply(str)\n output[col] = self._cat_scalers[col].transform(string_df)\nreturn output", "column_definition = self.column_definition\n\ndef _check_single_column(input_type):\n length = len([tup for tup in column_definition if tup[2] == input_type])\n if length != 1:\n raise ValueError('Illegal number of inputs ({}) of type 
{}'.format(length, input_type))\n_check_single_column(InputTypes.ID)\n_check_single_column(InputTypes.TIME)\nidentifier = [tup for tup in column_definition if tup[2] == InputTypes.ID]\ntime = [tup for tup in column_definition if tup[2] == InputTypes.TIME]\nreal_inputs = [tup for tup in column_definition if tup[1] == DataTypes.REAL_VALUED and tup[2] not in {InputTypes.ID, InputTypes.TIME}]\ncategorical_inputs = [tup for tup in column_definition if tup[1] == DataTypes.CATEGORICAL and tup[2] not in {InputTypes.ID, InputTypes.TIME}]\nreturn identifier + time + real_inputs + categorical_inputs"], "bodies_text": "<|body_start_0|>\n self.identifiers = None\n self._real_scalers = None\n self._cat_scalers = None\n self._target_scaler = None\n self._num_classes_per_cat_input = None\n self._time_steps = get_fixed_params()['total_time_steps']\n self._num_encoder_steps = get_fixed_params()['num_encoder_steps']\n<|end_body_0|>\n\n<|body_start_1|>\n print_info('Formatting train-valid-test splits.')\n index = df['days_from_start']\n train = df.loc[index < valid_boundary]\n valid = df.loc[(index >= valid_boundary - 7) & (index < test_boundary)]\n test = df.loc[index >= test_boundary - 7]\n self.set_scalers(train)\n return (self.transform_inputs(data) for data in [train, valid, test])\n<|end_body_1|>\n\n<|body_start_2|>\n print_info('Setting scalers with training data...')\n column_definitions = self.getcolumn_definition()\n id_column = get_single_col_by_input_type(InputTypes.ID, column_definitions)\n target_column = get_single_col_by_input_type(InputTypes.TARGET, column_definitions)\n real_inputs = extract_cols_from_data_type(DataTypes.REAL_VALUED, column_definitions, {InputTypes.ID, InputTypes.TIME})\n self.real_scalers = {}\n self.target_scaler = {}\n identifiers = []\n for identifier, sliced in df.groupby(id_column):\n if len(sliced) >= self._time_steps:\n data = sliced[real_inputs].values\n targets = sliced[[target_column]].values\n self.real_scalers[identifier] = sk_preprocessing.StandardScaler().fit(data)\n self.target_scaler[identifier] = sk_preprocessing.StandardScaler().fit(targets)\n identifiers.append(identifier)\n categorical_inputs = extract_cols_from_data_type(DataTypes.CATEGORICAL, column_definitions, {InputTypes.ID, InputTypes.TIME})\n categorical_scalers = {}\n num_classes = []\n for col in categorical_inputs:\n srs = df[col].apply(str)\n categorical_scalers[col] = sk_preprocessing.LabelEncoder().fit(srs.values)\n num_classes.append(srs.nunique())\n self._cat_scalers = categorical_scalers\n self._num_classes_per_cat_input = num_classes\n self.identifiers = identifiers\n<|end_body_2|>\n\n<|body_start_3|>\n if self.real_scalers is None and self._cat_scalers is None:\n raise ValueError('Scalers have not been set!')\n column_definitions = self.getcolumn_definition()\n id_col = get_single_col_by_input_type(InputTypes.ID, column_definitions)\n real_inputs = extract_cols_from_data_type(DataTypes.REAL_VALUED, column_definitions, {InputTypes.ID, InputTypes.TIME})\n categorical_inputs = extract_cols_from_data_type(DataTypes.CATEGORICAL, column_definitions, {InputTypes.ID, InputTypes.TIME})\n df_list = []\n for identifier, sliced in df.groupby(id_col):\n if len(sliced) >= self._time_steps:\n sliced_copy = sliced.copy()\n sliced_copy[real_inputs] = self.real_scalers[identifier].transform(sliced_copy[real_inputs].values)\n df_list.append(sliced_copy)\n output = pd.concat(df_list, axis=0)\n for col in categorical_inputs:\n string_df = df[col].apply(str)\n output[col] = 
self._cat_scalers[col].transform(string_df)\n return output\n<|end_body_3|>\n\n<|body_start_4|>\n column_definition = self.column_definition\n\n def _check_single_column(input_type):\n length = len([tup for tup in column_definition if tup[2] == input_type])\n if length != 1:\n raise ValueError('Illegal number of inputs ({}) of type {}'.format(length, input_type))\n _check_single_column(InputTypes.ID)\n _check_single_column(InputTypes.TIME)\n identifier = [tup for tup in column_definition if tup[2] == InputTypes.ID]\n time = [tup for tup in column_definition if tup[2] == InputTypes.TIME]\n real_inputs = [tup for tup in column_definition if tup[1] == DataTypes.REAL_VALUED and tup[2] not in {InputTypes.ID, InputTypes.TIME}]\n categorical_inputs = [tup for tup in column_definition if tup[1] == DataTypes.CATEGORICAL and tup[2] not in {InputTypes.ID, InputTypes.TIME}]\n return identifier + time + real_inputs + categorical_inputs\n<|end_body_4|>\n", "class_docstring": "Defines and formats data for the electricity dataset. Note that per-entity z-score normalization is used here, and is implemented across functions. Attributes: column_definition: Defines input and data type of column used in the experiment. identifiers: Entity identifiers used in experiments.", "class_name": "ElectricityFormatter", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ElectricityFormatter:\n \"\"\"Defines and formats data for the electricity dataset. Note that per-entity z-score normalization is used here, and is implemented across functions. Attributes: column_definition: Defines input and data type of column used in the experiment. identifiers: Entity identifiers used in experiments.\"\"\"\n\n def __init__(self):\n \"\"\"Initialises formatter.\"\"\"\n <|body_0|>\n\n def split_data(self, df, valid_boundary=1315, test_boundary=1339):\n \"\"\"Splits data frame into training-validation-test data frames. This also calibrates scaling object, and transforms data for each split. Args: df: Source data frame to split. valid_boundary: Starting year for validation data test_boundary: Starting year for test data Returns: Tuple of transformed (train, valid, test) data.\"\"\"\n <|body_1|>\n\n def set_scalers(self, df):\n \"\"\"Calibrates scalers using the data supplied. Args: df: Data to use to calibrate scalers.\"\"\"\n <|body_2|>\n\n def transform_inputs(self, df):\n \"\"\"Performs feature transformations. This includes both feature engineering, preprocessing and normalisation. Args: df: Data frame to transform. 
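`set_scalers` above fits one `StandardScaler` per entity (skipping series shorter than `total_time_steps`), and `transform_inputs` later applies each entity's own scaler; that is the per-entity z-score normalisation the class docstring promises. A small self-contained sketch of the real-valued path; the `frames` input shape is an assumption for illustration:

```python
import numpy as np
from sklearn.preprocessing import StandardScaler

def fit_per_entity_scalers(frames):
    """Fit one StandardScaler per entity id, as set_scalers() does for
    the real-valued columns. `frames` maps id -> 2-D array (assumed)."""
    return {entity_id: StandardScaler().fit(values)
            for entity_id, values in frames.items()}

scalers = fit_per_entity_scalers({'MT_001': np.array([[1.0], [3.0], [5.0]])})
print(scalers['MT_001'].transform(np.array([[3.0]])))  # [[0.]] -- the mean
```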
Returns: Transformed data frame.\"\"\"\n <|body_3|>\n\n def getcolumn_definition(self):\n \"\"\"\"Returns formatted column definition in order expected by the TFT.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.identifiers = None\n self._real_scalers = None\n self._cat_scalers = None\n self._target_scaler = None\n self._num_classes_per_cat_input = None\n self._time_steps = get_fixed_params()['total_time_steps']\n self._num_encoder_steps = get_fixed_params()['num_encoder_steps']\n<|end_body_0|>\n\n<|body_start_1|>\n print_info('Formatting train-valid-test splits.')\n index = df['days_from_start']\n train = df.loc[index < valid_boundary]\n valid = df.loc[(index >= valid_boundary - 7) & (index < test_boundary)]\n test = df.loc[index >= test_boundary - 7]\n self.set_scalers(train)\n return (self.transform_inputs(data) for data in [train, valid, test])\n<|end_body_1|>\n\n<|body_start_2|>\n print_info('Setting scalers with training data...')\n column_definitions = self.getcolumn_definition()\n id_column = get_single_col_by_input_type(InputTypes.ID, column_definitions)\n target_column = get_single_col_by_input_type(InputTypes.TARGET, column_definitions)\n real_inputs = extract_cols_from_data_type(DataTypes.REAL_VALUED, column_definitions, {InputTypes.ID, InputTypes.TIME})\n self.real_scalers = {}\n self.target_scaler = {}\n identifiers = []\n for identifier, sliced in df.groupby(id_column):\n if len(sliced) >= self._time_steps:\n data = sliced[real_inputs].values\n targets = sliced[[target_column]].values\n self.real_scalers[identifier] = sk_preprocessing.StandardScaler().fit(data)\n self.target_scaler[identifier] = sk_preprocessing.StandardScaler().fit(targets)\n identifiers.append(identifier)\n categorical_inputs = extract_cols_from_data_type(DataTypes.CATEGORICAL, column_definitions, {InputTypes.ID, InputTypes.TIME})\n categorical_scalers = {}\n num_classes = []\n for col in categorical_inputs:\n srs = df[col].apply(str)\n categorical_scalers[col] = sk_preprocessing.LabelEncoder().fit(srs.values)\n num_classes.append(srs.nunique())\n self._cat_scalers = categorical_scalers\n self._num_classes_per_cat_input = num_classes\n self.identifiers = identifiers\n<|end_body_2|>\n\n<|body_start_3|>\n if self.real_scalers is None and self._cat_scalers is None:\n raise ValueError('Scalers have not been set!')\n column_definitions = self.getcolumn_definition()\n id_col = get_single_col_by_input_type(InputTypes.ID, column_definitions)\n real_inputs = extract_cols_from_data_type(DataTypes.REAL_VALUED, column_definitions, {InputTypes.ID, InputTypes.TIME})\n categorical_inputs = extract_cols_from_data_type(DataTypes.CATEGORICAL, column_definitions, {InputTypes.ID, InputTypes.TIME})\n df_list = []\n for identifier, sliced in df.groupby(id_col):\n if len(sliced) >= self._time_steps:\n sliced_copy = sliced.copy()\n sliced_copy[real_inputs] = self.real_scalers[identifier].transform(sliced_copy[real_inputs].values)\n df_list.append(sliced_copy)\n output = pd.concat(df_list, axis=0)\n for col in categorical_inputs:\n string_df = df[col].apply(str)\n output[col] = self._cat_scalers[col].transform(string_df)\n return output\n<|end_body_3|>\n\n<|body_start_4|>\n column_definition = self.column_definition\n\n def _check_single_column(input_type):\n length = len([tup for tup in column_definition if tup[2] == input_type])\n if length != 1:\n raise ValueError('Illegal number of inputs ({}) of type {}'.format(length, input_type))\n _check_single_column(InputTypes.ID)\n 
_check_single_column(InputTypes.TIME)\n identifier = [tup for tup in column_definition if tup[2] == InputTypes.ID]\n time = [tup for tup in column_definition if tup[2] == InputTypes.TIME]\n real_inputs = [tup for tup in column_definition if tup[1] == DataTypes.REAL_VALUED and tup[2] not in {InputTypes.ID, InputTypes.TIME}]\n categorical_inputs = [tup for tup in column_definition if tup[1] == DataTypes.CATEGORICAL and tup[2] not in {InputTypes.ID, InputTypes.TIME}]\n return identifier + time + real_inputs + categorical_inputs\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000256", "length_bytes": 16393, "license_type": "permissive", "methods": [{"docstring": "Initialises formatter.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Splits data frame into training-validation-test data frames. This also calibrates scaling object, and transforms data for each split. Args: df: Source data frame to split. valid_boundary: Starting year for validation data test_boundary: Starting year for test data Returns: Tuple of transformed (train, valid, test) data.", "name": "split_data", "signature": "def split_data(self, df, valid_boundary=1315, test_boundary=1339)"}, {"docstring": "Calibrates scalers using the data supplied. Args: df: Data to use to calibrate scalers.", "name": "set_scalers", "signature": "def set_scalers(self, df)"}, {"docstring": "Performs feature transformations. This includes both feature engineering, preprocessing and normalisation. Args: df: Data frame to transform. Returns: Transformed data frame.", "name": "transform_inputs", "signature": "def transform_inputs(self, df)"}, {"docstring": "\"Returns formatted column definition in order expected by the TFT.", "name": "getcolumn_definition", "signature": "def getcolumn_definition(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_001758", "prompt": "Implement the Python class `ElectricityFormatter` described below.\n\nClass description:\nDefines and formats data for the electricity dataset. Note that per-entity z-score normalization is used here, and is implemented across functions. Attributes: column_definition: Defines input and data type of column used in the experiment. identifiers: Entity identifiers used in experiments.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialises formatter.\n- def split_data(self, df, valid_boundary=1315, test_boundary=1339): Splits data frame into training-validation-test data frames. This also calibrates scaling object, and transforms data for each split. Args: df: Source data frame to split. valid_boundary: Starting year for validation data test_boundary: Starting year for test data Returns: Tuple of transformed (train, valid, test) data.\n- def set_scalers(self, df): Calibrates scalers using the data supplied. Args: df: Data to use to calibrate scalers.\n- def transform_inputs(self, df): Performs feature transformations. This includes both feature engineering, preprocessing and normalisation. Args: df: Data frame to transform. Returns: Transformed data frame.\n- def getcolumn_definition(self): \"Returns formatted column definition in order expected by the TFT.", "prompted_full_text": "Implement the Python class `ElectricityFormatter` described below.\n\nClass description:\nDefines and formats data for the electricity dataset. Note that per-entity z-score normalization is used here, and is implemented across functions. Attributes: column_definition: Defines input and data type of column used in the experiment. 
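The categorical path stringifies each column before fitting a `LabelEncoder`, so mixed dtypes encode consistently, and it records `nunique()` as the class count per input. A minimal sketch of that encode/transform round trip; the sample values are made up:

```python
from sklearn.preprocessing import LabelEncoder

# Values are stringified before fitting in the record; classes_ then
# plays the same role as the nunique() count it stores per column.
encoder = LabelEncoder().fit(['low', 'high', 'low', 'mid'])
print(encoder.transform(['mid', 'low']))  # [2 1] (classes_ sorts to high/low/mid)
print(len(encoder.classes_))              # 3 distinct classes
```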
identifiers: Entity identifiers used in experiments.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialises formatter.\n- def split_data(self, df, valid_boundary=1315, test_boundary=1339): Splits data frame into training-validation-test data frames. This also calibrates scaling object, and transforms data for each split. Args: df: Source data frame to split. valid_boundary: Starting year for validation data test_boundary: Starting year for test data Returns: Tuple of transformed (train, valid, test) data.\n- def set_scalers(self, df): Calibrates scalers using the data supplied. Args: df: Data to use to calibrate scalers.\n- def transform_inputs(self, df): Performs feature transformations. This includes both feature engineering, preprocessing and normalisation. Args: df: Data frame to transform. Returns: Transformed data frame.\n- def getcolumn_definition(self): \"Returns formatted column definition in order expected by the TFT.\n\n<|skeleton|>\nclass ElectricityFormatter:\n \"\"\"Defines and formats data for the electricity dataset. Note that per-entity z-score normalization is used here, and is implemented across functions. Attributes: column_definition: Defines input and data type of column used in the experiment. identifiers: Entity identifiers used in experiments.\"\"\"\n\n def __init__(self):\n \"\"\"Initialises formatter.\"\"\"\n <|body_0|>\n\n def split_data(self, df, valid_boundary=1315, test_boundary=1339):\n \"\"\"Splits data frame into training-validation-test data frames. This also calibrates scaling object, and transforms data for each split. Args: df: Source data frame to split. valid_boundary: Starting year for validation data test_boundary: Starting year for test data Returns: Tuple of transformed (train, valid, test) data.\"\"\"\n <|body_1|>\n\n def set_scalers(self, df):\n \"\"\"Calibrates scalers using the data supplied. Args: df: Data to use to calibrate scalers.\"\"\"\n <|body_2|>\n\n def transform_inputs(self, df):\n \"\"\"Performs feature transformations. This includes both feature engineering, preprocessing and normalisation. Args: df: Data frame to transform. 
Returns: Transformed data frame.\"\"\"\n <|body_3|>\n\n def getcolumn_definition(self):\n \"\"\"\"Returns formatted column definition in order expected by the TFT.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.identifiers = None\n self._real_scalers = None\n self._cat_scalers = None\n self._target_scaler = None\n self._num_classes_per_cat_input = None\n self._time_steps = get_fixed_params()['total_time_steps']\n self._num_encoder_steps = get_fixed_params()['num_encoder_steps']\n<|end_body_0|>\n\n<|body_start_1|>\n print_info('Formatting train-valid-test splits.')\n index = df['days_from_start']\n train = df.loc[index < valid_boundary]\n valid = df.loc[(index >= valid_boundary - 7) & (index < test_boundary)]\n test = df.loc[index >= test_boundary - 7]\n self.set_scalers(train)\n return (self.transform_inputs(data) for data in [train, valid, test])\n<|end_body_1|>\n\n<|body_start_2|>\n print_info('Setting scalers with training data...')\n column_definitions = self.getcolumn_definition()\n id_column = get_single_col_by_input_type(InputTypes.ID, column_definitions)\n target_column = get_single_col_by_input_type(InputTypes.TARGET, column_definitions)\n real_inputs = extract_cols_from_data_type(DataTypes.REAL_VALUED, column_definitions, {InputTypes.ID, InputTypes.TIME})\n self.real_scalers = {}\n self.target_scaler = {}\n identifiers = []\n for identifier, sliced in df.groupby(id_column):\n if len(sliced) >= self._time_steps:\n data = sliced[real_inputs].values\n targets = sliced[[target_column]].values\n self.real_scalers[identifier] = sk_preprocessing.StandardScaler().fit(data)\n self.target_scaler[identifier] = sk_preprocessing.StandardScaler().fit(targets)\n identifiers.append(identifier)\n categorical_inputs = extract_cols_from_data_type(DataTypes.CATEGORICAL, column_definitions, {InputTypes.ID, InputTypes.TIME})\n categorical_scalers = {}\n num_classes = []\n for col in categorical_inputs:\n srs = df[col].apply(str)\n categorical_scalers[col] = sk_preprocessing.LabelEncoder().fit(srs.values)\n num_classes.append(srs.nunique())\n self._cat_scalers = categorical_scalers\n self._num_classes_per_cat_input = num_classes\n self.identifiers = identifiers\n<|end_body_2|>\n\n<|body_start_3|>\n if self.real_scalers is None and self._cat_scalers is None:\n raise ValueError('Scalers have not been set!')\n column_definitions = self.getcolumn_definition()\n id_col = get_single_col_by_input_type(InputTypes.ID, column_definitions)\n real_inputs = extract_cols_from_data_type(DataTypes.REAL_VALUED, column_definitions, {InputTypes.ID, InputTypes.TIME})\n categorical_inputs = extract_cols_from_data_type(DataTypes.CATEGORICAL, column_definitions, {InputTypes.ID, InputTypes.TIME})\n df_list = []\n for identifier, sliced in df.groupby(id_col):\n if len(sliced) >= self._time_steps:\n sliced_copy = sliced.copy()\n sliced_copy[real_inputs] = self.real_scalers[identifier].transform(sliced_copy[real_inputs].values)\n df_list.append(sliced_copy)\n output = pd.concat(df_list, axis=0)\n for col in categorical_inputs:\n string_df = df[col].apply(str)\n output[col] = self._cat_scalers[col].transform(string_df)\n return output\n<|end_body_3|>\n\n<|body_start_4|>\n column_definition = self.column_definition\n\n def _check_single_column(input_type):\n length = len([tup for tup in column_definition if tup[2] == input_type])\n if length != 1:\n raise ValueError('Illegal number of inputs ({}) of type {}'.format(length, input_type))\n _check_single_column(InputTypes.ID)\n 
_check_single_column(InputTypes.TIME)\n identifier = [tup for tup in column_definition if tup[2] == InputTypes.ID]\n time = [tup for tup in column_definition if tup[2] == InputTypes.TIME]\n real_inputs = [tup for tup in column_definition if tup[1] == DataTypes.REAL_VALUED and tup[2] not in {InputTypes.ID, InputTypes.TIME}]\n categorical_inputs = [tup for tup in column_definition if tup[1] == DataTypes.CATEGORICAL and tup[2] not in {InputTypes.ID, InputTypes.TIME}]\n return identifier + time + real_inputs + categorical_inputs\n<|end_body_4|>\n", "revision_id": "7929adbe91e9cfe8dc5dc1daad5ae7392f9719a0", "skeleton": "<|skeleton|>\nclass ElectricityFormatter:\n \"\"\"Defines and formats data for the electricity dataset. Note that per-entity z-score normalization is used here, and is implemented across functions. Attributes: column_definition: Defines input and data type of column used in the experiment. identifiers: Entity identifiers used in experiments.\"\"\"\n\n def __init__(self):\n \"\"\"Initialises formatter.\"\"\"\n <|body_0|>\n\n def split_data(self, df, valid_boundary=1315, test_boundary=1339):\n \"\"\"Splits data frame into training-validation-test data frames. This also calibrates scaling object, and transforms data for each split. Args: df: Source data frame to split. valid_boundary: Starting year for validation data test_boundary: Starting year for test data Returns: Tuple of transformed (train, valid, test) data.\"\"\"\n <|body_1|>\n\n def set_scalers(self, df):\n \"\"\"Calibrates scalers using the data supplied. Args: df: Data to use to calibrate scalers.\"\"\"\n <|body_2|>\n\n def transform_inputs(self, df):\n \"\"\"Performs feature transformations. This includes both feature engineering, preprocessing and normalisation. Args: df: Data frame to transform. Returns: Transformed data frame.\"\"\"\n <|body_3|>\n\n def getcolumn_definition(self):\n \"\"\"\"Returns formatted column definition in order expected by the TFT.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ElectricityFormatter:\n \"\"\"Defines and formats data for the electricity dataset. Note that per-entity z-score normalization is used here, and is implemented across functions. Attributes: column_definition: Defines input and data type of column used in the experiment. identifiers: Entity identifiers used in experiments.\"\"\"\n\n def __init__(self):\n \"\"\"Initialises formatter.\"\"\"\n self.identifiers = None\n self._real_scalers = None\n self._cat_scalers = None\n self._target_scaler = None\n self._num_classes_per_cat_input = None\n self._time_steps = get_fixed_params()['total_time_steps']\n self._num_encoder_steps = get_fixed_params()['num_encoder_steps']\n\n def split_data(self, df, valid_boundary=1315, test_boundary=1339):\n \"\"\"Splits data frame into training-validation-test data frames. This also calibrates scaling object, and transforms data for each split. Args: df: Source data frame to split. 
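`getcolumn_definition` validates that exactly one column is tagged ID and exactly one TIME before reordering columns as identifier, time, reals, categoricals. A standalone version of that guard; the `InputTypes` enum here is a hypothetical stand-in for the project's own definition:

```python
from enum import IntEnum

class InputTypes(IntEnum):  # hypothetical stand-in for the project's enum
    ID = 0
    TIME = 1
    TARGET = 2

def check_single_column(column_definition, input_type):
    """Raise unless exactly one (name, dtype, input_type) tuple carries
    the given input type, as getcolumn_definition() requires."""
    matches = [tup for tup in column_definition if tup[2] == input_type]
    if len(matches) != 1:
        raise ValueError('Illegal number of inputs ({}) of type {}'.format(
            len(matches), input_type))

check_single_column([('id', 'CATEGORICAL', InputTypes.ID)], InputTypes.ID)  # passes
```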
valid_boundary: Starting year for validation data test_boundary: Starting year for test data Returns: Tuple of transformed (train, valid, test) data.\"\"\"\n print_info('Formatting train-valid-test splits.')\n index = df['days_from_start']\n train = df.loc[index < valid_boundary]\n valid = df.loc[(index >= valid_boundary - 7) & (index < test_boundary)]\n test = df.loc[index >= test_boundary - 7]\n self.set_scalers(train)\n return (self.transform_inputs(data) for data in [train, valid, test])\n\n def set_scalers(self, df):\n \"\"\"Calibrates scalers using the data supplied. Args: df: Data to use to calibrate scalers.\"\"\"\n print_info('Setting scalers with training data...')\n column_definitions = self.getcolumn_definition()\n id_column = get_single_col_by_input_type(InputTypes.ID, column_definitions)\n target_column = get_single_col_by_input_type(InputTypes.TARGET, column_definitions)\n real_inputs = extract_cols_from_data_type(DataTypes.REAL_VALUED, column_definitions, {InputTypes.ID, InputTypes.TIME})\n self.real_scalers = {}\n self.target_scaler = {}\n identifiers = []\n for identifier, sliced in df.groupby(id_column):\n if len(sliced) >= self._time_steps:\n data = sliced[real_inputs].values\n targets = sliced[[target_column]].values\n self.real_scalers[identifier] = sk_preprocessing.StandardScaler().fit(data)\n self.target_scaler[identifier] = sk_preprocessing.StandardScaler().fit(targets)\n identifiers.append(identifier)\n categorical_inputs = extract_cols_from_data_type(DataTypes.CATEGORICAL, column_definitions, {InputTypes.ID, InputTypes.TIME})\n categorical_scalers = {}\n num_classes = []\n for col in categorical_inputs:\n srs = df[col].apply(str)\n categorical_scalers[col] = sk_preprocessing.LabelEncoder().fit(srs.values)\n num_classes.append(srs.nunique())\n self._cat_scalers = categorical_scalers\n self._num_classes_per_cat_input = num_classes\n self.identifiers = identifiers\n\n def transform_inputs(self, df):\n \"\"\"Performs feature transformations. This includes both feature engineering, preprocessing and normalisation. Args: df: Data frame to transform. 
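Note that `split_data` splits on `days_from_start`, so the docstring's "Starting year" is misleading: the boundaries are day indices, and validation/test each reach back 7 days past their boundary so encoder windows at the start of a split still have history. A parameterised sketch of the same split; the `lookback` name is mine, and the defaults copy the record:

```python
import pandas as pd

def split_by_day(df, valid_boundary=1315, test_boundary=1339, lookback=7):
    """Split on the days_from_start column; each later split reaches
    `lookback` days past its boundary so encoder windows keep history."""
    index = df['days_from_start']
    train = df.loc[index < valid_boundary]
    valid = df.loc[(index >= valid_boundary - lookback) & (index < test_boundary)]
    test = df.loc[index >= test_boundary - lookback]
    return train, valid, test
```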
Returns: Transformed data frame.\"\"\"\n if self.real_scalers is None and self._cat_scalers is None:\n raise ValueError('Scalers have not been set!')\n column_definitions = self.getcolumn_definition()\n id_col = get_single_col_by_input_type(InputTypes.ID, column_definitions)\n real_inputs = extract_cols_from_data_type(DataTypes.REAL_VALUED, column_definitions, {InputTypes.ID, InputTypes.TIME})\n categorical_inputs = extract_cols_from_data_type(DataTypes.CATEGORICAL, column_definitions, {InputTypes.ID, InputTypes.TIME})\n df_list = []\n for identifier, sliced in df.groupby(id_col):\n if len(sliced) >= self._time_steps:\n sliced_copy = sliced.copy()\n sliced_copy[real_inputs] = self.real_scalers[identifier].transform(sliced_copy[real_inputs].values)\n df_list.append(sliced_copy)\n output = pd.concat(df_list, axis=0)\n for col in categorical_inputs:\n string_df = df[col].apply(str)\n output[col] = self._cat_scalers[col].transform(string_df)\n return output\n\n def getcolumn_definition(self):\n \"\"\"\"Returns formatted column definition in order expected by the TFT.\"\"\"\n column_definition = self.column_definition\n\n def _check_single_column(input_type):\n length = len([tup for tup in column_definition if tup[2] == input_type])\n if length != 1:\n raise ValueError('Illegal number of inputs ({}) of type {}'.format(length, input_type))\n _check_single_column(InputTypes.ID)\n _check_single_column(InputTypes.TIME)\n identifier = [tup for tup in column_definition if tup[2] == InputTypes.ID]\n time = [tup for tup in column_definition if tup[2] == InputTypes.TIME]\n real_inputs = [tup for tup in column_definition if tup[1] == DataTypes.REAL_VALUED and tup[2] not in {InputTypes.ID, InputTypes.TIME}]\n categorical_inputs = [tup for tup in column_definition if tup[1] == DataTypes.CATEGORICAL and tup[2] not in {InputTypes.ID, InputTypes.TIME}]\n return identifier + time + real_inputs + categorical_inputs\n", "source": "the_stack_v2_python_sparse", "source_path": "tools/accuracy_checker/openvino/tools/accuracy_checker/annotation_converters/electricity_time_series_forecasting.py", "source_repo": "openvinotoolkit/open_model_zoo", "split": "val", "star_events_count": 1712} {"blob_id": "de8b437e01f3ff3dd6c204c6403e22aabcf4fec3", "bodies": ["self.env.revert_snapshot('deploy_kafka')\ntarget_node = {'slave-02': ['controller', self.settings.role_name]}\nself.helpers.remove_nodes_from_cluster(target_node)\nself.check_plugin_online()\nself.helpers.run_ostf()\nself.helpers.add_nodes_to_cluster(target_node)\nself.check_plugin_online()\nself.helpers.run_ostf()", "self.env.revert_snapshot('deploy_kafka')\ntarget_node = {'slave-05': ['compute', 'cinder']}\nself.helpers.remove_nodes_from_cluster(target_node)\nself.check_plugin_online()\nself.helpers.run_ostf()\nself.helpers.add_nodes_to_cluster(target_node)\nself.check_plugin_online()\nself.helpers.run_ostf()"], "bodies_text": "<|body_start_0|>\n self.env.revert_snapshot('deploy_kafka')\n target_node = {'slave-02': ['controller', self.settings.role_name]}\n self.helpers.remove_nodes_from_cluster(target_node)\n self.check_plugin_online()\n self.helpers.run_ostf()\n self.helpers.add_nodes_to_cluster(target_node)\n self.check_plugin_online()\n self.helpers.run_ostf()\n<|end_body_0|>\n\n<|body_start_1|>\n self.env.revert_snapshot('deploy_kafka')\n target_node = {'slave-05': ['compute', 'cinder']}\n self.helpers.remove_nodes_from_cluster(target_node)\n self.check_plugin_online()\n self.helpers.run_ostf()\n self.helpers.add_nodes_to_cluster(target_node)\n 
self.check_plugin_online()\n self.helpers.run_ostf()\n<|end_body_1|>\n", "class_docstring": "Class for system tests for Ceilometer-Redis plugin.", "class_name": "TestNodesKafkaPlugin", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestNodesKafkaPlugin:\n \"\"\"Class for system tests for Ceilometer-Redis plugin.\"\"\"\n\n def add_remove_controller_kafka(self):\n \"\"\"Verify that the number of controllers can scale up and down Scenario: 1. Revert snapshot with 5 deployed nodes in HA configuration 2. Remove one controller node and redeploy the cluster 3. Check that Kafka is running 4. Run OSTF 5. Add one controller node (return previous state) and redeploy the cluster 6. Check that Kafka is running 7. Run OSTF Duration 120m\"\"\"\n <|body_0|>\n\n def add_remove_compute_kafka(self):\n \"\"\"Verify that the number of computes can scale up and down Scenario: 1. Revert snapshot with 5 deployed nodes in HA configuration 2. Remove one controller node and redeploy the cluster 3. Check that Kafka is running 4. Run OSTF 5. Add one compute node (return previous state) and redeploy the cluster 6. Check that Kafka is running 7. Run OSTF Duration 120m\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.env.revert_snapshot('deploy_kafka')\n target_node = {'slave-02': ['controller', self.settings.role_name]}\n self.helpers.remove_nodes_from_cluster(target_node)\n self.check_plugin_online()\n self.helpers.run_ostf()\n self.helpers.add_nodes_to_cluster(target_node)\n self.check_plugin_online()\n self.helpers.run_ostf()\n<|end_body_0|>\n\n<|body_start_1|>\n self.env.revert_snapshot('deploy_kafka')\n target_node = {'slave-05': ['compute', 'cinder']}\n self.helpers.remove_nodes_from_cluster(target_node)\n self.check_plugin_online()\n self.helpers.run_ostf()\n self.helpers.add_nodes_to_cluster(target_node)\n self.check_plugin_online()\n self.helpers.run_ostf()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000257", "length_bytes": 3370, "license_type": "no_license", "methods": [{"docstring": "Verify that the number of controllers can scale up and down Scenario: 1. Revert snapshot with 5 deployed nodes in HA configuration 2. Remove one controller node and redeploy the cluster 3. Check that Kafka is running 4. Run OSTF 5. Add one controller node (return previous state) and redeploy the cluster 6. Check that Kafka is running 7. Run OSTF Duration 120m", "name": "add_remove_controller_kafka", "signature": "def add_remove_controller_kafka(self)"}, {"docstring": "Verify that the number of computes can scale up and down Scenario: 1. Revert snapshot with 5 deployed nodes in HA configuration 2. Remove one controller node and redeploy the cluster 3. Check that Kafka is running 4. Run OSTF 5. Add one compute node (return previous state) and redeploy the cluster 6. Check that Kafka is running 7. Run OSTF Duration 120m", "name": "add_remove_compute_kafka", "signature": "def add_remove_compute_kafka(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004888", "prompt": "Implement the Python class `TestNodesKafkaPlugin` described below.\n\nClass description:\nClass for system tests for Ceilometer-Redis plugin.\n\nMethod signatures and docstrings:\n- def add_remove_controller_kafka(self): Verify that the number of controllers can scale up and down Scenario: 1. Revert snapshot with 5 deployed nodes in HA configuration 2. Remove one controller node and redeploy the cluster 3. Check that Kafka is running 4. 
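Both Kafka scale tests share the same remove/verify/re-add/verify cycle, with only the target node differing; a small helper could factor that out. A sketch under the assumption that the collaborators behave as in the record (all attribute names come from it; the helper name is mine):

```python
def scale_cycle(env, helpers, check_plugin_online, target_node):
    """Remove a node, verify plugin health and OSTF, then restore it and
    verify again -- the cycle both Kafka scale tests repeat verbatim."""
    env.revert_snapshot('deploy_kafka')
    for action in (helpers.remove_nodes_from_cluster,
                   helpers.add_nodes_to_cluster):
        action(target_node)
        check_plugin_online()
        helpers.run_ostf()
```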
Run OSTF 5. Add one controller node (return previous state) and redeploy the cluster 6. Check that Kafka is running 7. Run OSTF Duration 120m\n- def add_remove_compute_kafka(self): Verify that the number of computes can scale up and down Scenario: 1. Revert snapshot with 5 deployed nodes in HA configuration 2. Remove one controller node and redeploy the cluster 3. Check that Kafka is running 4. Run OSTF 5. Add one compute node (return previous state) and redeploy the cluster 6. Check that Kafka is running 7. Run OSTF Duration 120m", "prompted_full_text": "Implement the Python class `TestNodesKafkaPlugin` described below.\n\nClass description:\nClass for system tests for Ceilometer-Redis plugin.\n\nMethod signatures and docstrings:\n- def add_remove_controller_kafka(self): Verify that the number of controllers can scale up and down Scenario: 1. Revert snapshot with 5 deployed nodes in HA configuration 2. Remove one controller node and redeploy the cluster 3. Check that Kafka is running 4. Run OSTF 5. Add one controller node (return previous state) and redeploy the cluster 6. Check that Kafka is running 7. Run OSTF Duration 120m\n- def add_remove_compute_kafka(self): Verify that the number of computes can scale up and down Scenario: 1. Revert snapshot with 5 deployed nodes in HA configuration 2. Remove one controller node and redeploy the cluster 3. Check that Kafka is running 4. Run OSTF 5. Add one compute node (return previous state) and redeploy the cluster 6. Check that Kafka is running 7. Run OSTF Duration 120m\n\n<|skeleton|>\nclass TestNodesKafkaPlugin:\n \"\"\"Class for system tests for Ceilometer-Redis plugin.\"\"\"\n\n def add_remove_controller_kafka(self):\n \"\"\"Verify that the number of controllers can scale up and down Scenario: 1. Revert snapshot with 5 deployed nodes in HA configuration 2. Remove one controller node and redeploy the cluster 3. Check that Kafka is running 4. Run OSTF 5. Add one controller node (return previous state) and redeploy the cluster 6. Check that Kafka is running 7. Run OSTF Duration 120m\"\"\"\n <|body_0|>\n\n def add_remove_compute_kafka(self):\n \"\"\"Verify that the number of computes can scale up and down Scenario: 1. Revert snapshot with 5 deployed nodes in HA configuration 2. Remove one controller node and redeploy the cluster 3. Check that Kafka is running 4. Run OSTF 5. Add one compute node (return previous state) and redeploy the cluster 6. Check that Kafka is running 7. Run OSTF Duration 120m\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.env.revert_snapshot('deploy_kafka')\n target_node = {'slave-02': ['controller', self.settings.role_name]}\n self.helpers.remove_nodes_from_cluster(target_node)\n self.check_plugin_online()\n self.helpers.run_ostf()\n self.helpers.add_nodes_to_cluster(target_node)\n self.check_plugin_online()\n self.helpers.run_ostf()\n<|end_body_0|>\n\n<|body_start_1|>\n self.env.revert_snapshot('deploy_kafka')\n target_node = {'slave-05': ['compute', 'cinder']}\n self.helpers.remove_nodes_from_cluster(target_node)\n self.check_plugin_online()\n self.helpers.run_ostf()\n self.helpers.add_nodes_to_cluster(target_node)\n self.check_plugin_online()\n self.helpers.run_ostf()\n<|end_body_1|>\n", "revision_id": "179249df2d206eeabb3955c9dc8cb78cac3c36c6", "skeleton": "<|skeleton|>\nclass TestNodesKafkaPlugin:\n \"\"\"Class for system tests for Ceilometer-Redis plugin.\"\"\"\n\n def add_remove_controller_kafka(self):\n \"\"\"Verify that the number of controllers can scale up and down Scenario: 1. 
Revert snapshot with 5 deployed nodes in HA configuration 2. Remove one controller node and redeploy the cluster 3. Check that Kafka is running 4. Run OSTF 5. Add one controller node (return previous state) and redeploy the cluster 6. Check that Kafka is running 7. Run OSTF Duration 120m\"\"\"\n <|body_0|>\n\n def add_remove_compute_kafka(self):\n \"\"\"Verify that the number of computes can scale up and down Scenario: 1. Revert snapshot with 5 deployed nodes in HA configuration 2. Remove one controller node and redeploy the cluster 3. Check that Kafka is running 4. Run OSTF 5. Add one compute node (return previous state) and redeploy the cluster 6. Check that Kafka is running 7. Run OSTF Duration 120m\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestNodesKafkaPlugin:\n \"\"\"Class for system tests for Ceilometer-Redis plugin.\"\"\"\n\n def add_remove_controller_kafka(self):\n \"\"\"Verify that the number of controllers can scale up and down Scenario: 1. Revert snapshot with 5 deployed nodes in HA configuration 2. Remove one controller node and redeploy the cluster 3. Check that Kafka is running 4. Run OSTF 5. Add one controller node (return previous state) and redeploy the cluster 6. Check that Kafka is running 7. Run OSTF Duration 120m\"\"\"\n self.env.revert_snapshot('deploy_kafka')\n target_node = {'slave-02': ['controller', self.settings.role_name]}\n self.helpers.remove_nodes_from_cluster(target_node)\n self.check_plugin_online()\n self.helpers.run_ostf()\n self.helpers.add_nodes_to_cluster(target_node)\n self.check_plugin_online()\n self.helpers.run_ostf()\n\n def add_remove_compute_kafka(self):\n \"\"\"Verify that the number of computes can scale up and down Scenario: 1. Revert snapshot with 5 deployed nodes in HA configuration 2. Remove one controller node and redeploy the cluster 3. Check that Kafka is running 4. Run OSTF 5. Add one compute node (return previous state) and redeploy the cluster 6. Check that Kafka is running 7. 
Run OSTF Duration 120m\"\"\"\n self.env.revert_snapshot('deploy_kafka')\n target_node = {'slave-05': ['compute', 'cinder']}\n self.helpers.remove_nodes_from_cluster(target_node)\n self.check_plugin_online()\n self.helpers.run_ostf()\n self.helpers.add_nodes_to_cluster(target_node)\n self.check_plugin_online()\n self.helpers.run_ostf()\n", "source": "the_stack_v2_python_sparse", "source_path": "stacklight_tests/kafka/test_system.py", "source_repo": "rkhozinov/stacklight-integration-tests", "split": "val", "star_events_count": 1} {"blob_id": "e9026ed6ef1e56b0b6f19169b742fbe6354340b1", "bodies": ["for h in self._all:\n if h is not None:\n h.detach_()", "for e in self._all:\n a, br, d = e.size()\n sentStates = e.view(a, beam_size, br // beam_size, d)[:, :, idx]\n sentStates.data.copy_(sentStates.data.index_select(1, positions))"], "bodies_text": "<|body_start_0|>\n for h in self._all:\n if h is not None:\n h.detach_()\n<|end_body_0|>\n\n<|body_start_1|>\n for e in self._all:\n a, br, d = e.size()\n sentStates = e.view(a, beam_size, br // beam_size, d)[:, :, idx]\n sentStates.data.copy_(sentStates.data.index_select(1, positions))\n<|end_body_1|>\n", "class_docstring": "DecoderState is a base class for models, used during translation for storing translation states.", "class_name": "DecoderState", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DecoderState:\n \"\"\"DecoderState is a base class for models, used during translation for storing translation states.\"\"\"\n\n def detach(self):\n \"\"\"Detaches all Variables from the graph that created it, making it a leaf.\"\"\"\n <|body_0|>\n\n def beam_update(self, idx, positions, beam_size):\n \"\"\"Update when beam advances.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for h in self._all:\n if h is not None:\n h.detach_()\n<|end_body_0|>\n\n<|body_start_1|>\n for e in self._all:\n a, br, d = e.size()\n sentStates = e.view(a, beam_size, br // beam_size, d)[:, :, idx]\n sentStates.data.copy_(sentStates.data.index_select(1, positions))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000258", "length_bytes": 39461, "license_type": "no_license", "methods": [{"docstring": "Detaches all Variables from the graph that created it, making it a leaf.", "name": "detach", "signature": "def detach(self)"}, {"docstring": "Update when beam advances.", "name": "beam_update", "signature": "def beam_update(self, idx, positions, beam_size)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000151", "prompt": "Implement the Python class `DecoderState` described below.\n\nClass description:\nDecoderState is a base class for models, used during translation for storing translation states.\n\nMethod signatures and docstrings:\n- def detach(self): Detaches all Variables from the graph that created it, making it a leaf.\n- def beam_update(self, idx, positions, beam_size): Update when beam advances.", "prompted_full_text": "Implement the Python class `DecoderState` described below.\n\nClass description:\nDecoderState is a base class for models, used during translation for storing translation states.\n\nMethod signatures and docstrings:\n- def detach(self): Detaches all Variables from the graph that created it, making it a leaf.\n- def beam_update(self, idx, positions, beam_size): Update when beam advances.\n\n<|skeleton|>\nclass DecoderState:\n \"\"\"DecoderState is a base class for models, used during translation for storing translation states.\"\"\"\n\n def 
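The TestNodesKafkaPlugin record above carries two apparent copy-paste slips from its source: the class docstring still reads "Ceilometer-Redis plugin" although every step checks Kafka, and the compute test's scenario text says "Remove one controller node" although it removes a compute. The two test bodies are otherwise identical apart from the target node, so both follow a single remove/verify/re-add/verify pattern. A minimal sketch of that shared pattern as a helper; the method name _scale_down_up is hypothetical, while env, helpers and check_plugin_online are the fixtures the record itself uses:

    def _scale_down_up(self, target_node, snapshot='deploy_kafka'):
        """Remove a node, verify Kafka and OSTF, re-add it, verify again (sketch)."""
        self.env.revert_snapshot(snapshot)              # back to the 5-node HA state
        self.helpers.remove_nodes_from_cluster(target_node)
        self.check_plugin_online()                      # Kafka must survive the removal
        self.helpers.run_ostf()
        self.helpers.add_nodes_to_cluster(target_node)  # restore the previous state
        self.check_plugin_online()
        self.helpers.run_ostf()

Each test then reduces to a single call, e.g. self._scale_down_up({'slave-05': ['compute', 'cinder']}).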
detach(self):\n \"\"\"Detaches all Variables from the graph that created it, making it a leaf.\"\"\"\n <|body_0|>\n\n def beam_update(self, idx, positions, beam_size):\n \"\"\"Update when beam advances.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for h in self._all:\n if h is not None:\n h.detach_()\n<|end_body_0|>\n\n<|body_start_1|>\n for e in self._all:\n a, br, d = e.size()\n sentStates = e.view(a, beam_size, br // beam_size, d)[:, :, idx]\n sentStates.data.copy_(sentStates.data.index_select(1, positions))\n<|end_body_1|>\n", "revision_id": "8b159fcbf1bc9faad5a2ef1c0690090037143899", "skeleton": "<|skeleton|>\nclass DecoderState:\n \"\"\"DecoderState is a base class for models, used during translation for storing translation states.\"\"\"\n\n def detach(self):\n \"\"\"Detaches all Variables from the graph that created it, making it a leaf.\"\"\"\n <|body_0|>\n\n def beam_update(self, idx, positions, beam_size):\n \"\"\"Update when beam advances.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DecoderState:\n \"\"\"DecoderState is a base class for models, used during translation for storing translation states.\"\"\"\n\n def detach(self):\n \"\"\"Detaches all Variables from the graph that created it, making it a leaf.\"\"\"\n for h in self._all:\n if h is not None:\n h.detach_()\n\n def beam_update(self, idx, positions, beam_size):\n \"\"\"Update when beam advances.\"\"\"\n for e in self._all:\n a, br, d = e.size()\n sentStates = e.view(a, beam_size, br // beam_size, d)[:, :, idx]\n sentStates.data.copy_(sentStates.data.index_select(1, positions))\n", "source": "the_stack_v2_python_sparse", "source_path": "disf_gen_coarse2fine/table/Models.py", "source_repo": "JingfengYang/Disfluency-Generation-and-Detection", "split": "val", "star_events_count": 5} {"blob_id": "8a5d39c88322e754dc29eebdcfab791e9aedbdae", "bodies": ["self.parser = RequestParser()\nself.parser.add_argument('location', type=str, required=True, help='location field is missing')\nself.parser.add_argument('images', type=str, required=True, help='Image field is missing')\nself.parser.add_argument('topic', type=str, required=True, help='Topic field is missing')\nself.parser.add_argument('happeningOn', type=str, required=True, help='Date field is missing')\nself.parser.add_argument('tags', type=str, required=True, help='Tags field seems to be missing')", "userId = current_user['userid']\nargs = self.parser.parse_args()\nargs = request.get_json()\nlocation = args['location']\nimages = args['images']\ntopic = args['topic']\nhappeningOn = args['happeningOn']\ntags = args['tags']\nif validate.valid_date(happeningOn):\n return validate.valid_date(happeningOn)\nif validate.valid_topic(topic):\n return validate.valid_topic(topic)\nif validate.valid_location(location):\n return validate.valid_location(location)\nif validate.valid_time(happeningOn):\n return validate.valid_time(happeningOn)\nmeet = MeetupModels.repeat_meetup(self, location, happeningOn)\nmeet = json.loads(meet)\nif meet:\n return ({'status': 409, 'error': 'a meetup with similar details exists'}, 409)\nmeetup = MeetupModels(userId, location, images, topic, happeningOn, tags)\nnewMeetup = meetup.create_meetup()\nnewMeetup = json.loads(newMeetup)\nreturn ({'status': 201, 'meetup': newMeetup}, 201)", "meetups = MeetupModels.get_all(self)\nmeetups = json.loads(meetups)\nif not meetups:\n return ({'status': 404, 'error': 'No meetups posted 
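beam_update in the DecoderState record relies on each state tensor being laid out as (layers, beam_size * batch, dim): viewing it as (layers, beam_size, batch, dim) and integer-indexing one sentence yields a view into the same storage, so copy_ reorders that sentence's beams in place. A standalone PyTorch check of exactly that view-and-reorder trick (the shapes here are illustrative, not taken from the record):

    import torch

    layers, beam, batch, dim = 2, 3, 4, 5
    e = torch.arange(layers * beam * batch * dim, dtype=torch.float32)
    e = e.view(layers, beam * batch, dim)                 # layout held by the decoder state
    before = e.view(layers, beam, batch, dim).clone()

    idx, positions = 1, torch.tensor([2, 0, 1])           # reorder sentence 1's beams
    sent = e.view(layers, beam, batch, dim)[:, :, idx]    # a view, not a copy
    sent.data.copy_(sent.data.index_select(1, positions))

    after = e.view(layers, beam, batch, dim)
    assert torch.equal(after[:, 0, idx], before[:, 2, idx])  # beam 0 now holds old beam 2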
yet'}, 404)\nreturn ({'status': 200, 'data': meetups}, 200)"], "bodies_text": "<|body_start_0|>\n self.parser = RequestParser()\n self.parser.add_argument('location', type=str, required=True, help='location field is missing')\n self.parser.add_argument('images', type=str, required=True, help='Image field is missing')\n self.parser.add_argument('topic', type=str, required=True, help='Topic field is missing')\n self.parser.add_argument('happeningOn', type=str, required=True, help='Date field is missing')\n self.parser.add_argument('tags', type=str, required=True, help='Tags field seems to be missing')\n<|end_body_0|>\n\n<|body_start_1|>\n userId = current_user['userid']\n args = self.parser.parse_args()\n args = request.get_json()\n location = args['location']\n images = args['images']\n topic = args['topic']\n happeningOn = args['happeningOn']\n tags = args['tags']\n if validate.valid_date(happeningOn):\n return validate.valid_date(happeningOn)\n if validate.valid_topic(topic):\n return validate.valid_topic(topic)\n if validate.valid_location(location):\n return validate.valid_location(location)\n if validate.valid_time(happeningOn):\n return validate.valid_time(happeningOn)\n meet = MeetupModels.repeat_meetup(self, location, happeningOn)\n meet = json.loads(meet)\n if meet:\n return ({'status': 409, 'error': 'a meetup with similar details exists'}, 409)\n meetup = MeetupModels(userId, location, images, topic, happeningOn, tags)\n newMeetup = meetup.create_meetup()\n newMeetup = json.loads(newMeetup)\n return ({'status': 201, 'meetup': newMeetup}, 201)\n<|end_body_1|>\n\n<|body_start_2|>\n meetups = MeetupModels.get_all(self)\n meetups = json.loads(meetups)\n if not meetups:\n return ({'status': 404, 'error': 'No meetups posted yet'}, 404)\n return ({'status': 200, 'data': meetups}, 200)\n<|end_body_2|>\n", "class_docstring": "Class for meetup endpoints", "class_name": "AllMeetups", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AllMeetups:\n \"\"\"Class for meetup endpoints\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the meetup class\"\"\"\n <|body_0|>\n\n def post(self, current_user):\n \"\"\"Create meetup endpoint\"\"\"\n <|body_1|>\n\n def get(self, current_user):\n \"\"\"Fetch all meetups\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.parser = RequestParser()\n self.parser.add_argument('location', type=str, required=True, help='location field is missing')\n self.parser.add_argument('images', type=str, required=True, help='Image field is missing')\n self.parser.add_argument('topic', type=str, required=True, help='Topic field is missing')\n self.parser.add_argument('happeningOn', type=str, required=True, help='Date field is missing')\n self.parser.add_argument('tags', type=str, required=True, help='Tags field seems to be missing')\n<|end_body_0|>\n\n<|body_start_1|>\n userId = current_user['userid']\n args = self.parser.parse_args()\n args = request.get_json()\n location = args['location']\n images = args['images']\n topic = args['topic']\n happeningOn = args['happeningOn']\n tags = args['tags']\n if validate.valid_date(happeningOn):\n return validate.valid_date(happeningOn)\n if validate.valid_topic(topic):\n return validate.valid_topic(topic)\n if validate.valid_location(location):\n return validate.valid_location(location)\n if validate.valid_time(happeningOn):\n return validate.valid_time(happeningOn)\n meet = MeetupModels.repeat_meetup(self, location, happeningOn)\n meet = json.loads(meet)\n 
if meet:\n return ({'status': 409, 'error': 'a meetup with similar details exists'}, 409)\n meetup = MeetupModels(userId, location, images, topic, happeningOn, tags)\n newMeetup = meetup.create_meetup()\n newMeetup = json.loads(newMeetup)\n return ({'status': 201, 'meetup': newMeetup}, 201)\n<|end_body_1|>\n\n<|body_start_2|>\n meetups = MeetupModels.get_all(self)\n meetups = json.loads(meetups)\n if not meetups:\n return ({'status': 404, 'error': 'No meetups posted yet'}, 404)\n return ({'status': 200, 'data': meetups}, 200)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000259", "length_bytes": 5065, "license_type": "no_license", "methods": [{"docstring": "Initialize the meetup class", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Create meetup endpoint", "name": "post", "signature": "def post(self, current_user)"}, {"docstring": "Fetch all meetups", "name": "get", "signature": "def get(self, current_user)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000613", "prompt": "Implement the Python class `AllMeetups` described below.\n\nClass description:\nClass for meetup endpoints\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize the meetup class\n- def post(self, current_user): Create meetup endpoint\n- def get(self, current_user): Fetch all meetups", "prompted_full_text": "Implement the Python class `AllMeetups` described below.\n\nClass description:\nClass for meetup endpoints\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize the meetup class\n- def post(self, current_user): Create meetup endpoint\n- def get(self, current_user): Fetch all meetups\n\n<|skeleton|>\nclass AllMeetups:\n \"\"\"Class for meetup endpoints\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the meetup class\"\"\"\n <|body_0|>\n\n def post(self, current_user):\n \"\"\"Create meetup endpoint\"\"\"\n <|body_1|>\n\n def get(self, current_user):\n \"\"\"Fetch all meetups\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.parser = RequestParser()\n self.parser.add_argument('location', type=str, required=True, help='location field is missing')\n self.parser.add_argument('images', type=str, required=True, help='Image field is missing')\n self.parser.add_argument('topic', type=str, required=True, help='Topic field is missing')\n self.parser.add_argument('happeningOn', type=str, required=True, help='Date field is missing')\n self.parser.add_argument('tags', type=str, required=True, help='Tags field seems to be missing')\n<|end_body_0|>\n\n<|body_start_1|>\n userId = current_user['userid']\n args = self.parser.parse_args()\n args = request.get_json()\n location = args['location']\n images = args['images']\n topic = args['topic']\n happeningOn = args['happeningOn']\n tags = args['tags']\n if validate.valid_date(happeningOn):\n return validate.valid_date(happeningOn)\n if validate.valid_topic(topic):\n return validate.valid_topic(topic)\n if validate.valid_location(location):\n return validate.valid_location(location)\n if validate.valid_time(happeningOn):\n return validate.valid_time(happeningOn)\n meet = MeetupModels.repeat_meetup(self, location, happeningOn)\n meet = json.loads(meet)\n if meet:\n return ({'status': 409, 'error': 'a meetup with similar details exists'}, 409)\n meetup = MeetupModels(userId, location, images, topic, happeningOn, tags)\n newMeetup = meetup.create_meetup()\n newMeetup = json.loads(newMeetup)\n return ({'status': 201, 'meetup': newMeetup}, 201)\n<|end_body_1|>\n\n<|body_start_2|>\n 
meetups = MeetupModels.get_all(self)\n meetups = json.loads(meetups)\n if not meetups:\n return ({'status': 404, 'error': 'No meetups posted yet'}, 404)\n return ({'status': 200, 'data': meetups}, 200)\n<|end_body_2|>\n", "revision_id": "93c7aeb54c240b6312e6164859acd2c878e85825", "skeleton": "<|skeleton|>\nclass AllMeetups:\n \"\"\"Class for meetup endpoints\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the meetup class\"\"\"\n <|body_0|>\n\n def post(self, current_user):\n \"\"\"Create meetup endpoint\"\"\"\n <|body_1|>\n\n def get(self, current_user):\n \"\"\"Fetch all meetups\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AllMeetups:\n \"\"\"Class for meetup endpoints\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the meetup class\"\"\"\n self.parser = RequestParser()\n self.parser.add_argument('location', type=str, required=True, help='location field is missing')\n self.parser.add_argument('images', type=str, required=True, help='Image field is missing')\n self.parser.add_argument('topic', type=str, required=True, help='Topic field is missing')\n self.parser.add_argument('happeningOn', type=str, required=True, help='Date field is missing')\n self.parser.add_argument('tags', type=str, required=True, help='Tags field seems to be missing')\n\n def post(self, current_user):\n \"\"\"Create meetup endpoint\"\"\"\n userId = current_user['userid']\n args = self.parser.parse_args()\n args = request.get_json()\n location = args['location']\n images = args['images']\n topic = args['topic']\n happeningOn = args['happeningOn']\n tags = args['tags']\n if validate.valid_date(happeningOn):\n return validate.valid_date(happeningOn)\n if validate.valid_topic(topic):\n return validate.valid_topic(topic)\n if validate.valid_location(location):\n return validate.valid_location(location)\n if validate.valid_time(happeningOn):\n return validate.valid_time(happeningOn)\n meet = MeetupModels.repeat_meetup(self, location, happeningOn)\n meet = json.loads(meet)\n if meet:\n return ({'status': 409, 'error': 'a meetup with similar details exists'}, 409)\n meetup = MeetupModels(userId, location, images, topic, happeningOn, tags)\n newMeetup = meetup.create_meetup()\n newMeetup = json.loads(newMeetup)\n return ({'status': 201, 'meetup': newMeetup}, 201)\n\n def get(self, current_user):\n \"\"\"Fetch all meetups\"\"\"\n meetups = MeetupModels.get_all(self)\n meetups = json.loads(meetups)\n if not meetups:\n return ({'status': 404, 'error': 'No meetups posted yet'}, 404)\n return ({'status': 200, 'data': meetups}, 200)\n", "source": "the_stack_v2_python_sparse", "source_path": "app/api/v2/views/meetup_views.py", "source_repo": "matthenge/Questioner-api-v2", "split": "val", "star_events_count": 0} {"blob_id": "0a988ab8be2eca97af790a6bc193eeee092c90ba", "bodies": ["m, n = (len(obstacleGrid), len(obstacleGrid[0]))\ndp = [[0 for _ in range(n)] for _ in range(m)]\ndp[0][0] = 0 if obstacleGrid[0][0] else 1\nfor i in range(m):\n for j in range(n):\n if obstacleGrid[i][j]:\n dp[i][j] = 0\n else:\n if i:\n dp[i][j] += dp[i - 1][j]\n if j:\n dp[i][j] += dp[i][j - 1]\nreturn dp[-1][-1]", "m, n = (len(obstacleGrid), len(obstacleGrid[0]))\ndp = [0] * n\ndp[0] = 0 if obstacleGrid[0][0] else 1\nfor i in range(m):\n for j in range(n):\n if obstacleGrid[i][j]:\n dp[j] = 0\n elif j and obstacleGrid[i][j - 1] == 0:\n dp[j] += dp[j - 1]\nreturn dp[n - 1]"], "bodies_text": "<|body_start_0|>\n m, n = 
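A detail worth flagging in the AllMeetups record just closed: post() computes args = self.parser.parse_args() and then immediately rebinds args to request.get_json(), so the required=True checks and per-field help messages never guard the values actually read, and a request missing a key fails with a KeyError (an HTTP 500) rather than the intended 400. A hedged sketch of the opening as presumably intended, keeping the record's names:

    def post(self, current_user):
        """Create meetup endpoint (sketch)."""
        args = self.parser.parse_args()  # 400 plus the field's help text when a key is absent
        location = args['location']
        images = args['images']
        topic = args['topic']
        happeningOn = args['happeningOn']
        tags = args['tags']
        # ... the date/topic/location/time validation, the duplicate check and
        # the MeetupModels creation then continue exactly as in the record.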
(len(obstacleGrid), len(obstacleGrid[0]))\n dp = [[0 for _ in range(n)] for _ in range(m)]\n dp[0][0] = 0 if obstacleGrid[0][0] else 1\n for i in range(m):\n for j in range(n):\n if obstacleGrid[i][j]:\n dp[i][j] = 0\n else:\n if i:\n dp[i][j] += dp[i - 1][j]\n if j:\n dp[i][j] += dp[i][j - 1]\n return dp[-1][-1]\n<|end_body_0|>\n\n<|body_start_1|>\n m, n = (len(obstacleGrid), len(obstacleGrid[0]))\n dp = [0] * n\n dp[0] = 0 if obstacleGrid[0][0] else 1\n for i in range(m):\n for j in range(n):\n if obstacleGrid[i][j]:\n dp[j] = 0\n elif j and obstacleGrid[i][j - 1] == 0:\n dp[j] += dp[j - 1]\n return dp[n - 1]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def uniquePathsWithObstacles1(self, obstacleGrid: List[List[int]]) -> int:\n \"\"\"典型DP\"\"\"\n <|body_0|>\n\n def uniquePathsWithObstacles2(self, obstacleGrid: List[List[int]]) -> int:\n \"\"\"二维数组压缩成一维数组 dp[j] = dp[j] + dp[j - 1] new dp[j] = old dp[j] + dp[j-1] current cell = top cell + left cell\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m, n = (len(obstacleGrid), len(obstacleGrid[0]))\n dp = [[0 for _ in range(n)] for _ in range(m)]\n dp[0][0] = 0 if obstacleGrid[0][0] else 1\n for i in range(m):\n for j in range(n):\n if obstacleGrid[i][j]:\n dp[i][j] = 0\n else:\n if i:\n dp[i][j] += dp[i - 1][j]\n if j:\n dp[i][j] += dp[i][j - 1]\n return dp[-1][-1]\n<|end_body_0|>\n\n<|body_start_1|>\n m, n = (len(obstacleGrid), len(obstacleGrid[0]))\n dp = [0] * n\n dp[0] = 0 if obstacleGrid[0][0] else 1\n for i in range(m):\n for j in range(n):\n if obstacleGrid[i][j]:\n dp[j] = 0\n elif j and obstacleGrid[i][j - 1] == 0:\n dp[j] += dp[j - 1]\n return dp[n - 1]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000260", "length_bytes": 1748, "license_type": "no_license", "methods": [{"docstring": "典型DP", "name": "uniquePathsWithObstacles1", "signature": "def uniquePathsWithObstacles1(self, obstacleGrid: List[List[int]]) -> int"}, {"docstring": "二维数组压缩成一维数组 dp[j] = dp[j] + dp[j - 1] new dp[j] = old dp[j] + dp[j-1] current cell = top cell + left cell", "name": "uniquePathsWithObstacles2", "signature": "def uniquePathsWithObstacles2(self, obstacleGrid: List[List[int]]) -> int"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def uniquePathsWithObstacles1(self, obstacleGrid: List[List[int]]) -> int: 典型DP\n- def uniquePathsWithObstacles2(self, obstacleGrid: List[List[int]]) -> int: 二维数组压缩成一维数组 dp[j] = dp[j] + dp[j - 1] new dp[j] = old dp[j] + dp[j-1] current cell = top cell + left cell", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def uniquePathsWithObstacles1(self, obstacleGrid: List[List[int]]) -> int: 典型DP\n- def uniquePathsWithObstacles2(self, obstacleGrid: List[List[int]]) -> int: 二维数组压缩成一维数组 dp[j] = dp[j] + dp[j - 1] new dp[j] = old dp[j] + dp[j-1] current cell = top cell + left cell\n\n<|skeleton|>\nclass Solution:\n\n def uniquePathsWithObstacles1(self, obstacleGrid: List[List[int]]) -> int:\n \"\"\"典型DP\"\"\"\n <|body_0|>\n\n def uniquePathsWithObstacles2(self, obstacleGrid: List[List[int]]) -> int:\n \"\"\"二维数组压缩成一维数组 dp[j] = dp[j] + dp[j - 1] new dp[j] = old dp[j] + dp[j-1] current cell = top cell + left 
cell\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m, n = (len(obstacleGrid), len(obstacleGrid[0]))\n dp = [[0 for _ in range(n)] for _ in range(m)]\n dp[0][0] = 0 if obstacleGrid[0][0] else 1\n for i in range(m):\n for j in range(n):\n if obstacleGrid[i][j]:\n dp[i][j] = 0\n else:\n if i:\n dp[i][j] += dp[i - 1][j]\n if j:\n dp[i][j] += dp[i][j - 1]\n return dp[-1][-1]\n<|end_body_0|>\n\n<|body_start_1|>\n m, n = (len(obstacleGrid), len(obstacleGrid[0]))\n dp = [0] * n\n dp[0] = 0 if obstacleGrid[0][0] else 1\n for i in range(m):\n for j in range(n):\n if obstacleGrid[i][j]:\n dp[j] = 0\n elif j and obstacleGrid[i][j - 1] == 0:\n dp[j] += dp[j - 1]\n return dp[n - 1]\n<|end_body_1|>\n", "revision_id": "2bbb1640589aab34f2bc42489283033cc11fb885", "skeleton": "<|skeleton|>\nclass Solution:\n\n def uniquePathsWithObstacles1(self, obstacleGrid: List[List[int]]) -> int:\n \"\"\"典型DP\"\"\"\n <|body_0|>\n\n def uniquePathsWithObstacles2(self, obstacleGrid: List[List[int]]) -> int:\n \"\"\"二维数组压缩成一维数组 dp[j] = dp[j] + dp[j - 1] new dp[j] = old dp[j] + dp[j-1] current cell = top cell + left cell\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def uniquePathsWithObstacles1(self, obstacleGrid: List[List[int]]) -> int:\n \"\"\"典型DP\"\"\"\n m, n = (len(obstacleGrid), len(obstacleGrid[0]))\n dp = [[0 for _ in range(n)] for _ in range(m)]\n dp[0][0] = 0 if obstacleGrid[0][0] else 1\n for i in range(m):\n for j in range(n):\n if obstacleGrid[i][j]:\n dp[i][j] = 0\n else:\n if i:\n dp[i][j] += dp[i - 1][j]\n if j:\n dp[i][j] += dp[i][j - 1]\n return dp[-1][-1]\n\n def uniquePathsWithObstacles2(self, obstacleGrid: List[List[int]]) -> int:\n \"\"\"二维数组压缩成一维数组 dp[j] = dp[j] + dp[j - 1] new dp[j] = old dp[j] + dp[j-1] current cell = top cell + left cell\"\"\"\n m, n = (len(obstacleGrid), len(obstacleGrid[0]))\n dp = [0] * n\n dp[0] = 0 if obstacleGrid[0][0] else 1\n for i in range(m):\n for j in range(n):\n if obstacleGrid[i][j]:\n dp[j] = 0\n elif j and obstacleGrid[i][j - 1] == 0:\n dp[j] += dp[j - 1]\n return dp[n - 1]\n", "source": "the_stack_v2_python_sparse", "source_path": "063_unique-paths-ii.py", "source_repo": "helloocc/algorithm", "split": "val", "star_events_count": 1} {"blob_id": "bbe3a6b06258fd0296926f68f0b2b50646e87392", "bodies": ["super().__init__()\nself.factory = factory\nself.task_queue = task_queue\nself.done_queue = done_queue\nself.batch_queue = batch_queue\nif args is None:\n self.args = []\nelse:\n self.args = args\nif kwargs is None:\n self.kwargs = []\nelse:\n self.kwargs = kwargs", "super().run()\nwhile True:\n filename = self.task_queue.get()\n if filename is None:\n break\n try:\n dataset = self.factory(filename, *self.args, **self.kwargs)\n if isinstance(dataset, Iterable):\n for b in dataset:\n self.batch_queue.put(b)\n elif hasattr(dataset, '__len__') and hasattr(dataset, '__getitem__'):\n for i in range(len(dataset)):\n self.batch_queue.put(dataset[i])\n else:\n raise TypeError(\"Provided dataset is neither iterable nor does it implement '__getitem__' and '__len__' methods.\")\n except Exception as e:\n _LOGGER.error('Error encountered in dataset loader: %s', e)\n self.done_queue.put(filename)"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.factory = factory\n self.task_queue = task_queue\n self.done_queue = done_queue\n self.batch_queue = batch_queue\n if args is None:\n self.args = []\n else:\n self.args = args\n if 
kwargs is None:\n self.kwargs = []\n else:\n self.kwargs = kwargs\n<|end_body_0|>\n\n<|body_start_1|>\n super().run()\n while True:\n filename = self.task_queue.get()\n if filename is None:\n break\n try:\n dataset = self.factory(filename, *self.args, **self.kwargs)\n if isinstance(dataset, Iterable):\n for b in dataset:\n self.batch_queue.put(b)\n elif hasattr(dataset, '__len__') and hasattr(dataset, '__getitem__'):\n for i in range(len(dataset)):\n self.batch_queue.put(dataset[i])\n else:\n raise TypeError(\"Provided dataset is neither iterable nor does it implement '__getitem__' and '__len__' methods.\")\n except Exception as e:\n _LOGGER.error('Error encountered in dataset loader: %s', e)\n self.done_queue.put(filename)\n<|end_body_1|>\n", "class_docstring": "The active dataset class takes care of concurrent reading of data from a dataset.", "class_name": "DatasetLoader", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DatasetLoader:\n \"\"\"The active dataset class takes care of concurrent reading of data from a dataset.\"\"\"\n\n def __init__(self, factory, task_queue, done_queue, batch_queue, args=None, kwargs=None):\n \"\"\"Args: factory: Class or factory function to use to open the dataset. filename: Filename of the dataset file to open. batch_queue: Queue on which to put the loaded batches. args: List of positional arguments to pass to the dataset factory following the dataset name. kwargs: Dictionary of keyword arguments to pass to the dataset factory following the dataset name.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Open dataset and start loading batches.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.factory = factory\n self.task_queue = task_queue\n self.done_queue = done_queue\n self.batch_queue = batch_queue\n if args is None:\n self.args = []\n else:\n self.args = args\n if kwargs is None:\n self.kwargs = []\n else:\n self.kwargs = kwargs\n<|end_body_0|>\n\n<|body_start_1|>\n super().run()\n while True:\n filename = self.task_queue.get()\n if filename is None:\n break\n try:\n dataset = self.factory(filename, *self.args, **self.kwargs)\n if isinstance(dataset, Iterable):\n for b in dataset:\n self.batch_queue.put(b)\n elif hasattr(dataset, '__len__') and hasattr(dataset, '__getitem__'):\n for i in range(len(dataset)):\n self.batch_queue.put(dataset[i])\n else:\n raise TypeError(\"Provided dataset is neither iterable nor does it implement '__getitem__' and '__len__' methods.\")\n except Exception as e:\n _LOGGER.error('Error encountered in dataset loader: %s', e)\n self.done_queue.put(filename)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000261", "length_bytes": 17927, "license_type": "permissive", "methods": [{"docstring": "Args: factory: Class or factory function to use to open the dataset. filename: Filename of the dataset file to open. batch_queue: Queue on which to put the loaded batches. args: List of positional arguments to pass to the dataset factory following the dataset name. 
kwargs: Dictionary of keyword arguments to pass to the dataset factory following the dataset name.", "name": "__init__", "signature": "def __init__(self, factory, task_queue, done_queue, batch_queue, args=None, kwargs=None)"}, {"docstring": "Open dataset and start loading batches.", "name": "run", "signature": "def run(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003052", "prompt": "Implement the Python class `DatasetLoader` described below.\n\nClass description:\nThe active dataset class takes care of concurrent reading of data from a dataset.\n\nMethod signatures and docstrings:\n- def __init__(self, factory, task_queue, done_queue, batch_queue, args=None, kwargs=None): Args: factory: Class or factory function to use to open the dataset. filename: Filename of the dataset file to open. batch_queue: Queue on which to put the loaded batches. args: List of positional arguments to pass to the dataset factory following the dataset name. kwargs: Dictionary of keyword arguments to pass to the dataset factory following the dataset name.\n- def run(self): Open dataset and start loading batches.", "prompted_full_text": "Implement the Python class `DatasetLoader` described below.\n\nClass description:\nThe active dataset class takes care of concurrent reading of data from a dataset.\n\nMethod signatures and docstrings:\n- def __init__(self, factory, task_queue, done_queue, batch_queue, args=None, kwargs=None): Args: factory: Class or factory function to use to open the dataset. filename: Filename of the dataset file to open. batch_queue: Queue on which to put the loaded batches. args: List of positional arguments to pass to the dataset factory following the dataset name. kwargs: Dictionary of keyword arguments to pass to the dataset factory following the dataset name.\n- def run(self): Open dataset and start loading batches.\n\n<|skeleton|>\nclass DatasetLoader:\n \"\"\"The active dataset class takes care of concurrent reading of data from a dataset.\"\"\"\n\n def __init__(self, factory, task_queue, done_queue, batch_queue, args=None, kwargs=None):\n \"\"\"Args: factory: Class or factory function to use to open the dataset. filename: Filename of the dataset file to open. batch_queue: Queue on which to put the loaded batches. args: List of positional arguments to pass to the dataset factory following the dataset name. 
kwargs: Dictionary of keyword arguments to pass to the dataset factory following the dataset name.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Open dataset and start loading batches.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.factory = factory\n self.task_queue = task_queue\n self.done_queue = done_queue\n self.batch_queue = batch_queue\n if args is None:\n self.args = []\n else:\n self.args = args\n if kwargs is None:\n self.kwargs = []\n else:\n self.kwargs = kwargs\n<|end_body_0|>\n\n<|body_start_1|>\n super().run()\n while True:\n filename = self.task_queue.get()\n if filename is None:\n break\n try:\n dataset = self.factory(filename, *self.args, **self.kwargs)\n if isinstance(dataset, Iterable):\n for b in dataset:\n self.batch_queue.put(b)\n elif hasattr(dataset, '__len__') and hasattr(dataset, '__getitem__'):\n for i in range(len(dataset)):\n self.batch_queue.put(dataset[i])\n else:\n raise TypeError(\"Provided dataset is neither iterable nor does it implement '__getitem__' and '__len__' methods.\")\n except Exception as e:\n _LOGGER.error('Error encountered in dataset loader: %s', e)\n self.done_queue.put(filename)\n<|end_body_1|>\n", "revision_id": "a27e329cd30337995c359160a0d878bf331c13fb", "skeleton": "<|skeleton|>\nclass DatasetLoader:\n \"\"\"The active dataset class takes care of concurrent reading of data from a dataset.\"\"\"\n\n def __init__(self, factory, task_queue, done_queue, batch_queue, args=None, kwargs=None):\n \"\"\"Args: factory: Class or factory function to use to open the dataset. filename: Filename of the dataset file to open. batch_queue: Queue on which to put the loaded batches. args: List of positional arguments to pass to the dataset factory following the dataset name. kwargs: Dictionary of keyword arguments to pass to the dataset factory following the dataset name.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Open dataset and start loading batches.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DatasetLoader:\n \"\"\"The active dataset class takes care of concurrent reading of data from a dataset.\"\"\"\n\n def __init__(self, factory, task_queue, done_queue, batch_queue, args=None, kwargs=None):\n \"\"\"Args: factory: Class or factory function to use to open the dataset. filename: Filename of the dataset file to open. batch_queue: Queue on which to put the loaded batches. args: List of positional arguments to pass to the dataset factory following the dataset name. 
kwargs: Dictionary of keyword arguments to pass to the dataset factory following the dataset name.\"\"\"\n super().__init__()\n self.factory = factory\n self.task_queue = task_queue\n self.done_queue = done_queue\n self.batch_queue = batch_queue\n if args is None:\n self.args = []\n else:\n self.args = args\n if kwargs is None:\n self.kwargs = []\n else:\n self.kwargs = kwargs\n\n def run(self):\n \"\"\"Open dataset and start loading batches.\"\"\"\n super().run()\n while True:\n filename = self.task_queue.get()\n if filename is None:\n break\n try:\n dataset = self.factory(filename, *self.args, **self.kwargs)\n if isinstance(dataset, Iterable):\n for b in dataset:\n self.batch_queue.put(b)\n elif hasattr(dataset, '__len__') and hasattr(dataset, '__getitem__'):\n for i in range(len(dataset)):\n self.batch_queue.put(dataset[i])\n else:\n raise TypeError(\"Provided dataset is neither iterable nor does it implement '__getitem__' and '__len__' methods.\")\n except Exception as e:\n _LOGGER.error('Error encountered in dataset loader: %s', e)\n self.done_queue.put(filename)\n", "source": "the_stack_v2_python_sparse", "source_path": "quantnn/data.py", "source_repo": "simonpf/quantnn", "split": "val", "star_events_count": 7} {"blob_id": "9b3e4fb6f9b6641c115f1d04f49acd7cc338edb0", "bodies": ["ret = []\n\ndef doSerialize(root):\n if root == None:\n ret.append('None')\n return\n ret.append(str(root.val))\n ret.append(str(len(root.children) if root.children else 0))\n for i in range(len(root.children)):\n doSerialize(root.children[i])\ndoSerialize(root)\nprint(','.join(ret))\nreturn ','.join(ret)", "token = [int(t) if t != 'None' else None for t in data.split(',')]\nprint(token)\n\ndef doDeserialize(token):\n if token[0] == None:\n token.pop(0)\n return None\n value = token.pop(0)\n child_len = token.pop(0)\n node = Node(value, [None] * child_len)\n for i in range(child_len):\n node.children[i] = doDeserialize(token)\n return node\nreturn doDeserialize(token)"], "bodies_text": "<|body_start_0|>\n ret = []\n\n def doSerialize(root):\n if root == None:\n ret.append('None')\n return\n ret.append(str(root.val))\n ret.append(str(len(root.children) if root.children else 0))\n for i in range(len(root.children)):\n doSerialize(root.children[i])\n doSerialize(root)\n print(','.join(ret))\n return ','.join(ret)\n<|end_body_0|>\n\n<|body_start_1|>\n token = [int(t) if t != 'None' else None for t in data.split(',')]\n print(token)\n\n def doDeserialize(token):\n if token[0] == None:\n token.pop(0)\n return None\n value = token.pop(0)\n child_len = token.pop(0)\n node = Node(value, [None] * child_len)\n for i in range(child_len):\n node.children[i] = doDeserialize(token)\n return node\n return doDeserialize(token)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: 'Node') -> str:\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> 'Node':\n \"\"\"Decodes your encoded data to tree. 
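One real bug hides in the DatasetLoader record's __init__: when kwargs is None it falls back to an empty list, but run() unpacks it as **self.kwargs, which requires a mapping, so a loader built without explicit kwargs raises TypeError at the factory call for every file (caught and merely logged by run()'s broad except, which makes it easy to miss). The args default is fine, since [] unpacks with *; only the kwargs default needs to be a dict. A hedged fix, leaving everything else as in the record (whose base class, judging by its super().__init__() and super().run() calls, is presumably threading.Thread or multiprocessing.Process):

    def __init__(self, factory, task_queue, done_queue, batch_queue,
                 args=None, kwargs=None):
        super().__init__()
        self.factory = factory
        self.task_queue = task_queue
        self.done_queue = done_queue
        self.batch_queue = batch_queue
        self.args = [] if args is None else args        # consumed as *self.args
        self.kwargs = {} if kwargs is None else kwargs  # must be a dict for **self.kwargs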
:type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ret = []\n\n def doSerialize(root):\n if root == None:\n ret.append('None')\n return\n ret.append(str(root.val))\n ret.append(str(len(root.children) if root.children else 0))\n for i in range(len(root.children)):\n doSerialize(root.children[i])\n doSerialize(root)\n print(','.join(ret))\n return ','.join(ret)\n<|end_body_0|>\n\n<|body_start_1|>\n token = [int(t) if t != 'None' else None for t in data.split(',')]\n print(token)\n\n def doDeserialize(token):\n if token[0] == None:\n token.pop(0)\n return None\n value = token.pop(0)\n child_len = token.pop(0)\n node = Node(value, [None] * child_len)\n for i in range(child_len):\n node.children[i] = doDeserialize(token)\n return node\n return doDeserialize(token)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000262", "length_bytes": 1444, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: Node :rtype: str", "name": "serialize", "signature": "def serialize(self, root: 'Node') -> str"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: Node", "name": "deserialize", "signature": "def deserialize(self, data: str) -> 'Node'"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000332", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: 'Node') -> str: Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data: str) -> 'Node': Decodes your encoded data to tree. :type data: str :rtype: Node", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: 'Node') -> str: Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data: str) -> 'Node': Decodes your encoded data to tree. :type data: str :rtype: Node\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root: 'Node') -> str:\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> 'Node':\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ret = []\n\n def doSerialize(root):\n if root == None:\n ret.append('None')\n return\n ret.append(str(root.val))\n ret.append(str(len(root.children) if root.children else 0))\n for i in range(len(root.children)):\n doSerialize(root.children[i])\n doSerialize(root)\n print(','.join(ret))\n return ','.join(ret)\n<|end_body_0|>\n\n<|body_start_1|>\n token = [int(t) if t != 'None' else None for t in data.split(',')]\n print(token)\n\n def doDeserialize(token):\n if token[0] == None:\n token.pop(0)\n return None\n value = token.pop(0)\n child_len = token.pop(0)\n node = Node(value, [None] * child_len)\n for i in range(child_len):\n node.children[i] = doDeserialize(token)\n return node\n return doDeserialize(token)\n<|end_body_1|>\n", "revision_id": "fe30d8ca54309caff975684648495ea953022048", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: 'Node') -> str:\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> 'Node':\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root: 'Node') -> str:\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n ret = []\n\n def doSerialize(root):\n if root == None:\n ret.append('None')\n return\n ret.append(str(root.val))\n ret.append(str(len(root.children) if root.children else 0))\n for i in range(len(root.children)):\n doSerialize(root.children[i])\n doSerialize(root)\n print(','.join(ret))\n return ','.join(ret)\n\n def deserialize(self, data: str) -> 'Node':\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n token = [int(t) if t != 'None' else None for t in data.split(',')]\n print(token)\n\n def doDeserialize(token):\n if token[0] == None:\n token.pop(0)\n return None\n value = token.pop(0)\n child_len = token.pop(0)\n node = Node(value, [None] * child_len)\n for i in range(child_len):\n node.children[i] = doDeserialize(token)\n return node\n return doDeserialize(token)\n", "source": "the_stack_v2_python_sparse", "source_path": "algorithm/leetCode/0428_serialize_and_deserialize_N-ary_Tree.py", "source_repo": "dictator-x/practise_as", "split": "val", "star_events_count": 0} {"blob_id": "fc172eb3354166984cc3c6c08ee8c531dfccc707", "bodies": ["project = kwargs.pop('project', None)\nsuper(self.__class__, self).__init__(*args, **kwargs)\nself.fields['parent'].queryset = Task.objects.filter(project=project)\nself.fields['type'].queryset = Type.objects.filter(is_project_type=True)\nself.fields['owner'].queryset = User.objects.filter(is_active=True, is_staff=False).order_by('username')\ntry:\n self.fields['milestone'].queryset = project.milestone.filter(category=Milestone.ENGINEERING)\n self.fields['assigned_resources'].queryset = project.team.filter(is_active=True, is_staff=False).order_by('username')\nexcept BaseException:\n self.fields['assigned_resources'].queryset = User.objects.filter(is_active=True, is_staff=False).order_by('username')", "task = super(TaskForm, self).save(commit=False)\ntask.project = project\ntask.editor = user\nif not task.id:\n task.author = user\n task.created_at = datetime.now()\nif commit:\n task.save()\n\ndef assign_resource(resource):\n return task.assigned_resources.add(resource)\nmap(assign_resource, self.cleaned_data['assigned_resources'])\nreturn task"], "bodies_text": "<|body_start_0|>\n project = kwargs.pop('project', None)\n super(self.__class__, self).__init__(*args, **kwargs)\n self.fields['parent'].queryset = Task.objects.filter(project=project)\n self.fields['type'].queryset = Type.objects.filter(is_project_type=True)\n self.fields['owner'].queryset = User.objects.filter(is_active=True, is_staff=False).order_by('username')\n try:\n self.fields['milestone'].queryset = project.milestone.filter(category=Milestone.ENGINEERING)\n self.fields['assigned_resources'].queryset = project.team.filter(is_active=True, is_staff=False).order_by('username')\n except BaseException:\n self.fields['assigned_resources'].queryset = User.objects.filter(is_active=True, is_staff=False).order_by('username')\n<|end_body_0|>\n\n<|body_start_1|>\n task = super(TaskForm, self).save(commit=False)\n task.project = project\n task.editor = user\n if not task.id:\n task.author = user\n task.created_at = datetime.now()\n if commit:\n task.save()\n\n def assign_resource(resource):\n return task.assigned_resources.add(resource)\n 
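The Codec record serializes in preorder as alternating value, child-count tokens, so the decoder never needs sentinels between siblings; the 'None' token only appears for an empty tree. A small round-trip check, assuming the usual N-ary Node definition (which the record does not include):

    class Node:
        def __init__(self, val=None, children=None):
            self.val = val
            self.children = children if children is not None else []

    codec = Codec()
    root = Node(1, [Node(3, [Node(5), Node(6)]), Node(2), Node(4)])
    data = codec.serialize(root)          # '1,3,3,2,5,0,6,0,2,0,4,0'
    clone = codec.deserialize(data)
    assert clone.val == 1
    assert [c.val for c in clone.children] == [3, 2, 4]
    assert [c.val for c in clone.children[0].children] == [5, 6]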
map(assign_resource, self.cleaned_data['assigned_resources'])\n return task\n<|end_body_1|>\n", "class_docstring": "Form representing task model", "class_name": "TaskForm", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TaskForm:\n \"\"\"Form representing task model\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Overriden init method to have add project related data to fields\"\"\"\n <|body_0|>\n\n def save(self, user, project, commit=True):\n \"\"\"Overriden save method to save virtual field which are not displayed to user\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n project = kwargs.pop('project', None)\n super(self.__class__, self).__init__(*args, **kwargs)\n self.fields['parent'].queryset = Task.objects.filter(project=project)\n self.fields['type'].queryset = Type.objects.filter(is_project_type=True)\n self.fields['owner'].queryset = User.objects.filter(is_active=True, is_staff=False).order_by('username')\n try:\n self.fields['milestone'].queryset = project.milestone.filter(category=Milestone.ENGINEERING)\n self.fields['assigned_resources'].queryset = project.team.filter(is_active=True, is_staff=False).order_by('username')\n except BaseException:\n self.fields['assigned_resources'].queryset = User.objects.filter(is_active=True, is_staff=False).order_by('username')\n<|end_body_0|>\n\n<|body_start_1|>\n task = super(TaskForm, self).save(commit=False)\n task.project = project\n task.editor = user\n if not task.id:\n task.author = user\n task.created_at = datetime.now()\n if commit:\n task.save()\n\n def assign_resource(resource):\n return task.assigned_resources.add(resource)\n map(assign_resource, self.cleaned_data['assigned_resources'])\n return task\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000263", "length_bytes": 5361, "license_type": "no_license", "methods": [{"docstring": "Overriden init method to have add project related data to fields", "name": "__init__", "signature": "def __init__(self, *args, **kwargs)"}, {"docstring": "Overriden save method to save virtual field which are not displayed to user", "name": "save", "signature": "def save(self, user, project, commit=True)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006504", "prompt": "Implement the Python class `TaskForm` described below.\n\nClass description:\nForm representing task model\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Overriden init method to have add project related data to fields\n- def save(self, user, project, commit=True): Overriden save method to save virtual field which are not displayed to user", "prompted_full_text": "Implement the Python class `TaskForm` described below.\n\nClass description:\nForm representing task model\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Overriden init method to have add project related data to fields\n- def save(self, user, project, commit=True): Overriden save method to save virtual field which are not displayed to user\n\n<|skeleton|>\nclass TaskForm:\n \"\"\"Form representing task model\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Overriden init method to have add project related data to fields\"\"\"\n <|body_0|>\n\n def save(self, user, project, commit=True):\n \"\"\"Overriden save method to save virtual field which are not displayed to user\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n project = kwargs.pop('project', None)\n super(self.__class__, 
self).__init__(*args, **kwargs)\n self.fields['parent'].queryset = Task.objects.filter(project=project)\n self.fields['type'].queryset = Type.objects.filter(is_project_type=True)\n self.fields['owner'].queryset = User.objects.filter(is_active=True, is_staff=False).order_by('username')\n try:\n self.fields['milestone'].queryset = project.milestone.filter(category=Milestone.ENGINEERING)\n self.fields['assigned_resources'].queryset = project.team.filter(is_active=True, is_staff=False).order_by('username')\n except BaseException:\n self.fields['assigned_resources'].queryset = User.objects.filter(is_active=True, is_staff=False).order_by('username')\n<|end_body_0|>\n\n<|body_start_1|>\n task = super(TaskForm, self).save(commit=False)\n task.project = project\n task.editor = user\n if not task.id:\n task.author = user\n task.created_at = datetime.now()\n if commit:\n task.save()\n\n def assign_resource(resource):\n return task.assigned_resources.add(resource)\n map(assign_resource, self.cleaned_data['assigned_resources'])\n return task\n<|end_body_1|>\n", "revision_id": "7a337e0e3a20180b9564de68ab22620dc9aa1a36", "skeleton": "<|skeleton|>\nclass TaskForm:\n \"\"\"Form representing task model\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Overriden init method to have add project related data to fields\"\"\"\n <|body_0|>\n\n def save(self, user, project, commit=True):\n \"\"\"Overriden save method to save virtual field which are not displayed to user\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TaskForm:\n \"\"\"Form representing task model\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Overriden init method to have add project related data to fields\"\"\"\n project = kwargs.pop('project', None)\n super(self.__class__, self).__init__(*args, **kwargs)\n self.fields['parent'].queryset = Task.objects.filter(project=project)\n self.fields['type'].queryset = Type.objects.filter(is_project_type=True)\n self.fields['owner'].queryset = User.objects.filter(is_active=True, is_staff=False).order_by('username')\n try:\n self.fields['milestone'].queryset = project.milestone.filter(category=Milestone.ENGINEERING)\n self.fields['assigned_resources'].queryset = project.team.filter(is_active=True, is_staff=False).order_by('username')\n except BaseException:\n self.fields['assigned_resources'].queryset = User.objects.filter(is_active=True, is_staff=False).order_by('username')\n\n def save(self, user, project, commit=True):\n \"\"\"Overriden save method to save virtual field which are not displayed to user\"\"\"\n task = super(TaskForm, self).save(commit=False)\n task.project = project\n task.editor = user\n if not task.id:\n task.author = user\n task.created_at = datetime.now()\n if commit:\n task.save()\n\n def assign_resource(resource):\n return task.assigned_resources.add(resource)\n map(assign_resource, self.cleaned_data['assigned_resources'])\n return task\n", "source": "the_stack_v2_python_sparse", "source_path": "project_management/tasks/forms.py", "source_repo": "raveena17/ILASM", "split": "val", "star_events_count": 0} {"blob_id": "ecbde05a244375c231cf86db7fa2c6744efdcef8", "bodies": ["strflag = ''\nfor ibit, name in enumerate(self.name):\n if np.uint64(val) & np.uint64(2 ** ibit) > 0 and (level == 0 or self.level == level):\n strflag = strflag + name + ','\nif strip:\n return strflag.strip(',')\nelse:\n return strflag", "if type(name) is str:\n name = [name]\nbitval = 
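The TaskForm record's save() ends with map(assign_resource, self.cleaned_data['assigned_resources']); in Python 3 map is lazy, the returned iterator is never consumed, and consequently no resource is ever attached (under Python 2 the same line did run eagerly, which is probably how it survived). The loop also belongs under commit, since adding to a many-to-many relation needs a saved instance with a primary key. A hedged rewrite keeping the record's names (datetime is assumed imported in its module):

    def save(self, user, project, commit=True):
        """Overridden save; the resource loop now actually executes (sketch)."""
        task = super(TaskForm, self).save(commit=False)
        task.project = project
        task.editor = user
        if not task.id:
            task.author = user
            task.created_at = datetime.now()
        if commit:
            task.save()
            # Iterate explicitly: a bare map() builds an unconsumed iterator in Python 3.
            for resource in self.cleaned_data['assigned_resources']:
                task.assigned_resources.add(resource)
        return task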
np.uint64(0)\nfor n in name:\n try:\n j = self.name.index(n.strip())\n bitval |= np.uint64(2 ** j)\n except:\n print('WARNING: undefined name: ', n)\nreturn bitval", "val = np.uint64(0)\nfor i, level in enumerate(self.level):\n if level == 1:\n val = val | np.uint64(2 ** i)\nreturn val", "val = np.uint64(0)\nfor i, level in enumerate(self.level):\n if level == 2:\n val = val | np.uint64(2 ** i)\nreturn val"], "bodies_text": "<|body_start_0|>\n strflag = ''\n for ibit, name in enumerate(self.name):\n if np.uint64(val) & np.uint64(2 ** ibit) > 0 and (level == 0 or self.level == level):\n strflag = strflag + name + ','\n if strip:\n return strflag.strip(',')\n else:\n return strflag\n<|end_body_0|>\n\n<|body_start_1|>\n if type(name) is str:\n name = [name]\n bitval = np.uint64(0)\n for n in name:\n try:\n j = self.name.index(n.strip())\n bitval |= np.uint64(2 ** j)\n except:\n print('WARNING: undefined name: ', n)\n return bitval\n<|end_body_1|>\n\n<|body_start_2|>\n val = np.uint64(0)\n for i, level in enumerate(self.level):\n if level == 1:\n val = val | np.uint64(2 ** i)\n return val\n<|end_body_2|>\n\n<|body_start_3|>\n val = np.uint64(0)\n for i, level in enumerate(self.level):\n if level == 2:\n val = val | np.uint64(2 ** i)\n return val\n<|end_body_3|>\n", "class_docstring": "Base class for bitmasks.", "class_name": "BitMask", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BitMask:\n \"\"\"Base class for bitmasks.\"\"\"\n\n def get_name(self, val, level=0, strip=True):\n \"\"\"Given input value, returns names of all set bits, optionally of a given level\"\"\"\n <|body_0|>\n\n def get_value(self, name):\n \"\"\"Get the numerical bit value of a given character name(s)\"\"\"\n <|body_1|>\n\n def bad_value(self):\n \"\"\"Return bitmask value of all bits that indicate BAD in input bitmask\"\"\"\n <|body_2|>\n\n def warn_value(self):\n \"\"\"Return bitmask value of all bits that indicate BAD in input bitmask\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n strflag = ''\n for ibit, name in enumerate(self.name):\n if np.uint64(val) & np.uint64(2 ** ibit) > 0 and (level == 0 or self.level == level):\n strflag = strflag + name + ','\n if strip:\n return strflag.strip(',')\n else:\n return strflag\n<|end_body_0|>\n\n<|body_start_1|>\n if type(name) is str:\n name = [name]\n bitval = np.uint64(0)\n for n in name:\n try:\n j = self.name.index(n.strip())\n bitval |= np.uint64(2 ** j)\n except:\n print('WARNING: undefined name: ', n)\n return bitval\n<|end_body_1|>\n\n<|body_start_2|>\n val = np.uint64(0)\n for i, level in enumerate(self.level):\n if level == 1:\n val = val | np.uint64(2 ** i)\n return val\n<|end_body_2|>\n\n<|body_start_3|>\n val = np.uint64(0)\n for i, level in enumerate(self.level):\n if level == 2:\n val = val | np.uint64(2 ** i)\n return val\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000264", "length_bytes": 1714, "license_type": "permissive", "methods": [{"docstring": "Given input value, returns names of all set bits, optionally of a given level", "name": "get_name", "signature": "def get_name(self, val, level=0, strip=True)"}, {"docstring": "Get the numerical bit value of a given character name(s)", "name": "get_value", "signature": "def get_value(self, name)"}, {"docstring": "Return bitmask value of all bits that indicate BAD in input bitmask", "name": "bad_value", "signature": "def bad_value(self)"}, {"docstring": "Return bitmask value of all bits that indicate BAD 
in input bitmask", "name": "warn_value", "signature": "def warn_value(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_000595", "prompt": "Implement the Python class `BitMask` described below.\n\nClass description:\nBase class for bitmasks.\n\nMethod signatures and docstrings:\n- def get_name(self, val, level=0, strip=True): Given input value, returns names of all set bits, optionally of a given level\n- def get_value(self, name): Get the numerical bit value of a given character name(s)\n- def bad_value(self): Return bitmask value of all bits that indicate BAD in input bitmask\n- def warn_value(self): Return bitmask value of all bits that indicate BAD in input bitmask", "prompted_full_text": "Implement the Python class `BitMask` described below.\n\nClass description:\nBase class for bitmasks.\n\nMethod signatures and docstrings:\n- def get_name(self, val, level=0, strip=True): Given input value, returns names of all set bits, optionally of a given level\n- def get_value(self, name): Get the numerical bit value of a given character name(s)\n- def bad_value(self): Return bitmask value of all bits that indicate BAD in input bitmask\n- def warn_value(self): Return bitmask value of all bits that indicate BAD in input bitmask\n\n<|skeleton|>\nclass BitMask:\n \"\"\"Base class for bitmasks.\"\"\"\n\n def get_name(self, val, level=0, strip=True):\n \"\"\"Given input value, returns names of all set bits, optionally of a given level\"\"\"\n <|body_0|>\n\n def get_value(self, name):\n \"\"\"Get the numerical bit value of a given character name(s)\"\"\"\n <|body_1|>\n\n def bad_value(self):\n \"\"\"Return bitmask value of all bits that indicate BAD in input bitmask\"\"\"\n <|body_2|>\n\n def warn_value(self):\n \"\"\"Return bitmask value of all bits that indicate BAD in input bitmask\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n strflag = ''\n for ibit, name in enumerate(self.name):\n if np.uint64(val) & np.uint64(2 ** ibit) > 0 and (level == 0 or self.level == level):\n strflag = strflag + name + ','\n if strip:\n return strflag.strip(',')\n else:\n return strflag\n<|end_body_0|>\n\n<|body_start_1|>\n if type(name) is str:\n name = [name]\n bitval = np.uint64(0)\n for n in name:\n try:\n j = self.name.index(n.strip())\n bitval |= np.uint64(2 ** j)\n except:\n print('WARNING: undefined name: ', n)\n return bitval\n<|end_body_1|>\n\n<|body_start_2|>\n val = np.uint64(0)\n for i, level in enumerate(self.level):\n if level == 1:\n val = val | np.uint64(2 ** i)\n return val\n<|end_body_2|>\n\n<|body_start_3|>\n val = np.uint64(0)\n for i, level in enumerate(self.level):\n if level == 2:\n val = val | np.uint64(2 ** i)\n return val\n<|end_body_3|>\n", "revision_id": "3efe111c80d95b46e2f07288e98e6ee10cbcac9b", "skeleton": "<|skeleton|>\nclass BitMask:\n \"\"\"Base class for bitmasks.\"\"\"\n\n def get_name(self, val, level=0, strip=True):\n \"\"\"Given input value, returns names of all set bits, optionally of a given level\"\"\"\n <|body_0|>\n\n def get_value(self, name):\n \"\"\"Get the numerical bit value of a given character name(s)\"\"\"\n <|body_1|>\n\n def bad_value(self):\n \"\"\"Return bitmask value of all bits that indicate BAD in input bitmask\"\"\"\n <|body_2|>\n\n def warn_value(self):\n \"\"\"Return bitmask value of all bits that indicate BAD in input bitmask\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BitMask:\n \"\"\"Base class 
for bitmasks.\"\"\"\n\n def get_name(self, val, level=0, strip=True):\n \"\"\"Given input value, returns names of all set bits, optionally of a given level\"\"\"\n strflag = ''\n for ibit, name in enumerate(self.name):\n if np.uint64(val) & np.uint64(2 ** ibit) > 0 and (level == 0 or self.level == level):\n strflag = strflag + name + ','\n if strip:\n return strflag.strip(',')\n else:\n return strflag\n\n def get_value(self, name):\n \"\"\"Get the numerical bit value of a given character name(s)\"\"\"\n if type(name) is str:\n name = [name]\n bitval = np.uint64(0)\n for n in name:\n try:\n j = self.name.index(n.strip())\n bitval |= np.uint64(2 ** j)\n except:\n print('WARNING: undefined name: ', n)\n return bitval\n\n def bad_value(self):\n \"\"\"Return bitmask value of all bits that indicate BAD in input bitmask\"\"\"\n val = np.uint64(0)\n for i, level in enumerate(self.level):\n if level == 1:\n val = val | np.uint64(2 ** i)\n return val\n\n def warn_value(self):\n \"\"\"Return bitmask value of all bits that indicate BAD in input bitmask\"\"\"\n val = np.uint64(0)\n for i, level in enumerate(self.level):\n if level == 2:\n val = val | np.uint64(2 ** i)\n return val\n", "source": "the_stack_v2_python_sparse", "source_path": "python/astra/tools/bitmask.py", "source_repo": "sdss/astra", "split": "val", "star_events_count": 8} {"blob_id": "4245ade607ca2fd2e1b5250fa863d965c8f1c933", "bodies": ["if not xml:\n if xml == '':\n xml = 'empty'\n raise MetaPubError('Cannot build MetaPubObject; xml string was %s' % xml)\nself.xml = xml\nself.content = self.parse_xml(xml, root)", "if isinstance(xml, str) or isinstance(xml, bytes):\n dom = etree.XML(xml)\nelse:\n dom = etree.XML(xml)\nif root:\n return dom.find(root)\nelse:\n return dom", "elem = self.content.find(tag)\nif elem is not None:\n return elem.text\nreturn None"], "bodies_text": "<|body_start_0|>\n if not xml:\n if xml == '':\n xml = 'empty'\n raise MetaPubError('Cannot build MetaPubObject; xml string was %s' % xml)\n self.xml = xml\n self.content = self.parse_xml(xml, root)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(xml, str) or isinstance(xml, bytes):\n dom = etree.XML(xml)\n else:\n dom = etree.XML(xml)\n if root:\n return dom.find(root)\n else:\n return dom\n<|end_body_1|>\n\n<|body_start_2|>\n elem = self.content.find(tag)\n if elem is not None:\n return elem.text\n return None\n<|end_body_2|>\n", "class_docstring": "Base class for XML parsing objects (e.g. PubMedArticle)", "class_name": "MetaPubObject", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MetaPubObject:\n \"\"\"Base class for XML parsing objects (e.g. PubMedArticle)\"\"\"\n\n def __init__(self, xml, root=None, *args, **kwargs):\n \"\"\"Instantiate with \"xml\" as string or bytes containing valid XML. Supply name of root element (string) to set virtual top level. (optional).\"\"\"\n <|body_0|>\n\n def parse_xml(xml, root=None):\n \"\"\"Takes xml (str or bytes) and (optionally) a root element definition string. If root element defined, DOM object returned is rebased with this element as root. 
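A minimal usage sketch for the `BitMask` record above. The subclass, flag names, and levels below are invented for illustration; concrete masks (e.g. in sdss/astra) supply their own `name` and `level` tables. One quirk worth noting: `get_name` compares the whole `self.level` list against the integer `level` argument, so per-bit level filtering only behaves as expected with the default `level=0`.

import numpy as np

class ExampleMask(BitMask):
    # Hypothetical flag table: one name and one level per bit.
    name = ['SATURATED', 'LOW_SNR', 'COSMIC_RAY']
    level = [1, 2, 1]  # 1 = BAD, 2 = WARN

mask = ExampleMask()
val = mask.get_value(['SATURATED', 'LOW_SNR'])  # bits 0 and 1 -> np.uint64(3)
print(mask.get_name(val))   # 'SATURATED,LOW_SNR'
print(mask.bad_value())     # level-1 bits 0 and 2 -> np.uint64(5)
print(mask.warn_value())    # level-2 bit 1 -> np.uint64(2)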
Args: xml (str or bytes) root (str): (optional) name of root element Returns: lxml document object.\"\"\"\n <|body_1|>\n\n def _get(self, tag):\n \"\"\"Returns content of named XML element, or None if not found.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not xml:\n if xml == '':\n xml = 'empty'\n raise MetaPubError('Cannot build MetaPubObject; xml string was %s' % xml)\n self.xml = xml\n self.content = self.parse_xml(xml, root)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(xml, str) or isinstance(xml, bytes):\n dom = etree.XML(xml)\n else:\n dom = etree.XML(xml)\n if root:\n return dom.find(root)\n else:\n return dom\n<|end_body_1|>\n\n<|body_start_2|>\n elem = self.content.find(tag)\n if elem is not None:\n return elem.text\n return None\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000265", "length_bytes": 2868, "license_type": "permissive", "methods": [{"docstring": "Instantiate with \"xml\" as string or bytes containing valid XML. Supply name of root element (string) to set virtual top level. (optional).", "name": "__init__", "signature": "def __init__(self, xml, root=None, *args, **kwargs)"}, {"docstring": "Takes xml (str or bytes) and (optionally) a root element definition string. If root element defined, DOM object returned is rebased with this element as root. Args: xml (str or bytes) root (str): (optional) name of root element Returns: lxml document object.", "name": "parse_xml", "signature": "def parse_xml(xml, root=None)"}, {"docstring": "Returns content of named XML element, or None if not found.", "name": "_get", "signature": "def _get(self, tag)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002880", "prompt": "Implement the Python class `MetaPubObject` described below.\n\nClass description:\nBase class for XML parsing objects (e.g. PubMedArticle)\n\nMethod signatures and docstrings:\n- def __init__(self, xml, root=None, *args, **kwargs): Instantiate with \"xml\" as string or bytes containing valid XML. Supply name of root element (string) to set virtual top level. (optional).\n- def parse_xml(xml, root=None): Takes xml (str or bytes) and (optionally) a root element definition string. If root element defined, DOM object returned is rebased with this element as root. Args: xml (str or bytes) root (str): (optional) name of root element Returns: lxml document object.\n- def _get(self, tag): Returns content of named XML element, or None if not found.", "prompted_full_text": "Implement the Python class `MetaPubObject` described below.\n\nClass description:\nBase class for XML parsing objects (e.g. PubMedArticle)\n\nMethod signatures and docstrings:\n- def __init__(self, xml, root=None, *args, **kwargs): Instantiate with \"xml\" as string or bytes containing valid XML. Supply name of root element (string) to set virtual top level. (optional).\n- def parse_xml(xml, root=None): Takes xml (str or bytes) and (optionally) a root element definition string. If root element defined, DOM object returned is rebased with this element as root. Args: xml (str or bytes) root (str): (optional) name of root element Returns: lxml document object.\n- def _get(self, tag): Returns content of named XML element, or None if not found.\n\n<|skeleton|>\nclass MetaPubObject:\n \"\"\"Base class for XML parsing objects (e.g. PubMedArticle)\"\"\"\n\n def __init__(self, xml, root=None, *args, **kwargs):\n \"\"\"Instantiate with \"xml\" as string or bytes containing valid XML. Supply name of root element (string) to set virtual top level. 
(optional).\"\"\"\n <|body_0|>\n\n def parse_xml(xml, root=None):\n \"\"\"Takes xml (str or bytes) and (optionally) a root element definition string. If root element defined, DOM object returned is rebased with this element as root. Args: xml (str or bytes) root (str): (optional) name of root element Returns: lxml document object.\"\"\"\n <|body_1|>\n\n def _get(self, tag):\n \"\"\"Returns content of named XML element, or None if not found.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not xml:\n if xml == '':\n xml = 'empty'\n raise MetaPubError('Cannot build MetaPubObject; xml string was %s' % xml)\n self.xml = xml\n self.content = self.parse_xml(xml, root)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(xml, str) or isinstance(xml, bytes):\n dom = etree.XML(xml)\n else:\n dom = etree.XML(xml)\n if root:\n return dom.find(root)\n else:\n return dom\n<|end_body_1|>\n\n<|body_start_2|>\n elem = self.content.find(tag)\n if elem is not None:\n return elem.text\n return None\n<|end_body_2|>\n", "revision_id": "7dc3f2321720191d461056deeaedf69cd1479157", "skeleton": "<|skeleton|>\nclass MetaPubObject:\n \"\"\"Base class for XML parsing objects (e.g. PubMedArticle)\"\"\"\n\n def __init__(self, xml, root=None, *args, **kwargs):\n \"\"\"Instantiate with \"xml\" as string or bytes containing valid XML. Supply name of root element (string) to set virtual top level. (optional).\"\"\"\n <|body_0|>\n\n def parse_xml(xml, root=None):\n \"\"\"Takes xml (str or bytes) and (optionally) a root element definition string. If root element defined, DOM object returned is rebased with this element as root. Args: xml (str or bytes) root (str): (optional) name of root element Returns: lxml document object.\"\"\"\n <|body_1|>\n\n def _get(self, tag):\n \"\"\"Returns content of named XML element, or None if not found.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MetaPubObject:\n \"\"\"Base class for XML parsing objects (e.g. PubMedArticle)\"\"\"\n\n def __init__(self, xml, root=None, *args, **kwargs):\n \"\"\"Instantiate with \"xml\" as string or bytes containing valid XML. Supply name of root element (string) to set virtual top level. (optional).\"\"\"\n if not xml:\n if xml == '':\n xml = 'empty'\n raise MetaPubError('Cannot build MetaPubObject; xml string was %s' % xml)\n self.xml = xml\n self.content = self.parse_xml(xml, root)\n\n def parse_xml(xml, root=None):\n \"\"\"Takes xml (str or bytes) and (optionally) a root element definition string. If root element defined, DOM object returned is rebased with this element as root. 
Args: xml (str or bytes) root (str): (optional) name of root element Returns: lxml document object.\"\"\"\n if isinstance(xml, str) or isinstance(xml, bytes):\n dom = etree.XML(xml)\n else:\n dom = etree.XML(xml)\n if root:\n return dom.find(root)\n else:\n return dom\n\n def _get(self, tag):\n \"\"\"Returns content of named XML element, or None if not found.\"\"\"\n elem = self.content.find(tag)\n if elem is not None:\n return elem.text\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "metapub/base.py", "source_repo": "pmartin23/metapub", "split": "val", "star_events_count": 0} {"blob_id": "fecce9bcfcc5d0f9fea50cc8877178f1a5698e8a", "bodies": ["ids = self.cur_devs.ids\nrail_cache = {dev.id: dev.cur_rail.id for dev in self.cur_devs}\nold_dev_ids = self.plan_infos.mapped('cur_train_id.id')\nitems = []\nfor tmp_id in ids:\n if tmp_id not in old_dev_ids:\n items.append((0, 0, {'cur_train_id': tmp_id, 'rail': rail_cache[tmp_id], 'exchange_rail_time': utility.get_now_time_int_repr() + 10 * 60, 'type': 'back'}))\n else:\n for info in self.plan_infos:\n if info.cur_train_id.id == tmp_id:\n items.append((1, info.id, {'cur_train_id': tmp_id, 'rail': rail_cache[tmp_id], 'type': 'back'}))\n break\nfor tmp in self.plan_infos:\n if tmp.cur_train_id.id not in ids:\n items.append((2, tmp.id))\nself.plan_infos = items", "vals_list = []\nlog_list = []\nlocation = self.env.user.cur_location\nif not location:\n raise exceptions.ValidationError('当前用户没有配置场段!请在右上角头像处配置!')\nback_train_need_min = location.receive_train_need_min\nfor info in self.plan_infos:\n if not info.plan_time or not info.rail:\n raise exceptions.Warning('信息填写不完整')\n val = {'status': 'unpublish', 'train_id': info.cur_train_id.id, 'plan_back_location': location.id, 'date': self.plan_date, 'plan_back_time': info.plan_time, 'plan_back_rail': info.rail.id, 'exchange_rail_time': info.exchange_rail_time, 'plan_train_no': info.plan_train_no}\n if info.exchange_rail_time:\n val['plan_back_time'] = info.exchange_rail_time - back_train_need_min * 60\n else:\n val['plan_back_time'] = None\n vals_list.append(val)\n log = {'type': 'in_plan', 'train_dev': info.cur_train_id.train.id, 'operation': '新增收车计划'}\n log_list.append(log)\nself.env['metro_park_dispatch.train_back_plan'].create(vals_list)\nself.env['metro_park_dispatch.train_in_out_log'].create(log_list)"], "bodies_text": "<|body_start_0|>\n ids = self.cur_devs.ids\n rail_cache = {dev.id: dev.cur_rail.id for dev in self.cur_devs}\n old_dev_ids = self.plan_infos.mapped('cur_train_id.id')\n items = []\n for tmp_id in ids:\n if tmp_id not in old_dev_ids:\n items.append((0, 0, {'cur_train_id': tmp_id, 'rail': rail_cache[tmp_id], 'exchange_rail_time': utility.get_now_time_int_repr() + 10 * 60, 'type': 'back'}))\n else:\n for info in self.plan_infos:\n if info.cur_train_id.id == tmp_id:\n items.append((1, info.id, {'cur_train_id': tmp_id, 'rail': rail_cache[tmp_id], 'type': 'back'}))\n break\n for tmp in self.plan_infos:\n if tmp.cur_train_id.id not in ids:\n items.append((2, tmp.id))\n self.plan_infos = items\n<|end_body_0|>\n\n<|body_start_1|>\n vals_list = []\n log_list = []\n location = self.env.user.cur_location\n if not location:\n raise exceptions.ValidationError('当前用户没有配置场段!请在右上角头像处配置!')\n back_train_need_min = location.receive_train_need_min\n for info in self.plan_infos:\n if not info.plan_time or not info.rail:\n raise exceptions.Warning('信息填写不完整')\n val = {'status': 'unpublish', 'train_id': info.cur_train_id.id, 'plan_back_location': location.id, 'date': self.plan_date, 
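A minimal sketch of exercising the `MetaPubObject` record above. As recorded, `parse_xml` takes no `self`, so the `self.parse_xml(xml, root)` call in `__init__` only works if the method is decorated as a `@staticmethod`; the sketch assumes that. (Both branches of its `isinstance` check also do the same thing, so any str/bytes input goes through `etree.XML`.) The XML snippet is invented.

from lxml import etree

xml = b'<article><title>Example</title><year>2024</year></article>'
obj = MetaPubObject(xml)           # no root: whole document is the content
print(obj._get('title'))           # 'Example'
print(obj._get('missing'))         # None (element not found)
obj2 = MetaPubObject(xml, root='title')  # rebase content on <title>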
'plan_back_time': info.plan_time, 'plan_back_rail': info.rail.id, 'exchange_rail_time': info.exchange_rail_time, 'plan_train_no': info.plan_train_no}\n if info.exchange_rail_time:\n val['plan_back_time'] = info.exchange_rail_time - back_train_need_min * 60\n else:\n val['plan_back_time'] = None\n vals_list.append(val)\n log = {'type': 'in_plan', 'train_dev': info.cur_train_id.train.id, 'operation': '新增收车计划'}\n log_list.append(log)\n self.env['metro_park_dispatch.train_back_plan'].create(vals_list)\n self.env['metro_park_dispatch.train_in_out_log'].create(log_list)\n<|end_body_1|>\n", "class_docstring": "添加新的收车计划", "class_name": "AddNewBackPlan", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AddNewBackPlan:\n \"\"\"添加新的收车计划\"\"\"\n\n def on_change_cur_devs(self):\n \"\"\"加开只能是 :return:\"\"\"\n <|body_0|>\n\n def on_ok(self):\n \"\"\"点击确定 :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ids = self.cur_devs.ids\n rail_cache = {dev.id: dev.cur_rail.id for dev in self.cur_devs}\n old_dev_ids = self.plan_infos.mapped('cur_train_id.id')\n items = []\n for tmp_id in ids:\n if tmp_id not in old_dev_ids:\n items.append((0, 0, {'cur_train_id': tmp_id, 'rail': rail_cache[tmp_id], 'exchange_rail_time': utility.get_now_time_int_repr() + 10 * 60, 'type': 'back'}))\n else:\n for info in self.plan_infos:\n if info.cur_train_id.id == tmp_id:\n items.append((1, info.id, {'cur_train_id': tmp_id, 'rail': rail_cache[tmp_id], 'type': 'back'}))\n break\n for tmp in self.plan_infos:\n if tmp.cur_train_id.id not in ids:\n items.append((2, tmp.id))\n self.plan_infos = items\n<|end_body_0|>\n\n<|body_start_1|>\n vals_list = []\n log_list = []\n location = self.env.user.cur_location\n if not location:\n raise exceptions.ValidationError('当前用户没有配置场段!请在右上角头像处配置!')\n back_train_need_min = location.receive_train_need_min\n for info in self.plan_infos:\n if not info.plan_time or not info.rail:\n raise exceptions.Warning('信息填写不完整')\n val = {'status': 'unpublish', 'train_id': info.cur_train_id.id, 'plan_back_location': location.id, 'date': self.plan_date, 'plan_back_time': info.plan_time, 'plan_back_rail': info.rail.id, 'exchange_rail_time': info.exchange_rail_time, 'plan_train_no': info.plan_train_no}\n if info.exchange_rail_time:\n val['plan_back_time'] = info.exchange_rail_time - back_train_need_min * 60\n else:\n val['plan_back_time'] = None\n vals_list.append(val)\n log = {'type': 'in_plan', 'train_dev': info.cur_train_id.train.id, 'operation': '新增收车计划'}\n log_list.append(log)\n self.env['metro_park_dispatch.train_back_plan'].create(vals_list)\n self.env['metro_park_dispatch.train_in_out_log'].create(log_list)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000266", "length_bytes": 4155, "license_type": "no_license", "methods": [{"docstring": "加开只能是 :return:", "name": "on_change_cur_devs", "signature": "def on_change_cur_devs(self)"}, {"docstring": "点击确定 :return:", "name": "on_ok", "signature": "def on_ok(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006846", "prompt": "Implement the Python class `AddNewBackPlan` described below.\n\nClass description:\n添加新的收车计划\n\nMethod signatures and docstrings:\n- def on_change_cur_devs(self): 加开只能是 :return:\n- def on_ok(self): 点击确定 :return:", "prompted_full_text": "Implement the Python class `AddNewBackPlan` described below.\n\nClass description:\n添加新的收车计划\n\nMethod signatures and docstrings:\n- def on_change_cur_devs(self): 加开只能是 :return:\n- def 
on_ok(self): 点击确定 :return:\n\n<|skeleton|>\nclass AddNewBackPlan:\n \"\"\"添加新的收车计划\"\"\"\n\n def on_change_cur_devs(self):\n \"\"\"加开只能是 :return:\"\"\"\n <|body_0|>\n\n def on_ok(self):\n \"\"\"点击确定 :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ids = self.cur_devs.ids\n rail_cache = {dev.id: dev.cur_rail.id for dev in self.cur_devs}\n old_dev_ids = self.plan_infos.mapped('cur_train_id.id')\n items = []\n for tmp_id in ids:\n if tmp_id not in old_dev_ids:\n items.append((0, 0, {'cur_train_id': tmp_id, 'rail': rail_cache[tmp_id], 'exchange_rail_time': utility.get_now_time_int_repr() + 10 * 60, 'type': 'back'}))\n else:\n for info in self.plan_infos:\n if info.cur_train_id.id == tmp_id:\n items.append((1, info.id, {'cur_train_id': tmp_id, 'rail': rail_cache[tmp_id], 'type': 'back'}))\n break\n for tmp in self.plan_infos:\n if tmp.cur_train_id.id not in ids:\n items.append((2, tmp.id))\n self.plan_infos = items\n<|end_body_0|>\n\n<|body_start_1|>\n vals_list = []\n log_list = []\n location = self.env.user.cur_location\n if not location:\n raise exceptions.ValidationError('当前用户没有配置场段!请在右上角头像处配置!')\n back_train_need_min = location.receive_train_need_min\n for info in self.plan_infos:\n if not info.plan_time or not info.rail:\n raise exceptions.Warning('信息填写不完整')\n val = {'status': 'unpublish', 'train_id': info.cur_train_id.id, 'plan_back_location': location.id, 'date': self.plan_date, 'plan_back_time': info.plan_time, 'plan_back_rail': info.rail.id, 'exchange_rail_time': info.exchange_rail_time, 'plan_train_no': info.plan_train_no}\n if info.exchange_rail_time:\n val['plan_back_time'] = info.exchange_rail_time - back_train_need_min * 60\n else:\n val['plan_back_time'] = None\n vals_list.append(val)\n log = {'type': 'in_plan', 'train_dev': info.cur_train_id.train.id, 'operation': '新增收车计划'}\n log_list.append(log)\n self.env['metro_park_dispatch.train_back_plan'].create(vals_list)\n self.env['metro_park_dispatch.train_in_out_log'].create(log_list)\n<|end_body_1|>\n", "revision_id": "13b428a5c4ade6278e3e5e996ef10d9fb0fea4b9", "skeleton": "<|skeleton|>\nclass AddNewBackPlan:\n \"\"\"添加新的收车计划\"\"\"\n\n def on_change_cur_devs(self):\n \"\"\"加开只能是 :return:\"\"\"\n <|body_0|>\n\n def on_ok(self):\n \"\"\"点击确定 :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AddNewBackPlan:\n \"\"\"添加新的收车计划\"\"\"\n\n def on_change_cur_devs(self):\n \"\"\"加开只能是 :return:\"\"\"\n ids = self.cur_devs.ids\n rail_cache = {dev.id: dev.cur_rail.id for dev in self.cur_devs}\n old_dev_ids = self.plan_infos.mapped('cur_train_id.id')\n items = []\n for tmp_id in ids:\n if tmp_id not in old_dev_ids:\n items.append((0, 0, {'cur_train_id': tmp_id, 'rail': rail_cache[tmp_id], 'exchange_rail_time': utility.get_now_time_int_repr() + 10 * 60, 'type': 'back'}))\n else:\n for info in self.plan_infos:\n if info.cur_train_id.id == tmp_id:\n items.append((1, info.id, {'cur_train_id': tmp_id, 'rail': rail_cache[tmp_id], 'type': 'back'}))\n break\n for tmp in self.plan_infos:\n if tmp.cur_train_id.id not in ids:\n items.append((2, tmp.id))\n self.plan_infos = items\n\n def on_ok(self):\n \"\"\"点击确定 :return:\"\"\"\n vals_list = []\n log_list = []\n location = self.env.user.cur_location\n if not location:\n raise exceptions.ValidationError('当前用户没有配置场段!请在右上角头像处配置!')\n back_train_need_min = location.receive_train_need_min\n for info in self.plan_infos:\n if not info.plan_time or not info.rail:\n raise 
exceptions.Warning('信息填写不完整')\n val = {'status': 'unpublish', 'train_id': info.cur_train_id.id, 'plan_back_location': location.id, 'date': self.plan_date, 'plan_back_time': info.plan_time, 'plan_back_rail': info.rail.id, 'exchange_rail_time': info.exchange_rail_time, 'plan_train_no': info.plan_train_no}\n if info.exchange_rail_time:\n val['plan_back_time'] = info.exchange_rail_time - back_train_need_min * 60\n else:\n val['plan_back_time'] = None\n vals_list.append(val)\n log = {'type': 'in_plan', 'train_dev': info.cur_train_id.train.id, 'operation': '新增收车计划'}\n log_list.append(log)\n self.env['metro_park_dispatch.train_back_plan'].create(vals_list)\n self.env['metro_park_dispatch.train_in_out_log'].create(log_list)\n", "source": "the_stack_v2_python_sparse", "source_path": "mdias_addons/metro_park_dispatch/models/add_new_back_plan.py", "source_repo": "rezaghanimi/main_mdias", "split": "val", "star_events_count": 0} {"blob_id": "e3facf73ef700212c7a918a2681e121286eb835b", "bodies": ["tmp = [Node(i, strs[i]) for i in range(len(strs))]\ntmp.sort()\nresult = dict()\nfor p in tmp:\n if p.string not in result:\n result[p.string] = [strs[p.id]]\n else:\n result[p.string].append(strs[p.id])\nreturn list(result.values())", "d = dict()\nfor w in strs:\n word = getCountsStr(w)\n if word not in d:\n d[word] = [w]\n else:\n d[word].append(w)\nreturn list(d.values())"], "bodies_text": "<|body_start_0|>\n tmp = [Node(i, strs[i]) for i in range(len(strs))]\n tmp.sort()\n result = dict()\n for p in tmp:\n if p.string not in result:\n result[p.string] = [strs[p.id]]\n else:\n result[p.string].append(strs[p.id])\n return list(result.values())\n<|end_body_0|>\n\n<|body_start_1|>\n d = dict()\n for w in strs:\n word = getCountsStr(w)\n if word not in d:\n d[word] = [w]\n else:\n d[word].append(w)\n return list(d.values())\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def groupAnagrams(self, strs):\n \"\"\":type strs: List[str] :rtype: List[List[str]]\"\"\"\n <|body_0|>\n\n def groupAnagrams2(self, strs):\n \"\"\"计数法\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tmp = [Node(i, strs[i]) for i in range(len(strs))]\n tmp.sort()\n result = dict()\n for p in tmp:\n if p.string not in result:\n result[p.string] = [strs[p.id]]\n else:\n result[p.string].append(strs[p.id])\n return list(result.values())\n<|end_body_0|>\n\n<|body_start_1|>\n d = dict()\n for w in strs:\n word = getCountsStr(w)\n if word not in d:\n d[word] = [w]\n else:\n d[word].append(w)\n return list(d.values())\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000267", "length_bytes": 1758, "license_type": "no_license", "methods": [{"docstring": ":type strs: List[str] :rtype: List[List[str]]", "name": "groupAnagrams", "signature": "def groupAnagrams(self, strs)"}, {"docstring": "计数法", "name": "groupAnagrams2", "signature": "def groupAnagrams2(self, strs)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def groupAnagrams(self, strs): :type strs: List[str] :rtype: List[List[str]]\n- def groupAnagrams2(self, strs): 计数法", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def groupAnagrams(self, strs): :type strs: List[str] :rtype: 
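The `AddNewBackPlan` record above is an Odoo wizard (its docstring 添加新的收车计划 means "add a new train-receiving plan"), and the `(0, 0, vals)`, `(1, id, vals)`, and `(2, id)` triples that `on_change_cur_devs` assigns to `plan_infos` are standard Odoo x2many write commands. A minimal sketch of that pattern, with ids and field values invented:

# Odoo x2many commands, as used by on_change_cur_devs above:
#   (0, 0, vals)  -> create a new line from vals
#   (1, id, vals) -> update the existing line with this id
#   (2, id)       -> delete the line with this id
items = []
items.append((0, 0, {'cur_train_id': 7, 'rail': 3, 'type': 'back'}))
items.append((1, 42, {'rail': 5}))   # hypothetical existing line id 42
items.append((2, 43))                # hypothetical existing line id 43
# wizard.plan_infos = items          # Odoo applies the commands in order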
List[List[str]]\n- def groupAnagrams2(self, strs): 计数法\n\n<|skeleton|>\nclass Solution:\n\n def groupAnagrams(self, strs):\n \"\"\":type strs: List[str] :rtype: List[List[str]]\"\"\"\n <|body_0|>\n\n def groupAnagrams2(self, strs):\n \"\"\"计数法\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tmp = [Node(i, strs[i]) for i in range(len(strs))]\n tmp.sort()\n result = dict()\n for p in tmp:\n if p.string not in result:\n result[p.string] = [strs[p.id]]\n else:\n result[p.string].append(strs[p.id])\n return list(result.values())\n<|end_body_0|>\n\n<|body_start_1|>\n d = dict()\n for w in strs:\n word = getCountsStr(w)\n if word not in d:\n d[word] = [w]\n else:\n d[word].append(w)\n return list(d.values())\n<|end_body_1|>\n", "revision_id": "837957ea22aa07ce28a6c23ea0419bd2011e1f88", "skeleton": "<|skeleton|>\nclass Solution:\n\n def groupAnagrams(self, strs):\n \"\"\":type strs: List[str] :rtype: List[List[str]]\"\"\"\n <|body_0|>\n\n def groupAnagrams2(self, strs):\n \"\"\"计数法\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def groupAnagrams(self, strs):\n \"\"\":type strs: List[str] :rtype: List[List[str]]\"\"\"\n tmp = [Node(i, strs[i]) for i in range(len(strs))]\n tmp.sort()\n result = dict()\n for p in tmp:\n if p.string not in result:\n result[p.string] = [strs[p.id]]\n else:\n result[p.string].append(strs[p.id])\n return list(result.values())\n\n def groupAnagrams2(self, strs):\n \"\"\"计数法\"\"\"\n d = dict()\n for w in strs:\n word = getCountsStr(w)\n if word not in d:\n d[word] = [w]\n else:\n d[word].append(w)\n return list(d.values())\n", "source": "the_stack_v2_python_sparse", "source_path": "Tencent/midum/字母异位词分组.py", "source_repo": "2226171237/Algorithmpractice", "split": "val", "star_events_count": 0} {"blob_id": "fed57dc3bb8cc0043c1505cca7ff05c86473b362", "bodies": ["super().__init__()\nself.inventory = None\nself.supplier_inventory = None\nself.indicators = None\nif inventory is not None and supplier_inventory is not None:\n self.build(inventory, supplier_inventory)", "print(f'Building QUBO')\nself.inventory = inventory\nself.supplier_inventory = supplier_inventory\nself.qubo = self.construct_bqm()", "self.indicators = []\nfor i in range(len(self.supplier_inventory)):\n self.indicators.append([1 if self.inventory[a] in self.supplier_inventory[i] else 0 for a in range(len(self.inventory))])\nbqm = BinaryQuadraticModel(BINARY)\nself.x = [bqm.add_variable(f'x_{i + 1}', SupplierQubo.lagrange_a * sum(self.indicators[i]) + SupplierQubo.lagrange_b) for i in range(0, len(self.supplier_inventory))]\ny = []\nfor a in range(1, len(self.inventory) + 1):\n y.append([bqm.add_variable(f'y_{(a, m)}', SupplierQubo.lagrange_a * (m ** 2 - 1)) for m in range(1, len(self.supplier_inventory) + 1)])\nfor i in range(1, len(self.supplier_inventory) + 1):\n for j in range(i + 1, len(self.supplier_inventory) + 1):\n key = ('x_' + str(i), 'x_' + str(j))\n bqm.quadratic[key] = 2 * SupplierQubo.lagrange_a * np.dot(np.array(self.indicators[i - 1]), np.array(self.indicators[j - 1]))\nfor m in range(1, len(self.supplier_inventory) + 1):\n for n in range(m + 1, len(self.supplier_inventory) + 1):\n for a in range(1, len(self.inventory) + 1):\n key = ('y_(' + str(a) + ', ' + str(m) + ')', 'y_(' + str(a) + ', ' + str(n) + ')')\n bqm.quadratic[key] = 2 * SupplierQubo.lagrange_a * (1 + m * n)\nfor i in range(1, len(self.supplier_inventory) + 1):\n for m in range(1, 
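The `Solution` record above leans on two helpers that are not part of the record: `Node` (the sortable wrapper used by `groupAnagrams`) and `getCountsStr` (the counting key used by `groupAnagrams2`). A sketch of what they plausibly look like, inferred only from how they are called:

class Node:
    def __init__(self, i, s):
        self.id = i
        self.string = ''.join(sorted(s))  # canonical anagram key

    def __lt__(self, other):              # needed for tmp.sort()
        return self.string < other.string

def getCountsStr(word):
    counts = [0] * 26                     # assumes lowercase a-z input
    for ch in word:
        counts[ord(ch) - ord('a')] += 1
    return tuple(counts)                  # hashable; equal for anagrams

print(Solution().groupAnagrams2(['eat', 'tea', 'tan', 'ate', 'nat', 'bat']))
# [['eat', 'tea', 'ate'], ['tan', 'nat'], ['bat']]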
len(self.supplier_inventory) + 1):\n for a in range(1, len(self.inventory) + 1):\n key = ('x_' + str(i), 'y_(' + str(a) + ', ' + str(m) + ')')\n bqm.quadratic[key] = -2 * SupplierQubo.lagrange_a * m * self.indicators[i - 1][a - 1]\nreturn bqm", "res = []\nfor solution in solutions:\n res.append([solution[i] for i in self.x])\nreturn res"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.inventory = None\n self.supplier_inventory = None\n self.indicators = None\n if inventory is not None and supplier_inventory is not None:\n self.build(inventory, supplier_inventory)\n<|end_body_0|>\n\n<|body_start_1|>\n print(f'Building QUBO')\n self.inventory = inventory\n self.supplier_inventory = supplier_inventory\n self.qubo = self.construct_bqm()\n<|end_body_1|>\n\n<|body_start_2|>\n self.indicators = []\n for i in range(len(self.supplier_inventory)):\n self.indicators.append([1 if self.inventory[a] in self.supplier_inventory[i] else 0 for a in range(len(self.inventory))])\n bqm = BinaryQuadraticModel(BINARY)\n self.x = [bqm.add_variable(f'x_{i + 1}', SupplierQubo.lagrange_a * sum(self.indicators[i]) + SupplierQubo.lagrange_b) for i in range(0, len(self.supplier_inventory))]\n y = []\n for a in range(1, len(self.inventory) + 1):\n y.append([bqm.add_variable(f'y_{(a, m)}', SupplierQubo.lagrange_a * (m ** 2 - 1)) for m in range(1, len(self.supplier_inventory) + 1)])\n for i in range(1, len(self.supplier_inventory) + 1):\n for j in range(i + 1, len(self.supplier_inventory) + 1):\n key = ('x_' + str(i), 'x_' + str(j))\n bqm.quadratic[key] = 2 * SupplierQubo.lagrange_a * np.dot(np.array(self.indicators[i - 1]), np.array(self.indicators[j - 1]))\n for m in range(1, len(self.supplier_inventory) + 1):\n for n in range(m + 1, len(self.supplier_inventory) + 1):\n for a in range(1, len(self.inventory) + 1):\n key = ('y_(' + str(a) + ', ' + str(m) + ')', 'y_(' + str(a) + ', ' + str(n) + ')')\n bqm.quadratic[key] = 2 * SupplierQubo.lagrange_a * (1 + m * n)\n for i in range(1, len(self.supplier_inventory) + 1):\n for m in range(1, len(self.supplier_inventory) + 1):\n for a in range(1, len(self.inventory) + 1):\n key = ('x_' + str(i), 'y_(' + str(a) + ', ' + str(m) + ')')\n bqm.quadratic[key] = -2 * SupplierQubo.lagrange_a * m * self.indicators[i - 1][a - 1]\n return bqm\n<|end_body_2|>\n\n<|body_start_3|>\n res = []\n for solution in solutions:\n res.append([solution[i] for i in self.x])\n return res\n<|end_body_3|>\n", "class_docstring": "", "class_name": "SupplierQubo", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SupplierQubo:\n\n def __init__(self, inventory: list[int or str] or None, supplier_inventory: list[set[int or str]] or None) -> None:\n \"\"\"Initializes the SupplierQubo inventory (list): List of items we want for our inventory supplier_inventory (list of sets): List for each supplier their inventory\"\"\"\n <|body_0|>\n\n def build(self, inventory: list[int or str] or None, supplier_inventory: list[set[int or str]] or None):\n \"\"\"Bulds the qubo Args: inventory (list): List of items we want for our inventory supplier_inventory (list of sets): List for each supplier their inventory\"\"\"\n <|body_1|>\n\n def construct_bqm(self):\n \"\"\"Construct BQM for the generalized set cover problem Args: Returns: Binary quadratic model instance\"\"\"\n <|body_2|>\n\n def _post_process(self, solutions):\n \"\"\"Hack to return data formatted as expected\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n 
super().__init__()\n self.inventory = None\n self.supplier_inventory = None\n self.indicators = None\n if inventory is not None and supplier_inventory is not None:\n self.build(inventory, supplier_inventory)\n<|end_body_0|>\n\n<|body_start_1|>\n print(f'Building QUBO')\n self.inventory = inventory\n self.supplier_inventory = supplier_inventory\n self.qubo = self.construct_bqm()\n<|end_body_1|>\n\n<|body_start_2|>\n self.indicators = []\n for i in range(len(self.supplier_inventory)):\n self.indicators.append([1 if self.inventory[a] in self.supplier_inventory[i] else 0 for a in range(len(self.inventory))])\n bqm = BinaryQuadraticModel(BINARY)\n self.x = [bqm.add_variable(f'x_{i + 1}', SupplierQubo.lagrange_a * sum(self.indicators[i]) + SupplierQubo.lagrange_b) for i in range(0, len(self.supplier_inventory))]\n y = []\n for a in range(1, len(self.inventory) + 1):\n y.append([bqm.add_variable(f'y_{(a, m)}', SupplierQubo.lagrange_a * (m ** 2 - 1)) for m in range(1, len(self.supplier_inventory) + 1)])\n for i in range(1, len(self.supplier_inventory) + 1):\n for j in range(i + 1, len(self.supplier_inventory) + 1):\n key = ('x_' + str(i), 'x_' + str(j))\n bqm.quadratic[key] = 2 * SupplierQubo.lagrange_a * np.dot(np.array(self.indicators[i - 1]), np.array(self.indicators[j - 1]))\n for m in range(1, len(self.supplier_inventory) + 1):\n for n in range(m + 1, len(self.supplier_inventory) + 1):\n for a in range(1, len(self.inventory) + 1):\n key = ('y_(' + str(a) + ', ' + str(m) + ')', 'y_(' + str(a) + ', ' + str(n) + ')')\n bqm.quadratic[key] = 2 * SupplierQubo.lagrange_a * (1 + m * n)\n for i in range(1, len(self.supplier_inventory) + 1):\n for m in range(1, len(self.supplier_inventory) + 1):\n for a in range(1, len(self.inventory) + 1):\n key = ('x_' + str(i), 'y_(' + str(a) + ', ' + str(m) + ')')\n bqm.quadratic[key] = -2 * SupplierQubo.lagrange_a * m * self.indicators[i - 1][a - 1]\n return bqm\n<|end_body_2|>\n\n<|body_start_3|>\n res = []\n for solution in solutions:\n res.append([solution[i] for i in self.x])\n return res\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000268", "length_bytes": 4576, "license_type": "permissive", "methods": [{"docstring": "Initializes the SupplierQubo inventory (list): List of items we want for our inventory supplier_inventory (list of sets): List for each supplier their inventory", "name": "__init__", "signature": "def __init__(self, inventory: list[int or str] or None, supplier_inventory: list[set[int or str]] or None) -> None"}, {"docstring": "Bulds the qubo Args: inventory (list): List of items we want for our inventory supplier_inventory (list of sets): List for each supplier their inventory", "name": "build", "signature": "def build(self, inventory: list[int or str] or None, supplier_inventory: list[set[int or str]] or None)"}, {"docstring": "Construct BQM for the generalized set cover problem Args: Returns: Binary quadratic model instance", "name": "construct_bqm", "signature": "def construct_bqm(self)"}, {"docstring": "Hack to return data formatted as expected", "name": "_post_process", "signature": "def _post_process(self, solutions)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_000611", "prompt": "Implement the Python class `SupplierQubo` described below.\n\nClass description:\nImplement the SupplierQubo class.\n\nMethod signatures and docstrings:\n- def __init__(self, inventory: list[int or str] or None, supplier_inventory: list[set[int or str]] or None) -> None: Initializes the SupplierQubo inventory (list): List 
of items we want for our inventory supplier_inventory (list of sets): List for each supplier their inventory\n- def build(self, inventory: list[int or str] or None, supplier_inventory: list[set[int or str]] or None): Bulds the qubo Args: inventory (list): List of items we want for our inventory supplier_inventory (list of sets): List for each supplier their inventory\n- def construct_bqm(self): Construct BQM for the generalized set cover problem Args: Returns: Binary quadratic model instance\n- def _post_process(self, solutions): Hack to return data formatted as expected", "prompted_full_text": "Implement the Python class `SupplierQubo` described below.\n\nClass description:\nImplement the SupplierQubo class.\n\nMethod signatures and docstrings:\n- def __init__(self, inventory: list[int or str] or None, supplier_inventory: list[set[int or str]] or None) -> None: Initializes the SupplierQubo inventory (list): List of items we want for our inventory supplier_inventory (list of sets): List for each supplier their inventory\n- def build(self, inventory: list[int or str] or None, supplier_inventory: list[set[int or str]] or None): Bulds the qubo Args: inventory (list): List of items we want for our inventory supplier_inventory (list of sets): List for each supplier their inventory\n- def construct_bqm(self): Construct BQM for the generalized set cover problem Args: Returns: Binary quadratic model instance\n- def _post_process(self, solutions): Hack to return data formatted as expected\n\n<|skeleton|>\nclass SupplierQubo:\n\n def __init__(self, inventory: list[int or str] or None, supplier_inventory: list[set[int or str]] or None) -> None:\n \"\"\"Initializes the SupplierQubo inventory (list): List of items we want for our inventory supplier_inventory (list of sets): List for each supplier their inventory\"\"\"\n <|body_0|>\n\n def build(self, inventory: list[int or str] or None, supplier_inventory: list[set[int or str]] or None):\n \"\"\"Bulds the qubo Args: inventory (list): List of items we want for our inventory supplier_inventory (list of sets): List for each supplier their inventory\"\"\"\n <|body_1|>\n\n def construct_bqm(self):\n \"\"\"Construct BQM for the generalized set cover problem Args: Returns: Binary quadratic model instance\"\"\"\n <|body_2|>\n\n def _post_process(self, solutions):\n \"\"\"Hack to return data formatted as expected\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.inventory = None\n self.supplier_inventory = None\n self.indicators = None\n if inventory is not None and supplier_inventory is not None:\n self.build(inventory, supplier_inventory)\n<|end_body_0|>\n\n<|body_start_1|>\n print(f'Building QUBO')\n self.inventory = inventory\n self.supplier_inventory = supplier_inventory\n self.qubo = self.construct_bqm()\n<|end_body_1|>\n\n<|body_start_2|>\n self.indicators = []\n for i in range(len(self.supplier_inventory)):\n self.indicators.append([1 if self.inventory[a] in self.supplier_inventory[i] else 0 for a in range(len(self.inventory))])\n bqm = BinaryQuadraticModel(BINARY)\n self.x = [bqm.add_variable(f'x_{i + 1}', SupplierQubo.lagrange_a * sum(self.indicators[i]) + SupplierQubo.lagrange_b) for i in range(0, len(self.supplier_inventory))]\n y = []\n for a in range(1, len(self.inventory) + 1):\n y.append([bqm.add_variable(f'y_{(a, m)}', SupplierQubo.lagrange_a * (m ** 2 - 1)) for m in range(1, len(self.supplier_inventory) + 1)])\n for i in range(1, len(self.supplier_inventory) + 1):\n for j in range(i + 1, 
len(self.supplier_inventory) + 1):\n key = ('x_' + str(i), 'x_' + str(j))\n bqm.quadratic[key] = 2 * SupplierQubo.lagrange_a * np.dot(np.array(self.indicators[i - 1]), np.array(self.indicators[j - 1]))\n for m in range(1, len(self.supplier_inventory) + 1):\n for n in range(m + 1, len(self.supplier_inventory) + 1):\n for a in range(1, len(self.inventory) + 1):\n key = ('y_(' + str(a) + ', ' + str(m) + ')', 'y_(' + str(a) + ', ' + str(n) + ')')\n bqm.quadratic[key] = 2 * SupplierQubo.lagrange_a * (1 + m * n)\n for i in range(1, len(self.supplier_inventory) + 1):\n for m in range(1, len(self.supplier_inventory) + 1):\n for a in range(1, len(self.inventory) + 1):\n key = ('x_' + str(i), 'y_(' + str(a) + ', ' + str(m) + ')')\n bqm.quadratic[key] = -2 * SupplierQubo.lagrange_a * m * self.indicators[i - 1][a - 1]\n return bqm\n<|end_body_2|>\n\n<|body_start_3|>\n res = []\n for solution in solutions:\n res.append([solution[i] for i in self.x])\n return res\n<|end_body_3|>\n", "revision_id": "de3a36e292683485682f0f7b12aabcf8f548bab7", "skeleton": "<|skeleton|>\nclass SupplierQubo:\n\n def __init__(self, inventory: list[int or str] or None, supplier_inventory: list[set[int or str]] or None) -> None:\n \"\"\"Initializes the SupplierQubo inventory (list): List of items we want for our inventory supplier_inventory (list of sets): List for each supplier their inventory\"\"\"\n <|body_0|>\n\n def build(self, inventory: list[int or str] or None, supplier_inventory: list[set[int or str]] or None):\n \"\"\"Bulds the qubo Args: inventory (list): List of items we want for our inventory supplier_inventory (list of sets): List for each supplier their inventory\"\"\"\n <|body_1|>\n\n def construct_bqm(self):\n \"\"\"Construct BQM for the generalized set cover problem Args: Returns: Binary quadratic model instance\"\"\"\n <|body_2|>\n\n def _post_process(self, solutions):\n \"\"\"Hack to return data formatted as expected\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SupplierQubo:\n def __init__(self, inventory: list[int or str] or None, supplier_inventory: list[set[int or str]] or None) -> None:\n \"\"\"Initializes the SupplierQubo inventory (list): List of items we want for our inventory supplier_inventory (list of sets): List for each supplier their inventory\"\"\"\n super().__init__()\n self.inventory = None\n self.supplier_inventory = None\n self.indicators = None\n if inventory is not None and supplier_inventory is not None:\n self.build(inventory, supplier_inventory)\n\n def build(self, inventory: list[int or str] or None, supplier_inventory: list[set[int or str]] or None):\n \"\"\"Bulds the qubo Args: inventory (list): List of items we want for our inventory supplier_inventory (list of sets): List for each supplier their inventory\"\"\"\n print(f'Building QUBO')\n self.inventory = inventory\n self.supplier_inventory = supplier_inventory\n self.qubo = self.construct_bqm()\n\n def construct_bqm(self):\n \"\"\"Construct BQM for the generalized set cover problem Args: Returns: Binary quadratic model instance\"\"\"\n self.indicators = []\n for i in range(len(self.supplier_inventory)):\n self.indicators.append([1 if self.inventory[a] in self.supplier_inventory[i] else 0 for a in range(len(self.inventory))])\n bqm = BinaryQuadraticModel(BINARY)\n self.x = [bqm.add_variable(f'x_{i + 1}', SupplierQubo.lagrange_a * sum(self.indicators[i]) + SupplierQubo.lagrange_b) for i in range(0, 
len(self.supplier_inventory))]\n y = []\n for a in range(1, len(self.inventory) + 1):\n y.append([bqm.add_variable(f'y_{(a, m)}', SupplierQubo.lagrange_a * (m ** 2 - 1)) for m in range(1, len(self.supplier_inventory) + 1)])\n for i in range(1, len(self.supplier_inventory) + 1):\n for j in range(i + 1, len(self.supplier_inventory) + 1):\n key = ('x_' + str(i), 'x_' + str(j))\n bqm.quadratic[key] = 2 * SupplierQubo.lagrange_a * np.dot(np.array(self.indicators[i - 1]), np.array(self.indicators[j - 1]))\n for m in range(1, len(self.supplier_inventory) + 1):\n for n in range(m + 1, len(self.supplier_inventory) + 1):\n for a in range(1, len(self.inventory) + 1):\n key = ('y_(' + str(a) + ', ' + str(m) + ')', 'y_(' + str(a) + ', ' + str(n) + ')')\n bqm.quadratic[key] = 2 * SupplierQubo.lagrange_a * (1 + m * n)\n for i in range(1, len(self.supplier_inventory) + 1):\n for m in range(1, len(self.supplier_inventory) + 1):\n for a in range(1, len(self.inventory) + 1):\n key = ('x_' + str(i), 'y_(' + str(a) + ', ' + str(m) + ')')\n bqm.quadratic[key] = -2 * SupplierQubo.lagrange_a * m * self.indicators[i - 1][a - 1]\n return bqm\n\n def _post_process(self, solutions):\n \"\"\"Hack to return data formatted as expected\"\"\"\n res = []\n for solution in solutions:\n res.append([solution[i] for i in self.x])\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "ZebraKet/models/SupplierQubo.py", "source_repo": "olegxtend/Hackathon2021", "split": "val", "star_events_count": 0} {"blob_id": "972a8971c70b19e856bc81b7b24a0d70a6743b6b", "bodies": ["super(TemporalConvNet, self).__init__()\nself.C = C\nself.mask_nonlinear = mask_nonlinear\nlayer_norm = ChannelwiseLayerNorm(N)\nbottleneck_conv1x1 = nn.Conv1d(N, B, 1, bias=False)\nrepeats = []\nfor r in range(R):\n blocks = []\n for x in range(X):\n dilation = 2 ** x\n padding = (P - 1) * dilation if causal else (P - 1) * dilation // 2\n blocks += [TemporalBlock(B, H, P, stride=1, padding=padding, dilation=dilation, norm_type=norm_type, causal=causal)]\n repeats += [nn.Sequential(*blocks)]\ntemporal_conv_net = nn.Sequential(*repeats)\nmask_conv1x1 = nn.Conv1d(B, C * N, 1, bias=False)\nself.network = nn.Sequential(layer_norm, bottleneck_conv1x1, temporal_conv_net, mask_conv1x1)", "M, N, K = mixture_w.size()\nscore = self.network(mixture_w)\nscore = score.view(M, self.C, N, K)\nif self.mask_nonlinear == 'softmax':\n est_mask = F.softmax(score, dim=1)\nelif self.mask_nonlinear == 'relu':\n est_mask = F.relu(score)\nelse:\n raise ValueError('Unsupported mask non-linear function')\nreturn est_mask"], "bodies_text": "<|body_start_0|>\n super(TemporalConvNet, self).__init__()\n self.C = C\n self.mask_nonlinear = mask_nonlinear\n layer_norm = ChannelwiseLayerNorm(N)\n bottleneck_conv1x1 = nn.Conv1d(N, B, 1, bias=False)\n repeats = []\n for r in range(R):\n blocks = []\n for x in range(X):\n dilation = 2 ** x\n padding = (P - 1) * dilation if causal else (P - 1) * dilation // 2\n blocks += [TemporalBlock(B, H, P, stride=1, padding=padding, dilation=dilation, norm_type=norm_type, causal=causal)]\n repeats += [nn.Sequential(*blocks)]\n temporal_conv_net = nn.Sequential(*repeats)\n mask_conv1x1 = nn.Conv1d(B, C * N, 1, bias=False)\n self.network = nn.Sequential(layer_norm, bottleneck_conv1x1, temporal_conv_net, mask_conv1x1)\n<|end_body_0|>\n\n<|body_start_1|>\n M, N, K = mixture_w.size()\n score = self.network(mixture_w)\n score = score.view(M, self.C, N, K)\n if self.mask_nonlinear == 'softmax':\n est_mask = F.softmax(score, dim=1)\n elif 
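A minimal driver sketch for the `SupplierQubo` record above using dimod's brute-force sampler. The record references class attributes `SupplierQubo.lagrange_a` and `SupplierQubo.lagrange_b` that it never defines, so the values below are assumptions, as is the tiny inventory.

import dimod

SupplierQubo.lagrange_a = 2.0   # assumed set-cover penalty weight
SupplierQubo.lagrange_b = 1.0   # assumed per-supplier cost weight

wanted = ['apple', 'bread', 'milk']
suppliers = [{'apple', 'milk'}, {'bread'}, {'apple', 'bread', 'milk'}]
qubo = SupplierQubo(wanted, suppliers)

# 3 x-variables + 9 y-variables -> small enough for exact enumeration.
best = dimod.ExactSolver().sample(qubo.qubo).first.sample
print(qubo._post_process([best]))  # x_i = 1 means supplier i is selected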
self.mask_nonlinear == 'relu':\n est_mask = F.relu(score)\n else:\n raise ValueError('Unsupported mask non-linear function')\n return est_mask\n<|end_body_1|>\n", "class_docstring": "", "class_name": "TemporalConvNet", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TemporalConvNet:\n\n def __init__(self, N, B, H, P, X, R, C, norm_type='gLN', causal=False, mask_nonlinear='relu'):\n \"\"\"Args: N: Number of filters in autoencoder B: Number of channels in bottleneck 1 × 1-conv block H: Number of channels in convolutional blocks P: Kernel size in convolutional blocks X: Number of convolutional blocks in each repeat R: Number of repeats C: Number of speakers norm_type: BN, gLN, cLN causal: causal or non-causal mask_nonlinear: use which non-linear function to generate mask\"\"\"\n <|body_0|>\n\n def forward(self, mixture_w):\n \"\"\"Keep this API same with TasNet Args: mixture_w: [M, N, K], M is batch size returns: est_mask: [M, C, N, K]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(TemporalConvNet, self).__init__()\n self.C = C\n self.mask_nonlinear = mask_nonlinear\n layer_norm = ChannelwiseLayerNorm(N)\n bottleneck_conv1x1 = nn.Conv1d(N, B, 1, bias=False)\n repeats = []\n for r in range(R):\n blocks = []\n for x in range(X):\n dilation = 2 ** x\n padding = (P - 1) * dilation if causal else (P - 1) * dilation // 2\n blocks += [TemporalBlock(B, H, P, stride=1, padding=padding, dilation=dilation, norm_type=norm_type, causal=causal)]\n repeats += [nn.Sequential(*blocks)]\n temporal_conv_net = nn.Sequential(*repeats)\n mask_conv1x1 = nn.Conv1d(B, C * N, 1, bias=False)\n self.network = nn.Sequential(layer_norm, bottleneck_conv1x1, temporal_conv_net, mask_conv1x1)\n<|end_body_0|>\n\n<|body_start_1|>\n M, N, K = mixture_w.size()\n score = self.network(mixture_w)\n score = score.view(M, self.C, N, K)\n if self.mask_nonlinear == 'softmax':\n est_mask = F.softmax(score, dim=1)\n elif self.mask_nonlinear == 'relu':\n est_mask = F.relu(score)\n else:\n raise ValueError('Unsupported mask non-linear function')\n return est_mask\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000269", "length_bytes": 15161, "license_type": "no_license", "methods": [{"docstring": "Args: N: Number of filters in autoencoder B: Number of channels in bottleneck 1 × 1-conv block H: Number of channels in convolutional blocks P: Kernel size in convolutional blocks X: Number of convolutional blocks in each repeat R: Number of repeats C: Number of speakers norm_type: BN, gLN, cLN causal: causal or non-causal mask_nonlinear: use which non-linear function to generate mask", "name": "__init__", "signature": "def __init__(self, N, B, H, P, X, R, C, norm_type='gLN', causal=False, mask_nonlinear='relu')"}, {"docstring": "Keep this API same with TasNet Args: mixture_w: [M, N, K], M is batch size returns: est_mask: [M, C, N, K]", "name": "forward", "signature": "def forward(self, mixture_w)"}], "n_methods": 2, "prompt": "Implement the Python class `TemporalConvNet` described below.\n\nClass description:\nImplement the TemporalConvNet class.\n\nMethod signatures and docstrings:\n- def __init__(self, N, B, H, P, X, R, C, norm_type='gLN', causal=False, mask_nonlinear='relu'): Args: N: Number of filters in autoencoder B: Number of channels in bottleneck 1 × 1-conv block H: Number of channels in convolutional blocks P: Kernel size in convolutional blocks X: Number of convolutional blocks in each repeat R: Number of repeats C: Number of speakers 
norm_type: BN, gLN, cLN causal: causal or non-causal mask_nonlinear: use which non-linear function to generate mask\n- def forward(self, mixture_w): Keep this API same with TasNet Args: mixture_w: [M, N, K], M is batch size returns: est_mask: [M, C, N, K]", "prompted_full_text": "Implement the Python class `TemporalConvNet` described below.\n\nClass description:\nImplement the TemporalConvNet class.\n\nMethod signatures and docstrings:\n- def __init__(self, N, B, H, P, X, R, C, norm_type='gLN', causal=False, mask_nonlinear='relu'): Args: N: Number of filters in autoencoder B: Number of channels in bottleneck 1 × 1-conv block H: Number of channels in convolutional blocks P: Kernel size in convolutional blocks X: Number of convolutional blocks in each repeat R: Number of repeats C: Number of speakers norm_type: BN, gLN, cLN causal: causal or non-causal mask_nonlinear: use which non-linear function to generate mask\n- def forward(self, mixture_w): Keep this API same with TasNet Args: mixture_w: [M, N, K], M is batch size returns: est_mask: [M, C, N, K]\n\n<|skeleton|>\nclass TemporalConvNet:\n\n def __init__(self, N, B, H, P, X, R, C, norm_type='gLN', causal=False, mask_nonlinear='relu'):\n \"\"\"Args: N: Number of filters in autoencoder B: Number of channels in bottleneck 1 × 1-conv block H: Number of channels in convolutional blocks P: Kernel size in convolutional blocks X: Number of convolutional blocks in each repeat R: Number of repeats C: Number of speakers norm_type: BN, gLN, cLN causal: causal or non-causal mask_nonlinear: use which non-linear function to generate mask\"\"\"\n <|body_0|>\n\n def forward(self, mixture_w):\n \"\"\"Keep this API same with TasNet Args: mixture_w: [M, N, K], M is batch size returns: est_mask: [M, C, N, K]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(TemporalConvNet, self).__init__()\n self.C = C\n self.mask_nonlinear = mask_nonlinear\n layer_norm = ChannelwiseLayerNorm(N)\n bottleneck_conv1x1 = nn.Conv1d(N, B, 1, bias=False)\n repeats = []\n for r in range(R):\n blocks = []\n for x in range(X):\n dilation = 2 ** x\n padding = (P - 1) * dilation if causal else (P - 1) * dilation // 2\n blocks += [TemporalBlock(B, H, P, stride=1, padding=padding, dilation=dilation, norm_type=norm_type, causal=causal)]\n repeats += [nn.Sequential(*blocks)]\n temporal_conv_net = nn.Sequential(*repeats)\n mask_conv1x1 = nn.Conv1d(B, C * N, 1, bias=False)\n self.network = nn.Sequential(layer_norm, bottleneck_conv1x1, temporal_conv_net, mask_conv1x1)\n<|end_body_0|>\n\n<|body_start_1|>\n M, N, K = mixture_w.size()\n score = self.network(mixture_w)\n score = score.view(M, self.C, N, K)\n if self.mask_nonlinear == 'softmax':\n est_mask = F.softmax(score, dim=1)\n elif self.mask_nonlinear == 'relu':\n est_mask = F.relu(score)\n else:\n raise ValueError('Unsupported mask non-linear function')\n return est_mask\n<|end_body_1|>\n", "revision_id": "7e55a422588c1d1e00f35a3d3a3ff896cce59e18", "skeleton": "<|skeleton|>\nclass TemporalConvNet:\n\n def __init__(self, N, B, H, P, X, R, C, norm_type='gLN', causal=False, mask_nonlinear='relu'):\n \"\"\"Args: N: Number of filters in autoencoder B: Number of channels in bottleneck 1 × 1-conv block H: Number of channels in convolutional blocks P: Kernel size in convolutional blocks X: Number of convolutional blocks in each repeat R: Number of repeats C: Number of speakers norm_type: BN, gLN, cLN causal: causal or non-causal mask_nonlinear: use which non-linear function to generate mask\"\"\"\n <|body_0|>\n\n def forward(self, 
mixture_w):\n \"\"\"Keep this API same with TasNet Args: mixture_w: [M, N, K], M is batch size returns: est_mask: [M, C, N, K]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TemporalConvNet:\n def __init__(self, N, B, H, P, X, R, C, norm_type='gLN', causal=False, mask_nonlinear='relu'):\n \"\"\"Args: N: Number of filters in autoencoder B: Number of channels in bottleneck 1 × 1-conv block H: Number of channels in convolutional blocks P: Kernel size in convolutional blocks X: Number of convolutional blocks in each repeat R: Number of repeats C: Number of speakers norm_type: BN, gLN, cLN causal: causal or non-causal mask_nonlinear: use which non-linear function to generate mask\"\"\"\n super(TemporalConvNet, self).__init__()\n self.C = C\n self.mask_nonlinear = mask_nonlinear\n layer_norm = ChannelwiseLayerNorm(N)\n bottleneck_conv1x1 = nn.Conv1d(N, B, 1, bias=False)\n repeats = []\n for r in range(R):\n blocks = []\n for x in range(X):\n dilation = 2 ** x\n padding = (P - 1) * dilation if causal else (P - 1) * dilation // 2\n blocks += [TemporalBlock(B, H, P, stride=1, padding=padding, dilation=dilation, norm_type=norm_type, causal=causal)]\n repeats += [nn.Sequential(*blocks)]\n temporal_conv_net = nn.Sequential(*repeats)\n mask_conv1x1 = nn.Conv1d(B, C * N, 1, bias=False)\n self.network = nn.Sequential(layer_norm, bottleneck_conv1x1, temporal_conv_net, mask_conv1x1)\n\n def forward(self, mixture_w):\n \"\"\"Keep this API same with TasNet Args: mixture_w: [M, N, K], M is batch size returns: est_mask: [M, C, N, K]\"\"\"\n M, N, K = mixture_w.size()\n score = self.network(mixture_w)\n score = score.view(M, self.C, N, K)\n if self.mask_nonlinear == 'softmax':\n est_mask = F.softmax(score, dim=1)\n elif self.mask_nonlinear == 'relu':\n est_mask = F.relu(score)\n else:\n raise ValueError('Unsupported mask non-linear function')\n return est_mask\n", "source": "the_stack_v2_python_sparse", "source_path": "generated/test_kaituoxu_Conv_TasNet.py", "source_repo": "jansel/pytorch-jit-paritybench", "split": "val", "star_events_count": 35} {"blob_id": "405bf38da00557f01944a124ac4c5ab04c3f26d9", "bodies": ["if not api_key_id:\n api_keys = []\n for api_key in API_Key.objects.filter(user=request.user):\n api_keys.append({'id': api_key.id, 'title': api_key.title, 'read': api_key.read, 'write': api_key.write, 'restrict_to_secrets': api_key.restrict_to_secrets, 'allow_insecure_access': api_key.allow_insecure_access, 'active': api_key.active})\n return Response({'api_keys': api_keys}, status=status.HTTP_200_OK)\nelse:\n try:\n api_key = API_Key.objects.get(id=api_key_id, user=request.user)\n except API_Key.DoesNotExist:\n return Response({'message': 'NO_PERMISSION_OR_NOT_EXIST', 'resource_id': api_key_id}, status=status.HTTP_400_BAD_REQUEST)\n response = {'id': api_key.id, 'title': api_key.title, 'public_key': api_key.public_key, 'private_key': api_key.private_key, 'private_key_nonce': api_key.private_key_nonce, 'secret_key': api_key.secret_key, 'secret_key_nonce': api_key.secret_key_nonce, 'read': api_key.read, 'write': api_key.write, 'restrict_to_secrets': api_key.restrict_to_secrets, 'allow_insecure_access': api_key.allow_insecure_access, 'active': api_key.active}\n return Response(response, status=status.HTTP_200_OK)", "serializer = CreateAPIKeySerializer(data=request.data, context=self.get_serializer_context())\nif not serializer.is_valid():\n return Response(serializer.errors, 
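A shape-check sketch for the `TemporalConvNet` record above. The skeleton drops the base class, but the `super().__init__()` call and `nn.Sequential` usage imply `torch.nn.Module`, so the sketch calls `forward` directly; the hyperparameter values follow the Conv-TasNet paper's notation and are otherwise arbitrary.

import torch

net = TemporalConvNet(N=256, B=128, H=256, P=3, X=4, R=2, C=2)
mixture_w = torch.randn(4, 256, 100)  # [M, N, K] = [batch, filters, frames]
est_mask = net.forward(mixture_w)
print(est_mask.shape)                 # torch.Size([4, 2, 256, 100]) = [M, C, N, K]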
status=status.HTTP_400_BAD_REQUEST)\napi_key = API_Key.objects.create(user=request.user, title=str(serializer.validated_data.get('title')), public_key=str(serializer.validated_data.get('public_key')), private_key=str(serializer.validated_data.get('private_key')), private_key_nonce=str(serializer.validated_data.get('private_key_nonce')), secret_key=str(serializer.validated_data.get('secret_key')), secret_key_nonce=str(serializer.validated_data.get('secret_key_nonce')), user_private_key=str(serializer.validated_data.get('user_private_key')), user_private_key_nonce=str(serializer.validated_data.get('user_private_key_nonce')), user_secret_key=str(serializer.validated_data.get('user_secret_key')), user_secret_key_nonce=str(serializer.validated_data.get('user_secret_key_nonce')), verify_key=str(serializer.validated_data.get('verify_key')), read=serializer.validated_data.get('read'), write=serializer.validated_data.get('write'), restrict_to_secrets=serializer.validated_data.get('restrict_to_secrets'), allow_insecure_access=serializer.validated_data.get('allow_insecure_access'))\nreturn Response({'api_key_id': api_key.id}, status=status.HTTP_201_CREATED)", "serializer = UpdateAPIKeySerializer(data=request.data, context=self.get_serializer_context())\nif not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\napi_key = serializer.validated_data.get('api_key')\ntitle = serializer.validated_data.get('title')\nread = serializer.validated_data.get('read')\nwrite = serializer.validated_data.get('write')\nrestrict_to_secrets = serializer.validated_data.get('restrict_to_secrets')\nallow_insecure_access = serializer.validated_data.get('allow_insecure_access')\nif title is not None:\n api_key.title = title\nif read is not None and api_key.read != read:\n api_key.read = read\n for token in api_key.tokens.all():\n token.read = read\n token.save()\nif write is not None and api_key.write != write:\n api_key.write = write\n for token in api_key.tokens.all():\n token.write = write\n token.save()\nif restrict_to_secrets is not None:\n api_key.restrict_to_secrets = restrict_to_secrets\nif allow_insecure_access is not None:\n api_key.allow_insecure_access = allow_insecure_access\napi_key.save()\nreturn Response(status=status.HTTP_200_OK)", "serializer = DeleteAPIKeySerializer(data=request.data, context=self.get_serializer_context())\nif not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\napi_key = serializer.validated_data.get('api_key')\napi_key.delete()\nreturn Response(status=status.HTTP_200_OK)"], "bodies_text": "<|body_start_0|>\n if not api_key_id:\n api_keys = []\n for api_key in API_Key.objects.filter(user=request.user):\n api_keys.append({'id': api_key.id, 'title': api_key.title, 'read': api_key.read, 'write': api_key.write, 'restrict_to_secrets': api_key.restrict_to_secrets, 'allow_insecure_access': api_key.allow_insecure_access, 'active': api_key.active})\n return Response({'api_keys': api_keys}, status=status.HTTP_200_OK)\n else:\n try:\n api_key = API_Key.objects.get(id=api_key_id, user=request.user)\n except API_Key.DoesNotExist:\n return Response({'message': 'NO_PERMISSION_OR_NOT_EXIST', 'resource_id': api_key_id}, status=status.HTTP_400_BAD_REQUEST)\n response = {'id': api_key.id, 'title': api_key.title, 'public_key': api_key.public_key, 'private_key': api_key.private_key, 'private_key_nonce': api_key.private_key_nonce, 'secret_key': api_key.secret_key, 'secret_key_nonce': api_key.secret_key_nonce, 'read': 
api_key.read, 'write': api_key.write, 'restrict_to_secrets': api_key.restrict_to_secrets, 'allow_insecure_access': api_key.allow_insecure_access, 'active': api_key.active}\n return Response(response, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n serializer = CreateAPIKeySerializer(data=request.data, context=self.get_serializer_context())\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n api_key = API_Key.objects.create(user=request.user, title=str(serializer.validated_data.get('title')), public_key=str(serializer.validated_data.get('public_key')), private_key=str(serializer.validated_data.get('private_key')), private_key_nonce=str(serializer.validated_data.get('private_key_nonce')), secret_key=str(serializer.validated_data.get('secret_key')), secret_key_nonce=str(serializer.validated_data.get('secret_key_nonce')), user_private_key=str(serializer.validated_data.get('user_private_key')), user_private_key_nonce=str(serializer.validated_data.get('user_private_key_nonce')), user_secret_key=str(serializer.validated_data.get('user_secret_key')), user_secret_key_nonce=str(serializer.validated_data.get('user_secret_key_nonce')), verify_key=str(serializer.validated_data.get('verify_key')), read=serializer.validated_data.get('read'), write=serializer.validated_data.get('write'), restrict_to_secrets=serializer.validated_data.get('restrict_to_secrets'), allow_insecure_access=serializer.validated_data.get('allow_insecure_access'))\n return Response({'api_key_id': api_key.id}, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n serializer = UpdateAPIKeySerializer(data=request.data, context=self.get_serializer_context())\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n api_key = serializer.validated_data.get('api_key')\n title = serializer.validated_data.get('title')\n read = serializer.validated_data.get('read')\n write = serializer.validated_data.get('write')\n restrict_to_secrets = serializer.validated_data.get('restrict_to_secrets')\n allow_insecure_access = serializer.validated_data.get('allow_insecure_access')\n if title is not None:\n api_key.title = title\n if read is not None and api_key.read != read:\n api_key.read = read\n for token in api_key.tokens.all():\n token.read = read\n token.save()\n if write is not None and api_key.write != write:\n api_key.write = write\n for token in api_key.tokens.all():\n token.write = write\n token.save()\n if restrict_to_secrets is not None:\n api_key.restrict_to_secrets = restrict_to_secrets\n if allow_insecure_access is not None:\n api_key.allow_insecure_access = allow_insecure_access\n api_key.save()\n return Response(status=status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n serializer = DeleteAPIKeySerializer(data=request.data, context=self.get_serializer_context())\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n api_key = serializer.validated_data.get('api_key')\n api_key.delete()\n return Response(status=status.HTTP_200_OK)\n<|end_body_3|>\n", "class_docstring": "Check the REST Token and returns a list of all api_keys or the specified api_keys details", "class_name": "APIKeyView", "detected_licenses": ["BSD-3-Clause", "MIT", "Apache-2.0", "BSD-2-Clause", "CC0-1.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass APIKeyView:\n \"\"\"Check the REST Token and returns a list of all api_keys or the 
specified api_keys details\"\"\"\n\n def get(self, request, api_key_id=None, *args, **kwargs):\n \"\"\"Returns either a list of all api_keys with own access privileges or the members specified api_key :param request: :type request: :param api_key_id: :type api_key_id: :param args: :type args: :param kwargs: :type kwargs: :return: 200 / 403 :rtype:\"\"\"\n <|body_0|>\n\n def put(self, request, *args, **kwargs):\n \"\"\"Creates an api_key :param request: :type request: :param args: :type args: :param kwargs: :type kwargs: :return: 201 / 400 :rtype:\"\"\"\n <|body_1|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"Updates a api_key :param request: :type request: :param args: :type args: :param kwargs: :type kwargs: :return: :rtype:\"\"\"\n <|body_2|>\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Deletes an api_key :param request: :param args: :param kwargs: :return: 200 / 400\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not api_key_id:\n api_keys = []\n for api_key in API_Key.objects.filter(user=request.user):\n api_keys.append({'id': api_key.id, 'title': api_key.title, 'read': api_key.read, 'write': api_key.write, 'restrict_to_secrets': api_key.restrict_to_secrets, 'allow_insecure_access': api_key.allow_insecure_access, 'active': api_key.active})\n return Response({'api_keys': api_keys}, status=status.HTTP_200_OK)\n else:\n try:\n api_key = API_Key.objects.get(id=api_key_id, user=request.user)\n except API_Key.DoesNotExist:\n return Response({'message': 'NO_PERMISSION_OR_NOT_EXIST', 'resource_id': api_key_id}, status=status.HTTP_400_BAD_REQUEST)\n response = {'id': api_key.id, 'title': api_key.title, 'public_key': api_key.public_key, 'private_key': api_key.private_key, 'private_key_nonce': api_key.private_key_nonce, 'secret_key': api_key.secret_key, 'secret_key_nonce': api_key.secret_key_nonce, 'read': api_key.read, 'write': api_key.write, 'restrict_to_secrets': api_key.restrict_to_secrets, 'allow_insecure_access': api_key.allow_insecure_access, 'active': api_key.active}\n return Response(response, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n serializer = CreateAPIKeySerializer(data=request.data, context=self.get_serializer_context())\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n api_key = API_Key.objects.create(user=request.user, title=str(serializer.validated_data.get('title')), public_key=str(serializer.validated_data.get('public_key')), private_key=str(serializer.validated_data.get('private_key')), private_key_nonce=str(serializer.validated_data.get('private_key_nonce')), secret_key=str(serializer.validated_data.get('secret_key')), secret_key_nonce=str(serializer.validated_data.get('secret_key_nonce')), user_private_key=str(serializer.validated_data.get('user_private_key')), user_private_key_nonce=str(serializer.validated_data.get('user_private_key_nonce')), user_secret_key=str(serializer.validated_data.get('user_secret_key')), user_secret_key_nonce=str(serializer.validated_data.get('user_secret_key_nonce')), verify_key=str(serializer.validated_data.get('verify_key')), read=serializer.validated_data.get('read'), write=serializer.validated_data.get('write'), restrict_to_secrets=serializer.validated_data.get('restrict_to_secrets'), allow_insecure_access=serializer.validated_data.get('allow_insecure_access'))\n return Response({'api_key_id': api_key.id}, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n serializer = 
UpdateAPIKeySerializer(data=request.data, context=self.get_serializer_context())\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n api_key = serializer.validated_data.get('api_key')\n title = serializer.validated_data.get('title')\n read = serializer.validated_data.get('read')\n write = serializer.validated_data.get('write')\n restrict_to_secrets = serializer.validated_data.get('restrict_to_secrets')\n allow_insecure_access = serializer.validated_data.get('allow_insecure_access')\n if title is not None:\n api_key.title = title\n if read is not None and api_key.read != read:\n api_key.read = read\n for token in api_key.tokens.all():\n token.read = read\n token.save()\n if write is not None and api_key.write != write:\n api_key.write = write\n for token in api_key.tokens.all():\n token.write = write\n token.save()\n if restrict_to_secrets is not None:\n api_key.restrict_to_secrets = restrict_to_secrets\n if allow_insecure_access is not None:\n api_key.allow_insecure_access = allow_insecure_access\n api_key.save()\n return Response(status=status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n serializer = DeleteAPIKeySerializer(data=request.data, context=self.get_serializer_context())\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n api_key = serializer.validated_data.get('api_key')\n api_key.delete()\n return Response(status=status.HTTP_200_OK)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000270", "length_bytes": 7116, "license_type": "permissive", "methods": [{"docstring": "Returns either a list of all api_keys with own access privileges or the members specified api_key :param request: :type request: :param api_key_id: :type api_key_id: :param args: :type args: :param kwargs: :type kwargs: :return: 200 / 403 :rtype:", "name": "get", "signature": "def get(self, request, api_key_id=None, *args, **kwargs)"}, {"docstring": "Creates an api_key :param request: :type request: :param args: :type args: :param kwargs: :type kwargs: :return: 201 / 400 :rtype:", "name": "put", "signature": "def put(self, request, *args, **kwargs)"}, {"docstring": "Updates a api_key :param request: :type request: :param args: :type args: :param kwargs: :type kwargs: :return: :rtype:", "name": "post", "signature": "def post(self, request, *args, **kwargs)"}, {"docstring": "Deletes an api_key :param request: :param args: :param kwargs: :return: 200 / 400", "name": "delete", "signature": "def delete(self, request, *args, **kwargs)"}], "n_methods": 4, "prompt": "Implement the Python class `APIKeyView` described below.\n\nClass description:\nCheck the REST Token and returns a list of all api_keys or the specified api_keys details\n\nMethod signatures and docstrings:\n- def get(self, request, api_key_id=None, *args, **kwargs): Returns either a list of all api_keys with own access privileges or the members specified api_key :param request: :type request: :param api_key_id: :type api_key_id: :param args: :type args: :param kwargs: :type kwargs: :return: 200 / 403 :rtype:\n- def put(self, request, *args, **kwargs): Creates an api_key :param request: :type request: :param args: :type args: :param kwargs: :type kwargs: :return: 201 / 400 :rtype:\n- def post(self, request, *args, **kwargs): Updates a api_key :param request: :type request: :param args: :type args: :param kwargs: :type kwargs: :return: :rtype:\n- def delete(self, request, *args, **kwargs): Deletes an api_key :param request: :param args: 
:param kwargs: :return: 200 / 400", "prompted_full_text": "Implement the Python class `APIKeyView` described below.\n\nClass description:\nCheck the REST Token and returns a list of all api_keys or the specified api_keys details\n\nMethod signatures and docstrings:\n- def get(self, request, api_key_id=None, *args, **kwargs): Returns either a list of all api_keys with own access privileges or the members specified api_key :param request: :type request: :param api_key_id: :type api_key_id: :param args: :type args: :param kwargs: :type kwargs: :return: 200 / 403 :rtype:\n- def put(self, request, *args, **kwargs): Creates an api_key :param request: :type request: :param args: :type args: :param kwargs: :type kwargs: :return: 201 / 400 :rtype:\n- def post(self, request, *args, **kwargs): Updates a api_key :param request: :type request: :param args: :type args: :param kwargs: :type kwargs: :return: :rtype:\n- def delete(self, request, *args, **kwargs): Deletes an api_key :param request: :param args: :param kwargs: :return: 200 / 400\n\n<|skeleton|>\nclass APIKeyView:\n \"\"\"Check the REST Token and returns a list of all api_keys or the specified api_keys details\"\"\"\n\n def get(self, request, api_key_id=None, *args, **kwargs):\n \"\"\"Returns either a list of all api_keys with own access privileges or the members specified api_key :param request: :type request: :param api_key_id: :type api_key_id: :param args: :type args: :param kwargs: :type kwargs: :return: 200 / 403 :rtype:\"\"\"\n <|body_0|>\n\n def put(self, request, *args, **kwargs):\n \"\"\"Creates an api_key :param request: :type request: :param args: :type args: :param kwargs: :type kwargs: :return: 201 / 400 :rtype:\"\"\"\n <|body_1|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"Updates a api_key :param request: :type request: :param args: :type args: :param kwargs: :type kwargs: :return: :rtype:\"\"\"\n <|body_2|>\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Deletes an api_key :param request: :param args: :param kwargs: :return: 200 / 400\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not api_key_id:\n api_keys = []\n for api_key in API_Key.objects.filter(user=request.user):\n api_keys.append({'id': api_key.id, 'title': api_key.title, 'read': api_key.read, 'write': api_key.write, 'restrict_to_secrets': api_key.restrict_to_secrets, 'allow_insecure_access': api_key.allow_insecure_access, 'active': api_key.active})\n return Response({'api_keys': api_keys}, status=status.HTTP_200_OK)\n else:\n try:\n api_key = API_Key.objects.get(id=api_key_id, user=request.user)\n except API_Key.DoesNotExist:\n return Response({'message': 'NO_PERMISSION_OR_NOT_EXIST', 'resource_id': api_key_id}, status=status.HTTP_400_BAD_REQUEST)\n response = {'id': api_key.id, 'title': api_key.title, 'public_key': api_key.public_key, 'private_key': api_key.private_key, 'private_key_nonce': api_key.private_key_nonce, 'secret_key': api_key.secret_key, 'secret_key_nonce': api_key.secret_key_nonce, 'read': api_key.read, 'write': api_key.write, 'restrict_to_secrets': api_key.restrict_to_secrets, 'allow_insecure_access': api_key.allow_insecure_access, 'active': api_key.active}\n return Response(response, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n serializer = CreateAPIKeySerializer(data=request.data, context=self.get_serializer_context())\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n api_key = API_Key.objects.create(user=request.user, 
title=str(serializer.validated_data.get('title')), public_key=str(serializer.validated_data.get('public_key')), private_key=str(serializer.validated_data.get('private_key')), private_key_nonce=str(serializer.validated_data.get('private_key_nonce')), secret_key=str(serializer.validated_data.get('secret_key')), secret_key_nonce=str(serializer.validated_data.get('secret_key_nonce')), user_private_key=str(serializer.validated_data.get('user_private_key')), user_private_key_nonce=str(serializer.validated_data.get('user_private_key_nonce')), user_secret_key=str(serializer.validated_data.get('user_secret_key')), user_secret_key_nonce=str(serializer.validated_data.get('user_secret_key_nonce')), verify_key=str(serializer.validated_data.get('verify_key')), read=serializer.validated_data.get('read'), write=serializer.validated_data.get('write'), restrict_to_secrets=serializer.validated_data.get('restrict_to_secrets'), allow_insecure_access=serializer.validated_data.get('allow_insecure_access'))\n return Response({'api_key_id': api_key.id}, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n\n<|body_start_2|>\n serializer = UpdateAPIKeySerializer(data=request.data, context=self.get_serializer_context())\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n api_key = serializer.validated_data.get('api_key')\n title = serializer.validated_data.get('title')\n read = serializer.validated_data.get('read')\n write = serializer.validated_data.get('write')\n restrict_to_secrets = serializer.validated_data.get('restrict_to_secrets')\n allow_insecure_access = serializer.validated_data.get('allow_insecure_access')\n if title is not None:\n api_key.title = title\n if read is not None and api_key.read != read:\n api_key.read = read\n for token in api_key.tokens.all():\n token.read = read\n token.save()\n if write is not None and api_key.write != write:\n api_key.write = write\n for token in api_key.tokens.all():\n token.write = write\n token.save()\n if restrict_to_secrets is not None:\n api_key.restrict_to_secrets = restrict_to_secrets\n if allow_insecure_access is not None:\n api_key.allow_insecure_access = allow_insecure_access\n api_key.save()\n return Response(status=status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n serializer = DeleteAPIKeySerializer(data=request.data, context=self.get_serializer_context())\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n api_key = serializer.validated_data.get('api_key')\n api_key.delete()\n return Response(status=status.HTTP_200_OK)\n<|end_body_3|>\n", "revision_id": "8936aa8ccdee8b9617ef7d894cb9a9a9f6f473cf", "skeleton": "<|skeleton|>\nclass APIKeyView:\n \"\"\"Check the REST Token and returns a list of all api_keys or the specified api_keys details\"\"\"\n\n def get(self, request, api_key_id=None, *args, **kwargs):\n \"\"\"Returns either a list of all api_keys with own access privileges or the members specified api_key :param request: :type request: :param api_key_id: :type api_key_id: :param args: :type args: :param kwargs: :type kwargs: :return: 200 / 403 :rtype:\"\"\"\n <|body_0|>\n\n def put(self, request, *args, **kwargs):\n \"\"\"Creates an api_key :param request: :type request: :param args: :type args: :param kwargs: :type kwargs: :return: 201 / 400 :rtype:\"\"\"\n <|body_1|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"Updates a api_key :param request: :type request: :param args: :type args: :param kwargs: :type kwargs: :return: 
:rtype:\"\"\"\n <|body_2|>\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Deletes an api_key :param request: :param args: :param kwargs: :return: 200 / 400\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class APIKeyView:\n \"\"\"Check the REST Token and returns a list of all api_keys or the specified api_keys details\"\"\"\n\n def get(self, request, api_key_id=None, *args, **kwargs):\n \"\"\"Returns either a list of all api_keys with own access privileges or the members specified api_key :param request: :type request: :param api_key_id: :type api_key_id: :param args: :type args: :param kwargs: :type kwargs: :return: 200 / 403 :rtype:\"\"\"\n if not api_key_id:\n api_keys = []\n for api_key in API_Key.objects.filter(user=request.user):\n api_keys.append({'id': api_key.id, 'title': api_key.title, 'read': api_key.read, 'write': api_key.write, 'restrict_to_secrets': api_key.restrict_to_secrets, 'allow_insecure_access': api_key.allow_insecure_access, 'active': api_key.active})\n return Response({'api_keys': api_keys}, status=status.HTTP_200_OK)\n else:\n try:\n api_key = API_Key.objects.get(id=api_key_id, user=request.user)\n except API_Key.DoesNotExist:\n return Response({'message': 'NO_PERMISSION_OR_NOT_EXIST', 'resource_id': api_key_id}, status=status.HTTP_400_BAD_REQUEST)\n response = {'id': api_key.id, 'title': api_key.title, 'public_key': api_key.public_key, 'private_key': api_key.private_key, 'private_key_nonce': api_key.private_key_nonce, 'secret_key': api_key.secret_key, 'secret_key_nonce': api_key.secret_key_nonce, 'read': api_key.read, 'write': api_key.write, 'restrict_to_secrets': api_key.restrict_to_secrets, 'allow_insecure_access': api_key.allow_insecure_access, 'active': api_key.active}\n return Response(response, status=status.HTTP_200_OK)\n\n def put(self, request, *args, **kwargs):\n \"\"\"Creates an api_key :param request: :type request: :param args: :type args: :param kwargs: :type kwargs: :return: 201 / 400 :rtype:\"\"\"\n serializer = CreateAPIKeySerializer(data=request.data, context=self.get_serializer_context())\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n api_key = API_Key.objects.create(user=request.user, title=str(serializer.validated_data.get('title')), public_key=str(serializer.validated_data.get('public_key')), private_key=str(serializer.validated_data.get('private_key')), private_key_nonce=str(serializer.validated_data.get('private_key_nonce')), secret_key=str(serializer.validated_data.get('secret_key')), secret_key_nonce=str(serializer.validated_data.get('secret_key_nonce')), user_private_key=str(serializer.validated_data.get('user_private_key')), user_private_key_nonce=str(serializer.validated_data.get('user_private_key_nonce')), user_secret_key=str(serializer.validated_data.get('user_secret_key')), user_secret_key_nonce=str(serializer.validated_data.get('user_secret_key_nonce')), verify_key=str(serializer.validated_data.get('verify_key')), read=serializer.validated_data.get('read'), write=serializer.validated_data.get('write'), restrict_to_secrets=serializer.validated_data.get('restrict_to_secrets'), allow_insecure_access=serializer.validated_data.get('allow_insecure_access'))\n return Response({'api_key_id': api_key.id}, status=status.HTTP_201_CREATED)\n\n def post(self, request, *args, **kwargs):\n \"\"\"Updates a api_key :param request: :type request: :param args: :type args: 
:param kwargs: :type kwargs: :return: :rtype:\"\"\"\n serializer = UpdateAPIKeySerializer(data=request.data, context=self.get_serializer_context())\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n api_key = serializer.validated_data.get('api_key')\n title = serializer.validated_data.get('title')\n read = serializer.validated_data.get('read')\n write = serializer.validated_data.get('write')\n restrict_to_secrets = serializer.validated_data.get('restrict_to_secrets')\n allow_insecure_access = serializer.validated_data.get('allow_insecure_access')\n if title is not None:\n api_key.title = title\n if read is not None and api_key.read != read:\n api_key.read = read\n for token in api_key.tokens.all():\n token.read = read\n token.save()\n if write is not None and api_key.write != write:\n api_key.write = write\n for token in api_key.tokens.all():\n token.write = write\n token.save()\n if restrict_to_secrets is not None:\n api_key.restrict_to_secrets = restrict_to_secrets\n if allow_insecure_access is not None:\n api_key.allow_insecure_access = allow_insecure_access\n api_key.save()\n return Response(status=status.HTTP_200_OK)\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Deletes an api_key :param request: :param args: :param kwargs: :return: 200 / 400\"\"\"\n serializer = DeleteAPIKeySerializer(data=request.data, context=self.get_serializer_context())\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n api_key = serializer.validated_data.get('api_key')\n api_key.delete()\n return Response(status=status.HTTP_200_OK)\n", "source": "the_stack_v2_python_sparse", "source_path": "psono/restapi/views/api_key.py", "source_repo": "psono/psono-server", "split": "val", "star_events_count": 76} {"blob_id": "d6a1b10239710e84255053a1949b25a24290514c", "bodies": ["lo, hi = (0, len(nums) - 1)\nwhile lo <= hi:\n mid = (lo + hi) // 2\n if nums[mid] == target:\n return True\n elif nums[mid] < target:\n lo = mid + 1\n elif nums[mid] > target:\n hi = mid - 1\nreturn False", "if len(set(nums)) <= 1:\n return True\nelif nums[0] < nums[-1]:\n return True\nelse:\n return False", "lo, hi = (0, len(nums) - 1)\nmid = (lo + hi) // 2 + 1\nfirst, second = (nums[:mid], nums[mid:])\nA, B = (self.isSortedQuick(first), self.isSortedQuick(second))\nif A and B:\n return self.binarySearch(first, target) or self.binarySearch(second, target)\nelif A:\n return self.binarySearch(first, target) or self.search(second, target)\nelif B:\n return self.binarySearch(second, target) or self.search(first, target)"], "bodies_text": "<|body_start_0|>\n lo, hi = (0, len(nums) - 1)\n while lo <= hi:\n mid = (lo + hi) // 2\n if nums[mid] == target:\n return True\n elif nums[mid] < target:\n lo = mid + 1\n elif nums[mid] > target:\n hi = mid - 1\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if len(set(nums)) <= 1:\n return True\n elif nums[0] < nums[-1]:\n return True\n else:\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n lo, hi = (0, len(nums) - 1)\n mid = (lo + hi) // 2 + 1\n first, second = (nums[:mid], nums[mid:])\n A, B = (self.isSortedQuick(first), self.isSortedQuick(second))\n if A and B:\n return self.binarySearch(first, target) or self.binarySearch(second, target)\n elif A:\n return self.binarySearch(first, target) or self.search(second, target)\n elif B:\n return self.binarySearch(second, target) or self.search(first, target)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution_A1", 
"detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution_A1:\n\n def binarySearch(self, nums: List[int], target: int) -> bool:\n \"\"\"Helper regular binary search if a value is in sorted list\"\"\"\n <|body_0|>\n\n def isSortedQuick(self, nums: List[int]) -> bool:\n \"\"\"Helper function specialized for this question Quickly tell whether an array is sorted on two condition: 1. First element < Last Element 2. If the array has only one repeating elements.\"\"\"\n <|body_1|>\n\n def search(self, nums: List[int], target: int) -> bool:\n \"\"\"Recursive method to break the array into two halves: - binary search the sorted part - Recursive run on the other half\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n lo, hi = (0, len(nums) - 1)\n while lo <= hi:\n mid = (lo + hi) // 2\n if nums[mid] == target:\n return True\n elif nums[mid] < target:\n lo = mid + 1\n elif nums[mid] > target:\n hi = mid - 1\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if len(set(nums)) <= 1:\n return True\n elif nums[0] < nums[-1]:\n return True\n else:\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n lo, hi = (0, len(nums) - 1)\n mid = (lo + hi) // 2 + 1\n first, second = (nums[:mid], nums[mid:])\n A, B = (self.isSortedQuick(first), self.isSortedQuick(second))\n if A and B:\n return self.binarySearch(first, target) or self.binarySearch(second, target)\n elif A:\n return self.binarySearch(first, target) or self.search(second, target)\n elif B:\n return self.binarySearch(second, target) or self.search(first, target)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000271", "length_bytes": 6122, "license_type": "permissive", "methods": [{"docstring": "Helper regular binary search if a value is in sorted list", "name": "binarySearch", "signature": "def binarySearch(self, nums: List[int], target: int) -> bool"}, {"docstring": "Helper function specialized for this question Quickly tell whether an array is sorted on two condition: 1. First element < Last Element 2. If the array has only one repeating elements.", "name": "isSortedQuick", "signature": "def isSortedQuick(self, nums: List[int]) -> bool"}, {"docstring": "Recursive method to break the array into two halves: - binary search the sorted part - Recursive run on the other half", "name": "search", "signature": "def search(self, nums: List[int], target: int) -> bool"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001465", "prompt": "Implement the Python class `Solution_A1` described below.\n\nClass description:\nImplement the Solution_A1 class.\n\nMethod signatures and docstrings:\n- def binarySearch(self, nums: List[int], target: int) -> bool: Helper regular binary search if a value is in sorted list\n- def isSortedQuick(self, nums: List[int]) -> bool: Helper function specialized for this question Quickly tell whether an array is sorted on two condition: 1. First element < Last Element 2. 
If the array has only one repeating elements.\n- def search(self, nums: List[int], target: int) -> bool: Recursive method to break the array into two halves: - binary search the sorted part - Recursive run on the other half", "prompted_full_text": "Implement the Python class `Solution_A1` described below.\n\nClass description:\nImplement the Solution_A1 class.\n\nMethod signatures and docstrings:\n- def binarySearch(self, nums: List[int], target: int) -> bool: Helper regular binary search if a value is in sorted list\n- def isSortedQuick(self, nums: List[int]) -> bool: Helper function specialized for this question Quickly tell whether an array is sorted on two condition: 1. First element < Last Element 2. If the array has only one repeating elements.\n- def search(self, nums: List[int], target: int) -> bool: Recursive method to break the array into two halves: - binary search the sorted part - Recursive run on the other half\n\n<|skeleton|>\nclass Solution_A1:\n\n def binarySearch(self, nums: List[int], target: int) -> bool:\n \"\"\"Helper regular binary search if a value is in sorted list\"\"\"\n <|body_0|>\n\n def isSortedQuick(self, nums: List[int]) -> bool:\n \"\"\"Helper function specialized for this question Quickly tell whether an array is sorted on two condition: 1. First element < Last Element 2. If the array has only one repeating elements.\"\"\"\n <|body_1|>\n\n def search(self, nums: List[int], target: int) -> bool:\n \"\"\"Recursive method to break the array into two halves: - binary search the sorted part - Recursive run on the other half\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n lo, hi = (0, len(nums) - 1)\n while lo <= hi:\n mid = (lo + hi) // 2\n if nums[mid] == target:\n return True\n elif nums[mid] < target:\n lo = mid + 1\n elif nums[mid] > target:\n hi = mid - 1\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if len(set(nums)) <= 1:\n return True\n elif nums[0] < nums[-1]:\n return True\n else:\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n lo, hi = (0, len(nums) - 1)\n mid = (lo + hi) // 2 + 1\n first, second = (nums[:mid], nums[mid:])\n A, B = (self.isSortedQuick(first), self.isSortedQuick(second))\n if A and B:\n return self.binarySearch(first, target) or self.binarySearch(second, target)\n elif A:\n return self.binarySearch(first, target) or self.search(second, target)\n elif B:\n return self.binarySearch(second, target) or self.search(first, target)\n<|end_body_2|>\n", "revision_id": "143422321cbc3715ca08f6c3af8f960a55887ced", "skeleton": "<|skeleton|>\nclass Solution_A1:\n\n def binarySearch(self, nums: List[int], target: int) -> bool:\n \"\"\"Helper regular binary search if a value is in sorted list\"\"\"\n <|body_0|>\n\n def isSortedQuick(self, nums: List[int]) -> bool:\n \"\"\"Helper function specialized for this question Quickly tell whether an array is sorted on two condition: 1. First element < Last Element 2. 
If the array has only one repeating elements.\"\"\"\n <|body_1|>\n\n def search(self, nums: List[int], target: int) -> bool:\n \"\"\"Recursive method to break the array into two halves: - binary search the sorted part - Recursive run on the other half\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution_A1:\n def binarySearch(self, nums: List[int], target: int) -> bool:\n \"\"\"Helper regular binary search if a value is in sorted list\"\"\"\n lo, hi = (0, len(nums) - 1)\n while lo <= hi:\n mid = (lo + hi) // 2\n if nums[mid] == target:\n return True\n elif nums[mid] < target:\n lo = mid + 1\n elif nums[mid] > target:\n hi = mid - 1\n return False\n\n def isSortedQuick(self, nums: List[int]) -> bool:\n \"\"\"Helper function specialized for this question Quickly tell whether an array is sorted on two condition: 1. First element < Last Element 2. If the array has only one repeating elements.\"\"\"\n if len(set(nums)) <= 1:\n return True\n elif nums[0] < nums[-1]:\n return True\n else:\n return False\n\n def search(self, nums: List[int], target: int) -> bool:\n \"\"\"Recursive method to break the array into two halves: - binary search the sorted part - Recursive run on the other half\"\"\"\n lo, hi = (0, len(nums) - 1)\n mid = (lo + hi) // 2 + 1\n first, second = (nums[:mid], nums[mid:])\n A, B = (self.isSortedQuick(first), self.isSortedQuick(second))\n if A and B:\n return self.binarySearch(first, target) or self.binarySearch(second, target)\n elif A:\n return self.binarySearch(first, target) or self.search(second, target)\n elif B:\n return self.binarySearch(second, target) or self.search(first, target)\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode/LC081_search_in_rotated_sorted_array_ii.py", "source_repo": "jxie0755/Learning_Python", "split": "val", "star_events_count": 0} {"blob_id": "7c010d0271e2abcc1248b638d0706518bee0d77a", "bodies": ["self.prefix = filename\nself.backup_count = backup_count\nself.interval = interval.upper()\nself.re_match = '^\\\\d{4}-\\\\d{2}-\\\\d{2}'\nself.interval_formater_dict = {'S': '%Y-%m-%d-%H-%M-%S', 'M': '%Y-%m-%d-%H-%M', 'H': '%Y-%m-%d-%H', 'D': '%Y-%m-%d'}\nself.formater = self.interval_formater_dict.get(interval)\nif not self.formater:\n raise ValueError(u'指定的日期间隔单位无效: %s' % self.interval)\nself.file_path = u'{}_{}.log'.format(self.prefix, datetime.now().strftime(self.formater))\n_dir = os.path.dirname(self.file_path)\ntry:\n if not os.path.exists(_dir):\n os.makedirs(_dir)\nexcept Exception as ex:\n print(f'创建log文件夹{self.file_path}失败:{str(ex)}', file=sys.stderr)\n pass\nprint(u'MultiprocessHandler create logger:{}'.format(self.file_path))\nlogging.FileHandler.__init__(self, self.file_path, 'a+', encoding, delay)", "_filePath = u'{}_{}.log'.format(self.prefix, datetime.now().strftime(self.formater))\nif _filePath != self.file_path:\n self.file_path = _filePath\n return True\nreturn False", "self.baseFilename = os.path.abspath(self.file_path)\nif self.stream:\n self.stream.close()\n self.stream = None\nif not self.delay:\n self.stream = self._open()\nif self.backup_count > 0:\n print('删除日志')\n for s in self.get_expired_files():\n print(s)\n os.remove(s)", "dir_name, _ = os.path.split(self.baseFilename)\nfile_names = os.listdir(dir_name)\nresult = []\nprefix = self.prefix + '.'\nplen = len(prefix)\nfor file_name in file_names:\n if file_name[:plen] == prefix:\n suffix = file_name[plen:]\n if 
re.compile(self.re_match).match(suffix):\n result.append(os.path.join(dir_name, file_name))\nresult.sort()\nif len(result) < self.backup_count:\n result = []\nelse:\n result = result[:len(result) - self.backup_count]\nreturn result", "try:\n if self.should_change_file():\n self.do_change_file()\n logging.FileHandler.emit(self, record)\nexcept (KeyboardInterrupt, SystemExit):\n raise\nexcept Exception:\n self.handleError(record)"], "bodies_text": "<|body_start_0|>\n self.prefix = filename\n self.backup_count = backup_count\n self.interval = interval.upper()\n self.re_match = '^\\\\d{4}-\\\\d{2}-\\\\d{2}'\n self.interval_formater_dict = {'S': '%Y-%m-%d-%H-%M-%S', 'M': '%Y-%m-%d-%H-%M', 'H': '%Y-%m-%d-%H', 'D': '%Y-%m-%d'}\n self.formater = self.interval_formater_dict.get(interval)\n if not self.formater:\n raise ValueError(u'指定的日期间隔单位无效: %s' % self.interval)\n self.file_path = u'{}_{}.log'.format(self.prefix, datetime.now().strftime(self.formater))\n _dir = os.path.dirname(self.file_path)\n try:\n if not os.path.exists(_dir):\n os.makedirs(_dir)\n except Exception as ex:\n print(f'创建log文件夹{self.file_path}失败:{str(ex)}', file=sys.stderr)\n pass\n print(u'MultiprocessHandler create logger:{}'.format(self.file_path))\n logging.FileHandler.__init__(self, self.file_path, 'a+', encoding, delay)\n<|end_body_0|>\n\n<|body_start_1|>\n _filePath = u'{}_{}.log'.format(self.prefix, datetime.now().strftime(self.formater))\n if _filePath != self.file_path:\n self.file_path = _filePath\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n self.baseFilename = os.path.abspath(self.file_path)\n if self.stream:\n self.stream.close()\n self.stream = None\n if not self.delay:\n self.stream = self._open()\n if self.backup_count > 0:\n print('删除日志')\n for s in self.get_expired_files():\n print(s)\n os.remove(s)\n<|end_body_2|>\n\n<|body_start_3|>\n dir_name, _ = os.path.split(self.baseFilename)\n file_names = os.listdir(dir_name)\n result = []\n prefix = self.prefix + '.'\n plen = len(prefix)\n for file_name in file_names:\n if file_name[:plen] == prefix:\n suffix = file_name[plen:]\n if re.compile(self.re_match).match(suffix):\n result.append(os.path.join(dir_name, file_name))\n result.sort()\n if len(result) < self.backup_count:\n result = []\n else:\n result = result[:len(result) - self.backup_count]\n return result\n<|end_body_3|>\n\n<|body_start_4|>\n try:\n if self.should_change_file():\n self.do_change_file()\n logging.FileHandler.emit(self, record)\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n self.handleError(record)\n<|end_body_4|>\n", "class_docstring": "支持多进程的TimedRotatingFileHandler", "class_name": "MultiprocessHandler", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MultiprocessHandler:\n \"\"\"支持多进程的TimedRotatingFileHandler\"\"\"\n\n def __init__(self, filename: str, interval: str='D', backup_count: int=0, encoding: str=None, delay: bool=False):\n \"\"\"filename 日志文件名, interval 时间间隔的单位, backup_count 保留文件个数,0表示不删除 delay 是否开启 OutSteam缓存 True 表示开启缓存,OutStream输出到缓存,待缓存区满后,刷新缓存区,并输出缓存数据到文件。 False表示不缓存,OutStrea直接输出到文件\"\"\"\n <|body_0|>\n\n def should_change_file(self):\n \"\"\"更改日志写入目的写入文件 :return True 表示已更改,False 表示未更改\"\"\"\n <|body_1|>\n\n def do_change_file(self):\n \"\"\"输出信息到日志文件,并删除多于保留个数的所有日志文件\"\"\"\n <|body_2|>\n\n def get_expired_files(self):\n \"\"\"获得过期需要删除的日志文件\"\"\"\n <|body_3|>\n\n def emit(self, record):\n \"\"\"发送一个日志记录 覆盖FileHandler中的emit方法,logging会自动调用此方法\"\"\"\n 
<|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.prefix = filename\n self.backup_count = backup_count\n self.interval = interval.upper()\n self.re_match = '^\\\\d{4}-\\\\d{2}-\\\\d{2}'\n self.interval_formater_dict = {'S': '%Y-%m-%d-%H-%M-%S', 'M': '%Y-%m-%d-%H-%M', 'H': '%Y-%m-%d-%H', 'D': '%Y-%m-%d'}\n self.formater = self.interval_formater_dict.get(interval)\n if not self.formater:\n raise ValueError(u'指定的日期间隔单位无效: %s' % self.interval)\n self.file_path = u'{}_{}.log'.format(self.prefix, datetime.now().strftime(self.formater))\n _dir = os.path.dirname(self.file_path)\n try:\n if not os.path.exists(_dir):\n os.makedirs(_dir)\n except Exception as ex:\n print(f'创建log文件夹{self.file_path}失败:{str(ex)}', file=sys.stderr)\n pass\n print(u'MultiprocessHandler create logger:{}'.format(self.file_path))\n logging.FileHandler.__init__(self, self.file_path, 'a+', encoding, delay)\n<|end_body_0|>\n\n<|body_start_1|>\n _filePath = u'{}_{}.log'.format(self.prefix, datetime.now().strftime(self.formater))\n if _filePath != self.file_path:\n self.file_path = _filePath\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n self.baseFilename = os.path.abspath(self.file_path)\n if self.stream:\n self.stream.close()\n self.stream = None\n if not self.delay:\n self.stream = self._open()\n if self.backup_count > 0:\n print('删除日志')\n for s in self.get_expired_files():\n print(s)\n os.remove(s)\n<|end_body_2|>\n\n<|body_start_3|>\n dir_name, _ = os.path.split(self.baseFilename)\n file_names = os.listdir(dir_name)\n result = []\n prefix = self.prefix + '.'\n plen = len(prefix)\n for file_name in file_names:\n if file_name[:plen] == prefix:\n suffix = file_name[plen:]\n if re.compile(self.re_match).match(suffix):\n result.append(os.path.join(dir_name, file_name))\n result.sort()\n if len(result) < self.backup_count:\n result = []\n else:\n result = result[:len(result) - self.backup_count]\n return result\n<|end_body_3|>\n\n<|body_start_4|>\n try:\n if self.should_change_file():\n self.do_change_file()\n logging.FileHandler.emit(self, record)\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n self.handleError(record)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000272", "length_bytes": 9763, "license_type": "permissive", "methods": [{"docstring": "filename 日志文件名, interval 时间间隔的单位, backup_count 保留文件个数,0表示不删除 delay 是否开启 OutSteam缓存 True 表示开启缓存,OutStream输出到缓存,待缓存区满后,刷新缓存区,并输出缓存数据到文件。 False表示不缓存,OutStrea直接输出到文件", "name": "__init__", "signature": "def __init__(self, filename: str, interval: str='D', backup_count: int=0, encoding: str=None, delay: bool=False)"}, {"docstring": "更改日志写入目的写入文件 :return True 表示已更改,False 表示未更改", "name": "should_change_file", "signature": "def should_change_file(self)"}, {"docstring": "输出信息到日志文件,并删除多于保留个数的所有日志文件", "name": "do_change_file", "signature": "def do_change_file(self)"}, {"docstring": "获得过期需要删除的日志文件", "name": "get_expired_files", "signature": "def get_expired_files(self)"}, {"docstring": "发送一个日志记录 覆盖FileHandler中的emit方法,logging会自动调用此方法", "name": "emit", "signature": "def emit(self, record)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_000969", "prompt": "Implement the Python class `MultiprocessHandler` described below.\n\nClass description:\n支持多进程的TimedRotatingFileHandler\n\nMethod signatures and docstrings:\n- def __init__(self, filename: str, interval: str='D', backup_count: int=0, encoding: str=None, delay: bool=False): filename 日志文件名, interval 时间间隔的单位, backup_count 保留文件个数,0表示不删除 delay 是否开启 OutSteam缓存 True 
表示开启缓存,OutStream输出到缓存,待缓存区满后,刷新缓存区,并输出缓存数据到文件。 False表示不缓存,OutStrea直接输出到文件\n- def should_change_file(self): 更改日志写入目的写入文件 :return True 表示已更改,False 表示未更改\n- def do_change_file(self): 输出信息到日志文件,并删除多于保留个数的所有日志文件\n- def get_expired_files(self): 获得过期需要删除的日志文件\n- def emit(self, record): 发送一个日志记录 覆盖FileHandler中的emit方法,logging会自动调用此方法", "prompted_full_text": "Implement the Python class `MultiprocessHandler` described below.\n\nClass description:\n支持多进程的TimedRotatingFileHandler\n\nMethod signatures and docstrings:\n- def __init__(self, filename: str, interval: str='D', backup_count: int=0, encoding: str=None, delay: bool=False): filename 日志文件名, interval 时间间隔的单位, backup_count 保留文件个数,0表示不删除 delay 是否开启 OutSteam缓存 True 表示开启缓存,OutStream输出到缓存,待缓存区满后,刷新缓存区,并输出缓存数据到文件。 False表示不缓存,OutStrea直接输出到文件\n- def should_change_file(self): 更改日志写入目的写入文件 :return True 表示已更改,False 表示未更改\n- def do_change_file(self): 输出信息到日志文件,并删除多于保留个数的所有日志文件\n- def get_expired_files(self): 获得过期需要删除的日志文件\n- def emit(self, record): 发送一个日志记录 覆盖FileHandler中的emit方法,logging会自动调用此方法\n\n<|skeleton|>\nclass MultiprocessHandler:\n \"\"\"支持多进程的TimedRotatingFileHandler\"\"\"\n\n def __init__(self, filename: str, interval: str='D', backup_count: int=0, encoding: str=None, delay: bool=False):\n \"\"\"filename 日志文件名, interval 时间间隔的单位, backup_count 保留文件个数,0表示不删除 delay 是否开启 OutSteam缓存 True 表示开启缓存,OutStream输出到缓存,待缓存区满后,刷新缓存区,并输出缓存数据到文件。 False表示不缓存,OutStrea直接输出到文件\"\"\"\n <|body_0|>\n\n def should_change_file(self):\n \"\"\"更改日志写入目的写入文件 :return True 表示已更改,False 表示未更改\"\"\"\n <|body_1|>\n\n def do_change_file(self):\n \"\"\"输出信息到日志文件,并删除多于保留个数的所有日志文件\"\"\"\n <|body_2|>\n\n def get_expired_files(self):\n \"\"\"获得过期需要删除的日志文件\"\"\"\n <|body_3|>\n\n def emit(self, record):\n \"\"\"发送一个日志记录 覆盖FileHandler中的emit方法,logging会自动调用此方法\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.prefix = filename\n self.backup_count = backup_count\n self.interval = interval.upper()\n self.re_match = '^\\\\d{4}-\\\\d{2}-\\\\d{2}'\n self.interval_formater_dict = {'S': '%Y-%m-%d-%H-%M-%S', 'M': '%Y-%m-%d-%H-%M', 'H': '%Y-%m-%d-%H', 'D': '%Y-%m-%d'}\n self.formater = self.interval_formater_dict.get(interval)\n if not self.formater:\n raise ValueError(u'指定的日期间隔单位无效: %s' % self.interval)\n self.file_path = u'{}_{}.log'.format(self.prefix, datetime.now().strftime(self.formater))\n _dir = os.path.dirname(self.file_path)\n try:\n if not os.path.exists(_dir):\n os.makedirs(_dir)\n except Exception as ex:\n print(f'创建log文件夹{self.file_path}失败:{str(ex)}', file=sys.stderr)\n pass\n print(u'MultiprocessHandler create logger:{}'.format(self.file_path))\n logging.FileHandler.__init__(self, self.file_path, 'a+', encoding, delay)\n<|end_body_0|>\n\n<|body_start_1|>\n _filePath = u'{}_{}.log'.format(self.prefix, datetime.now().strftime(self.formater))\n if _filePath != self.file_path:\n self.file_path = _filePath\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n self.baseFilename = os.path.abspath(self.file_path)\n if self.stream:\n self.stream.close()\n self.stream = None\n if not self.delay:\n self.stream = self._open()\n if self.backup_count > 0:\n print('删除日志')\n for s in self.get_expired_files():\n print(s)\n os.remove(s)\n<|end_body_2|>\n\n<|body_start_3|>\n dir_name, _ = os.path.split(self.baseFilename)\n file_names = os.listdir(dir_name)\n result = []\n prefix = self.prefix + '.'\n plen = len(prefix)\n for file_name in file_names:\n if file_name[:plen] == prefix:\n suffix = file_name[plen:]\n if re.compile(self.re_match).match(suffix):\n 
result.append(os.path.join(dir_name, file_name))\n result.sort()\n if len(result) < self.backup_count:\n result = []\n else:\n result = result[:len(result) - self.backup_count]\n return result\n<|end_body_3|>\n\n<|body_start_4|>\n try:\n if self.should_change_file():\n self.do_change_file()\n logging.FileHandler.emit(self, record)\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n self.handleError(record)\n<|end_body_4|>\n", "revision_id": "7f4fd3cd202712b083ed7dc2f346ba4bb1bda6d7", "skeleton": "<|skeleton|>\nclass MultiprocessHandler:\n \"\"\"支持多进程的TimedRotatingFileHandler\"\"\"\n\n def __init__(self, filename: str, interval: str='D', backup_count: int=0, encoding: str=None, delay: bool=False):\n \"\"\"filename 日志文件名, interval 时间间隔的单位, backup_count 保留文件个数,0表示不删除 delay 是否开启 OutSteam缓存 True 表示开启缓存,OutStream输出到缓存,待缓存区满后,刷新缓存区,并输出缓存数据到文件。 False表示不缓存,OutStrea直接输出到文件\"\"\"\n <|body_0|>\n\n def should_change_file(self):\n \"\"\"更改日志写入目的写入文件 :return True 表示已更改,False 表示未更改\"\"\"\n <|body_1|>\n\n def do_change_file(self):\n \"\"\"输出信息到日志文件,并删除多于保留个数的所有日志文件\"\"\"\n <|body_2|>\n\n def get_expired_files(self):\n \"\"\"获得过期需要删除的日志文件\"\"\"\n <|body_3|>\n\n def emit(self, record):\n \"\"\"发送一个日志记录 覆盖FileHandler中的emit方法,logging会自动调用此方法\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MultiprocessHandler:\n \"\"\"支持多进程的TimedRotatingFileHandler\"\"\"\n\n def __init__(self, filename: str, interval: str='D', backup_count: int=0, encoding: str=None, delay: bool=False):\n \"\"\"filename 日志文件名, interval 时间间隔的单位, backup_count 保留文件个数,0表示不删除 delay 是否开启 OutSteam缓存 True 表示开启缓存,OutStream输出到缓存,待缓存区满后,刷新缓存区,并输出缓存数据到文件。 False表示不缓存,OutStrea直接输出到文件\"\"\"\n self.prefix = filename\n self.backup_count = backup_count\n self.interval = interval.upper()\n self.re_match = '^\\\\d{4}-\\\\d{2}-\\\\d{2}'\n self.interval_formater_dict = {'S': '%Y-%m-%d-%H-%M-%S', 'M': '%Y-%m-%d-%H-%M', 'H': '%Y-%m-%d-%H', 'D': '%Y-%m-%d'}\n self.formater = self.interval_formater_dict.get(interval)\n if not self.formater:\n raise ValueError(u'指定的日期间隔单位无效: %s' % self.interval)\n self.file_path = u'{}_{}.log'.format(self.prefix, datetime.now().strftime(self.formater))\n _dir = os.path.dirname(self.file_path)\n try:\n if not os.path.exists(_dir):\n os.makedirs(_dir)\n except Exception as ex:\n print(f'创建log文件夹{self.file_path}失败:{str(ex)}', file=sys.stderr)\n pass\n print(u'MultiprocessHandler create logger:{}'.format(self.file_path))\n logging.FileHandler.__init__(self, self.file_path, 'a+', encoding, delay)\n\n def should_change_file(self):\n \"\"\"更改日志写入目的写入文件 :return True 表示已更改,False 表示未更改\"\"\"\n _filePath = u'{}_{}.log'.format(self.prefix, datetime.now().strftime(self.formater))\n if _filePath != self.file_path:\n self.file_path = _filePath\n return True\n return False\n\n def do_change_file(self):\n \"\"\"输出信息到日志文件,并删除多于保留个数的所有日志文件\"\"\"\n self.baseFilename = os.path.abspath(self.file_path)\n if self.stream:\n self.stream.close()\n self.stream = None\n if not self.delay:\n self.stream = self._open()\n if self.backup_count > 0:\n print('删除日志')\n for s in self.get_expired_files():\n print(s)\n os.remove(s)\n\n def get_expired_files(self):\n \"\"\"获得过期需要删除的日志文件\"\"\"\n dir_name, _ = os.path.split(self.baseFilename)\n file_names = os.listdir(dir_name)\n result = []\n prefix = self.prefix + '.'\n plen = len(prefix)\n for file_name in file_names:\n if file_name[:plen] == prefix:\n suffix = file_name[plen:]\n if 
re.compile(self.re_match).match(suffix):\n result.append(os.path.join(dir_name, file_name))\n result.sort()\n if len(result) < self.backup_count:\n result = []\n else:\n result = result[:len(result) - self.backup_count]\n return result\n\n def emit(self, record):\n \"\"\"发送一个日志记录 覆盖FileHandler中的emit方法,logging会自动调用此方法\"\"\"\n try:\n if self.should_change_file():\n self.do_change_file()\n logging.FileHandler.emit(self, record)\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n self.handleError(record)\n", "source": "the_stack_v2_python_sparse", "source_path": "vnpy/trader/util_logger.py", "source_repo": "msincenselee/vnpy", "split": "val", "star_events_count": 359} {"blob_id": "a6d54e3d94d5674435b144b4f4f099c24316cbbe", "bodies": ["sub_list = re.findall('.{%s}' % self.word_len, substring)\nsub_dict = self.gen_dict(sub_list)\nif sub_dict == self.word_dic:\n return True\nreturn False", "word_dic = dict.fromkeys(word_list, 0)\nfor w in word_list:\n word_dic[w] += 1\nreturn word_dic", "if len(words) == 0:\n return []\nself.word_len = len(words[0])\nself.word_num = len(words)\nself.word_dic = self.gen_dict(words)\nres = []\nfor i in range(len(s) - self.word_len * self.word_num + 1):\n if self.check(s[i:self.word_len * self.word_num + i]):\n res.append(i)\nreturn res"], "bodies_text": "<|body_start_0|>\n sub_list = re.findall('.{%s}' % self.word_len, substring)\n sub_dict = self.gen_dict(sub_list)\n if sub_dict == self.word_dic:\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n word_dic = dict.fromkeys(word_list, 0)\n for w in word_list:\n word_dic[w] += 1\n return word_dic\n<|end_body_1|>\n\n<|body_start_2|>\n if len(words) == 0:\n return []\n self.word_len = len(words[0])\n self.word_num = len(words)\n self.word_dic = self.gen_dict(words)\n res = []\n for i in range(len(s) - self.word_len * self.word_num + 1):\n if self.check(s[i:self.word_len * self.word_num + i]):\n res.append(i)\n return res\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def check(self, substring):\n \"\"\":type substring: str :rtype: bool\"\"\"\n <|body_0|>\n\n def gen_dict(word_list):\n \"\"\":type word_list: List[str :rtype: dict\"\"\"\n <|body_1|>\n\n def findSubstring(self, s, words):\n \"\"\":type s: str :type words: List[str] :rtype: List[int]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sub_list = re.findall('.{%s}' % self.word_len, substring)\n sub_dict = self.gen_dict(sub_list)\n if sub_dict == self.word_dic:\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n word_dic = dict.fromkeys(word_list, 0)\n for w in word_list:\n word_dic[w] += 1\n return word_dic\n<|end_body_1|>\n\n<|body_start_2|>\n if len(words) == 0:\n return []\n self.word_len = len(words[0])\n self.word_num = len(words)\n self.word_dic = self.gen_dict(words)\n res = []\n for i in range(len(s) - self.word_len * self.word_num + 1):\n if self.check(s[i:self.word_len * self.word_num + i]):\n res.append(i)\n return res\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000273", "length_bytes": 2182, "license_type": "no_license", "methods": [{"docstring": ":type substring: str :rtype: bool", "name": "check", "signature": "def check(self, substring)"}, {"docstring": ":type word_list: List[str :rtype: dict", "name": "gen_dict", "signature": "def gen_dict(word_list)"}, {"docstring": ":type s: str :type words: List[str] :rtype: 
List[int]", "name": "findSubstring", "signature": "def findSubstring(self, s, words)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004676", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def check(self, substring): :type substring: str :rtype: bool\n- def gen_dict(word_list): :type word_list: List[str :rtype: dict\n- def findSubstring(self, s, words): :type s: str :type words: List[str] :rtype: List[int]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def check(self, substring): :type substring: str :rtype: bool\n- def gen_dict(word_list): :type word_list: List[str :rtype: dict\n- def findSubstring(self, s, words): :type s: str :type words: List[str] :rtype: List[int]\n\n<|skeleton|>\nclass Solution:\n\n def check(self, substring):\n \"\"\":type substring: str :rtype: bool\"\"\"\n <|body_0|>\n\n def gen_dict(word_list):\n \"\"\":type word_list: List[str :rtype: dict\"\"\"\n <|body_1|>\n\n def findSubstring(self, s, words):\n \"\"\":type s: str :type words: List[str] :rtype: List[int]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sub_list = re.findall('.{%s}' % self.word_len, substring)\n sub_dict = self.gen_dict(sub_list)\n if sub_dict == self.word_dic:\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n word_dic = dict.fromkeys(word_list, 0)\n for w in word_list:\n word_dic[w] += 1\n return word_dic\n<|end_body_1|>\n\n<|body_start_2|>\n if len(words) == 0:\n return []\n self.word_len = len(words[0])\n self.word_num = len(words)\n self.word_dic = self.gen_dict(words)\n res = []\n for i in range(len(s) - self.word_len * self.word_num + 1):\n if self.check(s[i:self.word_len * self.word_num + i]):\n res.append(i)\n return res\n<|end_body_2|>\n", "revision_id": "f8f3b0cdb47ee6bb4bf9bdc7c2a983f4a882d9dd", "skeleton": "<|skeleton|>\nclass Solution:\n\n def check(self, substring):\n \"\"\":type substring: str :rtype: bool\"\"\"\n <|body_0|>\n\n def gen_dict(word_list):\n \"\"\":type word_list: List[str :rtype: dict\"\"\"\n <|body_1|>\n\n def findSubstring(self, s, words):\n \"\"\":type s: str :type words: List[str] :rtype: List[int]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def check(self, substring):\n \"\"\":type substring: str :rtype: bool\"\"\"\n sub_list = re.findall('.{%s}' % self.word_len, substring)\n sub_dict = self.gen_dict(sub_list)\n if sub_dict == self.word_dic:\n return True\n return False\n\n def gen_dict(word_list):\n \"\"\":type word_list: List[str :rtype: dict\"\"\"\n word_dic = dict.fromkeys(word_list, 0)\n for w in word_list:\n word_dic[w] += 1\n return word_dic\n\n def findSubstring(self, s, words):\n \"\"\":type s: str :type words: List[str] :rtype: List[int]\"\"\"\n if len(words) == 0:\n return []\n self.word_len = len(words[0])\n self.word_num = len(words)\n self.word_dic = self.gen_dict(words)\n res = []\n for i in range(len(s) - self.word_len * self.word_num + 1):\n if self.check(s[i:self.word_len * self.word_num + i]):\n res.append(i)\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "solutions/030-substring-with-concatenation-of-all-words/main.py", "source_repo": "CallMeNP/leetcode", "split": "val", "star_events_count": 0} 
{"blob_id": "f58aaa66a860ff7b50683f7d8ec80a8e9b9ae155", "bodies": ["\"\"\":field\n The object ID.\n \"\"\"\nself.object_id: int = object_id\nself._solver_id: int = solver_id\nself._object_index: int = object_index\n':field\\n The positions of each particle as a numpy array.\\n '\nself.positions: np.ndarray = np.array([], dtype=np.float32)\n':field\\n The velocities of each particle as a numpy array.\\n '\nself.velocities: np.ndarray = np.array([], dtype=np.float32)", "solver_indices = obi_particles.get_solver_indices(self._object_index)[:obi_particles.get_count(self._object_index)]\nself.positions = np.delete(np.take(obi_particles.get_positions(self._solver_id).reshape(-1, 4), solver_indices, axis=0).reshape(-1, 4), 3, 1)\nself.velocities = np.delete(np.take(obi_particles.get_velocities(self._solver_id).reshape(-1, 4), solver_indices, axis=0).reshape(-1, 4), 3, 1)"], "bodies_text": "<|body_start_0|>\n \"\"\":field\n The object ID.\n \"\"\"\n self.object_id: int = object_id\n self._solver_id: int = solver_id\n self._object_index: int = object_index\n ':field\\n The positions of each particle as a numpy array.\\n '\n self.positions: np.ndarray = np.array([], dtype=np.float32)\n ':field\\n The velocities of each particle as a numpy array.\\n '\n self.velocities: np.ndarray = np.array([], dtype=np.float32)\n<|end_body_0|>\n\n<|body_start_1|>\n solver_indices = obi_particles.get_solver_indices(self._object_index)[:obi_particles.get_count(self._object_index)]\n self.positions = np.delete(np.take(obi_particles.get_positions(self._solver_id).reshape(-1, 4), solver_indices, axis=0).reshape(-1, 4), 3, 1)\n self.velocities = np.delete(np.take(obi_particles.get_velocities(self._solver_id).reshape(-1, 4), solver_indices, axis=0).reshape(-1, 4), 3, 1)\n<|end_body_1|>\n", "class_docstring": "Data for an Obi actor.", "class_name": "ObiActor", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ObiActor:\n \"\"\"Data for an Obi actor.\"\"\"\n\n def __init__(self, object_id: int, solver_id: int, object_index: int):\n \"\"\":param object_id: The object ID. :param solver_id: The ID of the object's Obi solver. :param object_index: The index of the object in the `ObiParticles` output data.\"\"\"\n <|body_0|>\n\n def on_communicate(self, obi_particles: ObiParticles) -> None:\n \"\"\"On `communicate()`, update `self.positions` and `self.velocities`. 
:param obi_particles: `ObiParticles` output data.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n \"\"\":field\n The object ID.\n \"\"\"\n self.object_id: int = object_id\n self._solver_id: int = solver_id\n self._object_index: int = object_index\n ':field\\n The positions of each particle as a numpy array.\\n '\n self.positions: np.ndarray = np.array([], dtype=np.float32)\n ':field\\n The velocities of each particle as a numpy array.\\n '\n self.velocities: np.ndarray = np.array([], dtype=np.float32)\n<|end_body_0|>\n\n<|body_start_1|>\n solver_indices = obi_particles.get_solver_indices(self._object_index)[:obi_particles.get_count(self._object_index)]\n self.positions = np.delete(np.take(obi_particles.get_positions(self._solver_id).reshape(-1, 4), solver_indices, axis=0).reshape(-1, 4), 3, 1)\n self.velocities = np.delete(np.take(obi_particles.get_velocities(self._solver_id).reshape(-1, 4), solver_indices, axis=0).reshape(-1, 4), 3, 1)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000274", "length_bytes": 1831, "license_type": "permissive", "methods": [{"docstring": ":param object_id: The object ID. :param solver_id: The ID of the object's Obi solver. :param object_index: The index of the object in the `ObiParticles` output data.", "name": "__init__", "signature": "def __init__(self, object_id: int, solver_id: int, object_index: int)"}, {"docstring": "On `communicate()`, update `self.positions` and `self.velocities`. :param obi_particles: `ObiParticles` output data.", "name": "on_communicate", "signature": "def on_communicate(self, obi_particles: ObiParticles) -> None"}], "n_methods": 2, "prompt": "Implement the Python class `ObiActor` described below.\n\nClass description:\nData for an Obi actor.\n\nMethod signatures and docstrings:\n- def __init__(self, object_id: int, solver_id: int, object_index: int): :param object_id: The object ID. :param solver_id: The ID of the object's Obi solver. :param object_index: The index of the object in the `ObiParticles` output data.\n- def on_communicate(self, obi_particles: ObiParticles) -> None: On `communicate()`, update `self.positions` and `self.velocities`. :param obi_particles: `ObiParticles` output data.", "prompted_full_text": "Implement the Python class `ObiActor` described below.\n\nClass description:\nData for an Obi actor.\n\nMethod signatures and docstrings:\n- def __init__(self, object_id: int, solver_id: int, object_index: int): :param object_id: The object ID. :param solver_id: The ID of the object's Obi solver. :param object_index: The index of the object in the `ObiParticles` output data.\n- def on_communicate(self, obi_particles: ObiParticles) -> None: On `communicate()`, update `self.positions` and `self.velocities`. :param obi_particles: `ObiParticles` output data.\n\n<|skeleton|>\nclass ObiActor:\n \"\"\"Data for an Obi actor.\"\"\"\n\n def __init__(self, object_id: int, solver_id: int, object_index: int):\n \"\"\":param object_id: The object ID. :param solver_id: The ID of the object's Obi solver. :param object_index: The index of the object in the `ObiParticles` output data.\"\"\"\n <|body_0|>\n\n def on_communicate(self, obi_particles: ObiParticles) -> None:\n \"\"\"On `communicate()`, update `self.positions` and `self.velocities`. 
:param obi_particles: `ObiParticles` output data.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n \"\"\":field\n The object ID.\n \"\"\"\n self.object_id: int = object_id\n self._solver_id: int = solver_id\n self._object_index: int = object_index\n ':field\\n The positions of each particle as a numpy array.\\n '\n self.positions: np.ndarray = np.array([], dtype=np.float32)\n ':field\\n The velocities of each particle as a numpy array.\\n '\n self.velocities: np.ndarray = np.array([], dtype=np.float32)\n<|end_body_0|>\n\n<|body_start_1|>\n solver_indices = obi_particles.get_solver_indices(self._object_index)[:obi_particles.get_count(self._object_index)]\n self.positions = np.delete(np.take(obi_particles.get_positions(self._solver_id).reshape(-1, 4), solver_indices, axis=0).reshape(-1, 4), 3, 1)\n self.velocities = np.delete(np.take(obi_particles.get_velocities(self._solver_id).reshape(-1, 4), solver_indices, axis=0).reshape(-1, 4), 3, 1)\n<|end_body_1|>\n", "revision_id": "9df96fba455b327bb360d8dd5886d8754046c690", "skeleton": "<|skeleton|>\nclass ObiActor:\n \"\"\"Data for an Obi actor.\"\"\"\n\n def __init__(self, object_id: int, solver_id: int, object_index: int):\n \"\"\":param object_id: The object ID. :param solver_id: The ID of the object's Obi solver. :param object_index: The index of the object in the `ObiParticles` output data.\"\"\"\n <|body_0|>\n\n def on_communicate(self, obi_particles: ObiParticles) -> None:\n \"\"\"On `communicate()`, update `self.positions` and `self.velocities`. :param obi_particles: `ObiParticles` output data.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ObiActor:\n \"\"\"Data for an Obi actor.\"\"\"\n\n def __init__(self, object_id: int, solver_id: int, object_index: int):\n \"\"\":param object_id: The object ID. :param solver_id: The ID of the object's Obi solver. :param object_index: The index of the object in the `ObiParticles` output data.\"\"\"\n \"\"\":field\n The object ID.\n \"\"\"\n self.object_id: int = object_id\n self._solver_id: int = solver_id\n self._object_index: int = object_index\n ':field\\n The positions of each particle as a numpy array.\\n '\n self.positions: np.ndarray = np.array([], dtype=np.float32)\n ':field\\n The velocities of each particle as a numpy array.\\n '\n self.velocities: np.ndarray = np.array([], dtype=np.float32)\n\n def on_communicate(self, obi_particles: ObiParticles) -> None:\n \"\"\"On `communicate()`, update `self.positions` and `self.velocities`. 
:param obi_particles: `ObiParticles` output data.\"\"\"\n solver_indices = obi_particles.get_solver_indices(self._object_index)[:obi_particles.get_count(self._object_index)]\n self.positions = np.delete(np.take(obi_particles.get_positions(self._solver_id).reshape(-1, 4), solver_indices, axis=0).reshape(-1, 4), 3, 1)\n self.velocities = np.delete(np.take(obi_particles.get_velocities(self._solver_id).reshape(-1, 4), solver_indices, axis=0).reshape(-1, 4), 3, 1)\n", "source": "the_stack_v2_python_sparse", "source_path": "Python/tdw/obi_data/obi_actor.py", "source_repo": "threedworld-mit/tdw", "split": "val", "star_events_count": 427} {"blob_id": "cccf27816f40af1bff562e2cfdede34100e2c29a", "bodies": ["self.labels_file_path = '{}/something-something-v2-labels.json'.format(config.jason_label_path)\nself.train_file_path = '{}/train_videofolder.txt'.format(config.label_path)\nself.valid_file_path = '{}/val_videofolder.txt'.format(config.label_path)\nself.test_file_path = '{}/test_videofolder.txt'.format(config.label_path)", "set_names = ['train', 'valid', 'test']\npaths = [self.train_file_path, self.valid_file_path, self.test_file_path]\nmetadata = {}\nfor set_name, path in zip(set_names, paths):\n with open(path, mode='r') as f:\n subset_metadata = {}\n for line in f:\n values = line.split()\n id = int(values[0])\n valdict = {'n_frames': values[1], 'action_label': int(values[2]), 'object_labels': None, 'set_name': set_name}\n subset_metadata[id] = valdict\n metadata[set_name] = subset_metadata\nreturn metadata", "with open(self.labels_file_path, mode='r') as f:\n label_file = f.read()\ninverse_label_dict = json.loads(label_file)\nlabel_dict = {int(value): key for key, value in inverse_label_dict.items()}\nreturn label_dict"], "bodies_text": "<|body_start_0|>\n self.labels_file_path = '{}/something-something-v2-labels.json'.format(config.jason_label_path)\n self.train_file_path = '{}/train_videofolder.txt'.format(config.label_path)\n self.valid_file_path = '{}/val_videofolder.txt'.format(config.label_path)\n self.test_file_path = '{}/test_videofolder.txt'.format(config.label_path)\n<|end_body_0|>\n\n<|body_start_1|>\n set_names = ['train', 'valid', 'test']\n paths = [self.train_file_path, self.valid_file_path, self.test_file_path]\n metadata = {}\n for set_name, path in zip(set_names, paths):\n with open(path, mode='r') as f:\n subset_metadata = {}\n for line in f:\n values = line.split()\n id = int(values[0])\n valdict = {'n_frames': values[1], 'action_label': int(values[2]), 'object_labels': None, 'set_name': set_name}\n subset_metadata[id] = valdict\n metadata[set_name] = subset_metadata\n return metadata\n<|end_body_1|>\n\n<|body_start_2|>\n with open(self.labels_file_path, mode='r') as f:\n label_file = f.read()\n inverse_label_dict = json.loads(label_file)\n label_dict = {int(value): key for key, value in inverse_label_dict.items()}\n return label_dict\n<|end_body_2|>\n", "class_docstring": "Used to load video metadata.", "class_name": "MetadataLoader", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MetadataLoader:\n \"\"\"Used to load video metadata.\"\"\"\n\n def __init__(self, config):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def load_metadata(self):\n \"\"\"Load labels. Returns: Dict with keys \"train\", \"valid\", \"test\". Each key maps to a dicts, with each keys being sample ids. 
Each key maps to a dict with keys \"n_frames\", \"label\", and \"setname\".\"\"\"\n <|body_1|>\n\n def get_label_dict(self):\n \"\"\"Return a dictionary mapping label indices to label descriptions.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.labels_file_path = '{}/something-something-v2-labels.json'.format(config.jason_label_path)\n self.train_file_path = '{}/train_videofolder.txt'.format(config.label_path)\n self.valid_file_path = '{}/val_videofolder.txt'.format(config.label_path)\n self.test_file_path = '{}/test_videofolder.txt'.format(config.label_path)\n<|end_body_0|>\n\n<|body_start_1|>\n set_names = ['train', 'valid', 'test']\n paths = [self.train_file_path, self.valid_file_path, self.test_file_path]\n metadata = {}\n for set_name, path in zip(set_names, paths):\n with open(path, mode='r') as f:\n subset_metadata = {}\n for line in f:\n values = line.split()\n id = int(values[0])\n valdict = {'n_frames': values[1], 'action_label': int(values[2]), 'object_labels': None, 'set_name': set_name}\n subset_metadata[id] = valdict\n metadata[set_name] = subset_metadata\n return metadata\n<|end_body_1|>\n\n<|body_start_2|>\n with open(self.labels_file_path, mode='r') as f:\n label_file = f.read()\n inverse_label_dict = json.loads(label_file)\n label_dict = {int(value): key for key, value in inverse_label_dict.items()}\n return label_dict\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000275", "length_bytes": 2484, "license_type": "no_license", "methods": [{"docstring": "Constructor.", "name": "__init__", "signature": "def __init__(self, config)"}, {"docstring": "Load labels. Returns: Dict with keys \"train\", \"valid\", \"test\". Each key maps to a dicts, with each keys being sample ids. Each key maps to a dict with keys \"n_frames\", \"label\", and \"setname\".", "name": "load_metadata", "signature": "def load_metadata(self)"}, {"docstring": "Return a dictionary mapping label indices to label descriptions.", "name": "get_label_dict", "signature": "def get_label_dict(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004706", "prompt": "Implement the Python class `MetadataLoader` described below.\n\nClass description:\nUsed to load video metadata.\n\nMethod signatures and docstrings:\n- def __init__(self, config): Constructor.\n- def load_metadata(self): Load labels. Returns: Dict with keys \"train\", \"valid\", \"test\". Each key maps to a dicts, with each keys being sample ids. Each key maps to a dict with keys \"n_frames\", \"label\", and \"setname\".\n- def get_label_dict(self): Return a dictionary mapping label indices to label descriptions.", "prompted_full_text": "Implement the Python class `MetadataLoader` described below.\n\nClass description:\nUsed to load video metadata.\n\nMethod signatures and docstrings:\n- def __init__(self, config): Constructor.\n- def load_metadata(self): Load labels. Returns: Dict with keys \"train\", \"valid\", \"test\". Each key maps to a dicts, with each keys being sample ids. Each key maps to a dict with keys \"n_frames\", \"label\", and \"setname\".\n- def get_label_dict(self): Return a dictionary mapping label indices to label descriptions.\n\n<|skeleton|>\nclass MetadataLoader:\n \"\"\"Used to load video metadata.\"\"\"\n\n def __init__(self, config):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def load_metadata(self):\n \"\"\"Load labels. Returns: Dict with keys \"train\", \"valid\", \"test\". Each key maps to a dicts, with each keys being sample ids. 
Each key maps to a dict with keys \"n_frames\", \"label\", and \"setname\".\"\"\"\n <|body_1|>\n\n def get_label_dict(self):\n \"\"\"Return a dictionary mapping label indices to label descriptions.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.labels_file_path = '{}/something-something-v2-labels.json'.format(config.jason_label_path)\n self.train_file_path = '{}/train_videofolder.txt'.format(config.label_path)\n self.valid_file_path = '{}/val_videofolder.txt'.format(config.label_path)\n self.test_file_path = '{}/test_videofolder.txt'.format(config.label_path)\n<|end_body_0|>\n\n<|body_start_1|>\n set_names = ['train', 'valid', 'test']\n paths = [self.train_file_path, self.valid_file_path, self.test_file_path]\n metadata = {}\n for set_name, path in zip(set_names, paths):\n with open(path, mode='r') as f:\n subset_metadata = {}\n for line in f:\n values = line.split()\n id = int(values[0])\n valdict = {'n_frames': values[1], 'action_label': int(values[2]), 'object_labels': None, 'set_name': set_name}\n subset_metadata[id] = valdict\n metadata[set_name] = subset_metadata\n return metadata\n<|end_body_1|>\n\n<|body_start_2|>\n with open(self.labels_file_path, mode='r') as f:\n label_file = f.read()\n inverse_label_dict = json.loads(label_file)\n label_dict = {int(value): key for key, value in inverse_label_dict.items()}\n return label_dict\n<|end_body_2|>\n", "revision_id": "26de9802912415f5ecb85b8ede816cd5ede50e7b", "skeleton": "<|skeleton|>\nclass MetadataLoader:\n \"\"\"Used to load video metadata.\"\"\"\n\n def __init__(self, config):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def load_metadata(self):\n \"\"\"Load labels. Returns: Dict with keys \"train\", \"valid\", \"test\". Each key maps to a dicts, with each keys being sample ids. Each key maps to a dict with keys \"n_frames\", \"label\", and \"setname\".\"\"\"\n <|body_1|>\n\n def get_label_dict(self):\n \"\"\"Return a dictionary mapping label indices to label descriptions.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MetadataLoader:\n \"\"\"Used to load video metadata.\"\"\"\n\n def __init__(self, config):\n \"\"\"Constructor.\"\"\"\n self.labels_file_path = '{}/something-something-v2-labels.json'.format(config.jason_label_path)\n self.train_file_path = '{}/train_videofolder.txt'.format(config.label_path)\n self.valid_file_path = '{}/val_videofolder.txt'.format(config.label_path)\n self.test_file_path = '{}/test_videofolder.txt'.format(config.label_path)\n\n def load_metadata(self):\n \"\"\"Load labels. Returns: Dict with keys \"train\", \"valid\", \"test\". Each key maps to a dicts, with each keys being sample ids. 
Each key maps to a dict with keys \"n_frames\", \"label\", and \"setname\".\"\"\"\n set_names = ['train', 'valid', 'test']\n paths = [self.train_file_path, self.valid_file_path, self.test_file_path]\n metadata = {}\n for set_name, path in zip(set_names, paths):\n with open(path, mode='r') as f:\n subset_metadata = {}\n for line in f:\n values = line.split()\n id = int(values[0])\n valdict = {'n_frames': values[1], 'action_label': int(values[2]), 'object_labels': None, 'set_name': set_name}\n subset_metadata[id] = valdict\n metadata[set_name] = subset_metadata\n return metadata\n\n def get_label_dict(self):\n \"\"\"Return a dictionary mapping label indices to label descriptions.\"\"\"\n with open(self.labels_file_path, mode='r') as f:\n label_file = f.read()\n inverse_label_dict = json.loads(label_file)\n label_dict = {int(value): key for key, value in inverse_label_dict.items()}\n return label_dict\n", "source": "the_stack_v2_python_sparse", "source_path": "Video_Based_Human_Activity_Recognition/data_utils/metadata_loader.py", "source_repo": "jotix16/Courses", "split": "val", "star_events_count": 0} {"blob_id": "cd09f000df2d924d535ca44d285ec780068ab9a2", "bodies": ["self.domain_name = domain_name\nself.encrypted_password = encrypted_password\nself.kdc = kdc\nself.password = password\nself.protocol = protocol\nself.username = username", "if dictionary is None:\n return None\ndomain_name = dictionary.get('domainName')\nencrypted_password = dictionary.get('encryptedPassword')\nkdc = dictionary.get('kdc')\npassword = dictionary.get('password')\nprotocol = dictionary.get('protocol')\nusername = dictionary.get('username')\nreturn cls(domain_name, encrypted_password, kdc, password, protocol, username)"], "bodies_text": "<|body_start_0|>\n self.domain_name = domain_name\n self.encrypted_password = encrypted_password\n self.kdc = kdc\n self.password = password\n self.protocol = protocol\n self.username = username\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n domain_name = dictionary.get('domainName')\n encrypted_password = dictionary.get('encryptedPassword')\n kdc = dictionary.get('kdc')\n password = dictionary.get('password')\n protocol = dictionary.get('protocol')\n username = dictionary.get('username')\n return cls(domain_name, encrypted_password, kdc, password, protocol, username)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'NasMountCredentials' model. TODO: type description here. Attributes: domain_name (string): The name of the domain which the NAS mount credentials belong to. encrypted_password (list of long|int): AES256 encrypted password. The key for encryption should be obtained from KMS. kdc (string): KDC hostname or IP for krb5 authentication. KDC stores secret keys for a smb user and provides the krb5 tickets for authentication. password (string): The password field is only populated in RPCs. On disk, instances of this proto should not have this field set, except for legacy records. TODO(oleg): Change this field type to bytes.j protocol (int): The protocol of the NAS mount. userna", "class_name": "NasMountCredentials", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NasMountCredentials:\n \"\"\"Implementation of the 'NasMountCredentials' model. TODO: type description here. Attributes: domain_name (string): The name of the domain which the NAS mount credentials belong to. encrypted_password (list of long|int): AES256 encrypted password. 
The key for encryption should be obtained from KMS. kdc (string): KDC hostname or IP for krb5 authentication. KDC stores secret keys for a smb user and provides the krb5 tickets for authentication. password (string): The password field is only populated in RPCs. On disk, instances of this proto should not have this field set, except for legacy records. TODO(oleg): Change this field type to bytes.j protocol (int): The protocol of the NAS mount. userna\"\"\"\n\n def __init__(self, domain_name=None, encrypted_password=None, kdc=None, password=None, protocol=None, username=None):\n \"\"\"Constructor for the NasMountCredentials class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.domain_name = domain_name\n self.encrypted_password = encrypted_password\n self.kdc = kdc\n self.password = password\n self.protocol = protocol\n self.username = username\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n domain_name = dictionary.get('domainName')\n encrypted_password = dictionary.get('encryptedPassword')\n kdc = dictionary.get('kdc')\n password = dictionary.get('password')\n protocol = dictionary.get('protocol')\n username = dictionary.get('username')\n return cls(domain_name, encrypted_password, kdc, password, protocol, username)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000276", "length_bytes": 2953, "license_type": "permissive", "methods": [{"docstring": "Constructor for the NasMountCredentials class", "name": "__init__", "signature": "def __init__(self, domain_name=None, encrypted_password=None, kdc=None, password=None, protocol=None, username=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003267", "prompt": "Implement the Python class `NasMountCredentials` described below.\n\nClass description:\nImplementation of the 'NasMountCredentials' model. TODO: type description here. Attributes: domain_name (string): The name of the domain which the NAS mount credentials belong to. encrypted_password (list of long|int): AES256 encrypted password. The key for encryption should be obtained from KMS. kdc (string): KDC hostname or IP for krb5 authentication. KDC stores secret keys for a smb user and provides the krb5 tickets for authentication. password (string): The password field is only populated in RPCs. On disk, instances of this proto should not have this field set, except for legacy records. TODO(oleg): Change this field type to bytes.j protocol (int): The protocol of the NAS mount. 
userna\n\nMethod signatures and docstrings:\n- def __init__(self, domain_name=None, encrypted_password=None, kdc=None, password=None, protocol=None, username=None): Constructor for the NasMountCredentials class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `NasMountCredentials` described below.\n\nClass description:\nImplementation of the 'NasMountCredentials' model. TODO: type description here. Attributes: domain_name (string): The name of the domain which the NAS mount credentials belong to. encrypted_password (list of long|int): AES256 encrypted password. The key for encryption should be obtained from KMS. kdc (string): KDC hostname or IP for krb5 authentication. KDC stores secret keys for a smb user and provides the krb5 tickets for authentication. password (string): The password field is only populated in RPCs. On disk, instances of this proto should not have this field set, except for legacy records. TODO(oleg): Change this field type to bytes.j protocol (int): The protocol of the NAS mount. userna\n\nMethod signatures and docstrings:\n- def __init__(self, domain_name=None, encrypted_password=None, kdc=None, password=None, protocol=None, username=None): Constructor for the NasMountCredentials class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass NasMountCredentials:\n \"\"\"Implementation of the 'NasMountCredentials' model. TODO: type description here. Attributes: domain_name (string): The name of the domain which the NAS mount credentials belong to. encrypted_password (list of long|int): AES256 encrypted password. The key for encryption should be obtained from KMS. kdc (string): KDC hostname or IP for krb5 authentication. KDC stores secret keys for a smb user and provides the krb5 tickets for authentication. password (string): The password field is only populated in RPCs. On disk, instances of this proto should not have this field set, except for legacy records. TODO(oleg): Change this field type to bytes.j protocol (int): The protocol of the NAS mount. userna\"\"\"\n\n def __init__(self, domain_name=None, encrypted_password=None, kdc=None, password=None, protocol=None, username=None):\n \"\"\"Constructor for the NasMountCredentials class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.domain_name = domain_name\n self.encrypted_password = encrypted_password\n self.kdc = kdc\n self.password = password\n self.protocol = protocol\n self.username = username\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n domain_name = dictionary.get('domainName')\n encrypted_password = dictionary.get('encryptedPassword')\n kdc = dictionary.get('kdc')\n password = dictionary.get('password')\n protocol = dictionary.get('protocol')\n username = dictionary.get('username')\n return cls(domain_name, encrypted_password, kdc, password, protocol, username)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass NasMountCredentials:\n \"\"\"Implementation of the 'NasMountCredentials' model. TODO: type description here. Attributes: domain_name (string): The name of the domain which the NAS mount credentials belong to. encrypted_password (list of long|int): AES256 encrypted password. The key for encryption should be obtained from KMS. kdc (string): KDC hostname or IP for krb5 authentication. KDC stores secret keys for a smb user and provides the krb5 tickets for authentication. password (string): The password field is only populated in RPCs. On disk, instances of this proto should not have this field set, except for legacy records. TODO(oleg): Change this field type to bytes.j protocol (int): The protocol of the NAS mount. userna\"\"\"\n\n def __init__(self, domain_name=None, encrypted_password=None, kdc=None, password=None, protocol=None, username=None):\n \"\"\"Constructor for the NasMountCredentials class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NasMountCredentials:\n \"\"\"Implementation of the 'NasMountCredentials' model. TODO: type description here. Attributes: domain_name (string): The name of the domain which the NAS mount credentials belong to. encrypted_password (list of long|int): AES256 encrypted password. The key for encryption should be obtained from KMS. kdc (string): KDC hostname or IP for krb5 authentication. KDC stores secret keys for a smb user and provides the krb5 tickets for authentication. password (string): The password field is only populated in RPCs. On disk, instances of this proto should not have this field set, except for legacy records. TODO(oleg): Change this field type to bytes.j protocol (int): The protocol of the NAS mount. 
userna\"\"\"\n\n def __init__(self, domain_name=None, encrypted_password=None, kdc=None, password=None, protocol=None, username=None):\n \"\"\"Constructor for the NasMountCredentials class\"\"\"\n self.domain_name = domain_name\n self.encrypted_password = encrypted_password\n self.kdc = kdc\n self.password = password\n self.protocol = protocol\n self.username = username\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n domain_name = dictionary.get('domainName')\n encrypted_password = dictionary.get('encryptedPassword')\n kdc = dictionary.get('kdc')\n password = dictionary.get('password')\n protocol = dictionary.get('protocol')\n username = dictionary.get('username')\n return cls(domain_name, encrypted_password, kdc, password, protocol, username)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/nas_mount_credentials.py", "source_repo": "cohesity/management-sdk-python", "split": "val", "star_events_count": 24} {"blob_id": "244766b906d701891cdc67d195314161b61ef65d", "bodies": ["try:\n account_obj = super(account_account, self).create(vals)\n is_budget = vals.get('budget', False)\n if is_budget:\n _logger.info('预算科目,准备创建预算状况、产品...')\n self._create_budget_pos(account_obj.id, account_obj.name)\n self._create_product(account_obj.id, account_obj.name, account_obj.company_id.id)\n return account_obj\nexcept Exception as e:\n err_msg = '新增科目出现异常,异常信息:%s' % e.message\n _logger.error(err_msg)\n raise e", "try:\n is_budget = vals.get('budget', False)\n if is_budget:\n _logger.info('预算科目,准备创建预算状况、产品...')\n self._create_budget_pos(self.id, self.name)\n self._create_product(self.id, self.name, self.company_id.id)\n else:\n _logger.info('非预算科目,不做处理...')\n return super(account_account, self).write(vals)\nexcept Exception as e:\n err_msg = '更新科目出现异常,异常信息:%s' % e.message\n _logger.error(err_msg)\n raise e", "try:\n _logger.info('_create_budget_pos 接收到的参数:account_id=%s,account_name=%s' % (account_id, account_name))\n budget_post_instance = self.env['account.budget.post']\n budget_post_objs = budget_post_instance.search([('name', '=', account_name)])\n if len(budget_post_objs) == 0:\n budget_post_value = {}\n budget_post_value['name'] = account_name\n budget_post_value['account_ids'] = [[6, False, [account_id]]]\n budget_post_instance.create(budget_post_value)\n else:\n _logger.info('对应科目%s的预算状况已存在' % account_name)\nexcept Exception as e:\n err_msg = '根据科目创建预算状况时出现异常:%s' % e.message\n _logger.error(err_msg)", "try:\n _logger.info('_create_product 接收到的参数:account_id=%s,account_name=%s,company_id=%s' % (account_id, account_name, company_id))\n product_product_instance = self.env['product.product']\n product_product_instance_objs = product_product_instance.search([('name', '=', account_name)])\n if len(product_product_instance_objs) == 0:\n product_product_value = {}\n product_product_value['name'] = account_name\n product_product_value['sale_ok'] = False\n product_product_value['can_be_expensed'] = True\n product_product_value['property_account_expense_id'] = account_id\n product_product_value['company_id'] = company_id\n product_product_value['taxes_id'] = False\n product_product_value['supplier_taxes_id'] = False\n 
product_product_instance.create(product_product_value)\n else:\n _logger.info('对应科目%s的产品已存在' % account_name)\nexcept Exception as e:\n err_msg = '根据科目创建产品时出现异常:%s' % e.message\n _logger.error(err_msg)"], "bodies_text": "<|body_start_0|>\n try:\n account_obj = super(account_account, self).create(vals)\n is_budget = vals.get('budget', False)\n if is_budget:\n _logger.info('预算科目,准备创建预算状况、产品...')\n self._create_budget_pos(account_obj.id, account_obj.name)\n self._create_product(account_obj.id, account_obj.name, account_obj.company_id.id)\n return account_obj\n except Exception as e:\n err_msg = '新增科目出现异常,异常信息:%s' % e.message\n _logger.error(err_msg)\n raise e\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n is_budget = vals.get('budget', False)\n if is_budget:\n _logger.info('预算科目,准备创建预算状况、产品...')\n self._create_budget_pos(self.id, self.name)\n self._create_product(self.id, self.name, self.company_id.id)\n else:\n _logger.info('非预算科目,不做处理...')\n return super(account_account, self).write(vals)\n except Exception as e:\n err_msg = '更新科目出现异常,异常信息:%s' % e.message\n _logger.error(err_msg)\n raise e\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n _logger.info('_create_budget_pos 接收到的参数:account_id=%s,account_name=%s' % (account_id, account_name))\n budget_post_instance = self.env['account.budget.post']\n budget_post_objs = budget_post_instance.search([('name', '=', account_name)])\n if len(budget_post_objs) == 0:\n budget_post_value = {}\n budget_post_value['name'] = account_name\n budget_post_value['account_ids'] = [[6, False, [account_id]]]\n budget_post_instance.create(budget_post_value)\n else:\n _logger.info('对应科目%s的预算状况已存在' % account_name)\n except Exception as e:\n err_msg = '根据科目创建预算状况时出现异常:%s' % e.message\n _logger.error(err_msg)\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n _logger.info('_create_product 接收到的参数:account_id=%s,account_name=%s,company_id=%s' % (account_id, account_name, company_id))\n product_product_instance = self.env['product.product']\n product_product_instance_objs = product_product_instance.search([('name', '=', account_name)])\n if len(product_product_instance_objs) == 0:\n product_product_value = {}\n product_product_value['name'] = account_name\n product_product_value['sale_ok'] = False\n product_product_value['can_be_expensed'] = True\n product_product_value['property_account_expense_id'] = account_id\n product_product_value['company_id'] = company_id\n product_product_value['taxes_id'] = False\n product_product_value['supplier_taxes_id'] = False\n product_product_instance.create(product_product_value)\n else:\n _logger.info('对应科目%s的产品已存在' % account_name)\n except Exception as e:\n err_msg = '根据科目创建产品时出现异常:%s' % e.message\n _logger.error(err_msg)\n<|end_body_3|>\n", "class_docstring": "功能:科目增加-“是否预算科目”", "class_name": "account_account", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass account_account:\n \"\"\"功能:科目增加-“是否预算科目”\"\"\"\n\n def create(self, vals):\n \"\"\"功能:如果是“预算科目”则创建“预算状况” :param vals: :return:\"\"\"\n <|body_0|>\n\n def write(self, vals):\n \"\"\"功能:由“非预算科目”更改为“预算科目时”处理预算状况 :param vals: :return:\"\"\"\n <|body_1|>\n\n def _create_budget_pos(self, account_id, account_name):\n \"\"\"功能:根据“科目”创建“预算状况” :return:\"\"\"\n <|body_2|>\n\n def _create_product(self, account_id, account_name, company_id):\n \"\"\"功能:创建产品 :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n account_obj = super(account_account, self).create(vals)\n is_budget = vals.get('budget', False)\n if 
is_budget:\n _logger.info('预算科目,准备创建预算状况、产品...')\n self._create_budget_pos(account_obj.id, account_obj.name)\n self._create_product(account_obj.id, account_obj.name, account_obj.company_id.id)\n return account_obj\n except Exception as e:\n err_msg = '新增科目出现异常,异常信息:%s' % e.message\n _logger.error(err_msg)\n raise e\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n is_budget = vals.get('budget', False)\n if is_budget:\n _logger.info('预算科目,准备创建预算状况、产品...')\n self._create_budget_pos(self.id, self.name)\n self._create_product(self.id, self.name, self.company_id.id)\n else:\n _logger.info('非预算科目,不做处理...')\n return super(account_account, self).write(vals)\n except Exception as e:\n err_msg = '更新科目出现异常,异常信息:%s' % e.message\n _logger.error(err_msg)\n raise e\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n _logger.info('_create_budget_pos 接收到的参数:account_id=%s,account_name=%s' % (account_id, account_name))\n budget_post_instance = self.env['account.budget.post']\n budget_post_objs = budget_post_instance.search([('name', '=', account_name)])\n if len(budget_post_objs) == 0:\n budget_post_value = {}\n budget_post_value['name'] = account_name\n budget_post_value['account_ids'] = [[6, False, [account_id]]]\n budget_post_instance.create(budget_post_value)\n else:\n _logger.info('对应科目%s的预算状况已存在' % account_name)\n except Exception as e:\n err_msg = '根据科目创建预算状况时出现异常:%s' % e.message\n _logger.error(err_msg)\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n _logger.info('_create_product 接收到的参数:account_id=%s,account_name=%s,company_id=%s' % (account_id, account_name, company_id))\n product_product_instance = self.env['product.product']\n product_product_instance_objs = product_product_instance.search([('name', '=', account_name)])\n if len(product_product_instance_objs) == 0:\n product_product_value = {}\n product_product_value['name'] = account_name\n product_product_value['sale_ok'] = False\n product_product_value['can_be_expensed'] = True\n product_product_value['property_account_expense_id'] = account_id\n product_product_value['company_id'] = company_id\n product_product_value['taxes_id'] = False\n product_product_value['supplier_taxes_id'] = False\n product_product_instance.create(product_product_value)\n else:\n _logger.info('对应科目%s的产品已存在' % account_name)\n except Exception as e:\n err_msg = '根据科目创建产品时出现异常:%s' % e.message\n _logger.error(err_msg)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000277", "length_bytes": 4242, "license_type": "no_license", "methods": [{"docstring": "功能:如果是“预算科目”则创建“预算状况” :param vals: :return:", "name": "create", "signature": "def create(self, vals)"}, {"docstring": "功能:由“非预算科目”更改为“预算科目时”处理预算状况 :param vals: :return:", "name": "write", "signature": "def write(self, vals)"}, {"docstring": "功能:根据“科目”创建“预算状况” :return:", "name": "_create_budget_pos", "signature": "def _create_budget_pos(self, account_id, account_name)"}, {"docstring": "功能:创建产品 :return:", "name": "_create_product", "signature": "def _create_product(self, account_id, account_name, company_id)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_test_000038", "prompt": "Implement the Python class `account_account` described below.\n\nClass description:\n功能:科目增加-“是否预算科目”\n\nMethod signatures and docstrings:\n- def create(self, vals): 功能:如果是“预算科目”则创建“预算状况” :param vals: :return:\n- def write(self, vals): 功能:由“非预算科目”更改为“预算科目时”处理预算状况 :param vals: :return:\n- def _create_budget_pos(self, account_id, account_name): 功能:根据“科目”创建“预算状况” :return:\n- def _create_product(self, account_id, account_name, company_id): 
功能:创建产品 :return:", "prompted_full_text": "Implement the Python class `account_account` described below.\n\nClass description:\n功能:科目增加-“是否预算科目”\n\nMethod signatures and docstrings:\n- def create(self, vals): 功能:如果是“预算科目”则创建“预算状况” :param vals: :return:\n- def write(self, vals): 功能:由“非预算科目”更改为“预算科目时”处理预算状况 :param vals: :return:\n- def _create_budget_pos(self, account_id, account_name): 功能:根据“科目”创建“预算状况” :return:\n- def _create_product(self, account_id, account_name, company_id): 功能:创建产品 :return:\n\n<|skeleton|>\nclass account_account:\n \"\"\"功能:科目增加-“是否预算科目”\"\"\"\n\n def create(self, vals):\n \"\"\"功能:如果是“预算科目”则创建“预算状况” :param vals: :return:\"\"\"\n <|body_0|>\n\n def write(self, vals):\n \"\"\"功能:由“非预算科目”更改为“预算科目时”处理预算状况 :param vals: :return:\"\"\"\n <|body_1|>\n\n def _create_budget_pos(self, account_id, account_name):\n \"\"\"功能:根据“科目”创建“预算状况” :return:\"\"\"\n <|body_2|>\n\n def _create_product(self, account_id, account_name, company_id):\n \"\"\"功能:创建产品 :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n account_obj = super(account_account, self).create(vals)\n is_budget = vals.get('budget', False)\n if is_budget:\n _logger.info('预算科目,准备创建预算状况、产品...')\n self._create_budget_pos(account_obj.id, account_obj.name)\n self._create_product(account_obj.id, account_obj.name, account_obj.company_id.id)\n return account_obj\n except Exception as e:\n err_msg = '新增科目出现异常,异常信息:%s' % e.message\n _logger.error(err_msg)\n raise e\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n is_budget = vals.get('budget', False)\n if is_budget:\n _logger.info('预算科目,准备创建预算状况、产品...')\n self._create_budget_pos(self.id, self.name)\n self._create_product(self.id, self.name, self.company_id.id)\n else:\n _logger.info('非预算科目,不做处理...')\n return super(account_account, self).write(vals)\n except Exception as e:\n err_msg = '更新科目出现异常,异常信息:%s' % e.message\n _logger.error(err_msg)\n raise e\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n _logger.info('_create_budget_pos 接收到的参数:account_id=%s,account_name=%s' % (account_id, account_name))\n budget_post_instance = self.env['account.budget.post']\n budget_post_objs = budget_post_instance.search([('name', '=', account_name)])\n if len(budget_post_objs) == 0:\n budget_post_value = {}\n budget_post_value['name'] = account_name\n budget_post_value['account_ids'] = [[6, False, [account_id]]]\n budget_post_instance.create(budget_post_value)\n else:\n _logger.info('对应科目%s的预算状况已存在' % account_name)\n except Exception as e:\n err_msg = '根据科目创建预算状况时出现异常:%s' % e.message\n _logger.error(err_msg)\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n _logger.info('_create_product 接收到的参数:account_id=%s,account_name=%s,company_id=%s' % (account_id, account_name, company_id))\n product_product_instance = self.env['product.product']\n product_product_instance_objs = product_product_instance.search([('name', '=', account_name)])\n if len(product_product_instance_objs) == 0:\n product_product_value = {}\n product_product_value['name'] = account_name\n product_product_value['sale_ok'] = False\n product_product_value['can_be_expensed'] = True\n product_product_value['property_account_expense_id'] = account_id\n product_product_value['company_id'] = company_id\n product_product_value['taxes_id'] = False\n product_product_value['supplier_taxes_id'] = False\n product_product_instance.create(product_product_value)\n else:\n _logger.info('对应科目%s的产品已存在' % account_name)\n except Exception as e:\n err_msg = '根据科目创建产品时出现异常:%s' % e.message\n _logger.error(err_msg)\n<|end_body_3|>\n", "revision_id": 
"5a4fd72991c846d5cb7c5082f6bdfef5b2bca572", "skeleton": "<|skeleton|>\nclass account_account:\n \"\"\"功能:科目增加-“是否预算科目”\"\"\"\n\n def create(self, vals):\n \"\"\"功能:如果是“预算科目”则创建“预算状况” :param vals: :return:\"\"\"\n <|body_0|>\n\n def write(self, vals):\n \"\"\"功能:由“非预算科目”更改为“预算科目时”处理预算状况 :param vals: :return:\"\"\"\n <|body_1|>\n\n def _create_budget_pos(self, account_id, account_name):\n \"\"\"功能:根据“科目”创建“预算状况” :return:\"\"\"\n <|body_2|>\n\n def _create_product(self, account_id, account_name, company_id):\n \"\"\"功能:创建产品 :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class account_account:\n \"\"\"功能:科目增加-“是否预算科目”\"\"\"\n\n def create(self, vals):\n \"\"\"功能:如果是“预算科目”则创建“预算状况” :param vals: :return:\"\"\"\n try:\n account_obj = super(account_account, self).create(vals)\n is_budget = vals.get('budget', False)\n if is_budget:\n _logger.info('预算科目,准备创建预算状况、产品...')\n self._create_budget_pos(account_obj.id, account_obj.name)\n self._create_product(account_obj.id, account_obj.name, account_obj.company_id.id)\n return account_obj\n except Exception as e:\n err_msg = '新增科目出现异常,异常信息:%s' % e.message\n _logger.error(err_msg)\n raise e\n\n def write(self, vals):\n \"\"\"功能:由“非预算科目”更改为“预算科目时”处理预算状况 :param vals: :return:\"\"\"\n try:\n is_budget = vals.get('budget', False)\n if is_budget:\n _logger.info('预算科目,准备创建预算状况、产品...')\n self._create_budget_pos(self.id, self.name)\n self._create_product(self.id, self.name, self.company_id.id)\n else:\n _logger.info('非预算科目,不做处理...')\n return super(account_account, self).write(vals)\n except Exception as e:\n err_msg = '更新科目出现异常,异常信息:%s' % e.message\n _logger.error(err_msg)\n raise e\n\n def _create_budget_pos(self, account_id, account_name):\n \"\"\"功能:根据“科目”创建“预算状况” :return:\"\"\"\n try:\n _logger.info('_create_budget_pos 接收到的参数:account_id=%s,account_name=%s' % (account_id, account_name))\n budget_post_instance = self.env['account.budget.post']\n budget_post_objs = budget_post_instance.search([('name', '=', account_name)])\n if len(budget_post_objs) == 0:\n budget_post_value = {}\n budget_post_value['name'] = account_name\n budget_post_value['account_ids'] = [[6, False, [account_id]]]\n budget_post_instance.create(budget_post_value)\n else:\n _logger.info('对应科目%s的预算状况已存在' % account_name)\n except Exception as e:\n err_msg = '根据科目创建预算状况时出现异常:%s' % e.message\n _logger.error(err_msg)\n\n def _create_product(self, account_id, account_name, company_id):\n \"\"\"功能:创建产品 :return:\"\"\"\n try:\n _logger.info('_create_product 接收到的参数:account_id=%s,account_name=%s,company_id=%s' % (account_id, account_name, company_id))\n product_product_instance = self.env['product.product']\n product_product_instance_objs = product_product_instance.search([('name', '=', account_name)])\n if len(product_product_instance_objs) == 0:\n product_product_value = {}\n product_product_value['name'] = account_name\n product_product_value['sale_ok'] = False\n product_product_value['can_be_expensed'] = True\n product_product_value['property_account_expense_id'] = account_id\n product_product_value['company_id'] = company_id\n product_product_value['taxes_id'] = False\n product_product_value['supplier_taxes_id'] = False\n product_product_instance.create(product_product_value)\n else:\n _logger.info('对应科目%s的产品已存在' % account_name)\n except Exception as e:\n err_msg = '根据科目创建产品时出现异常:%s' % e.message\n _logger.error(err_msg)\n", "source": "the_stack_v2_python_sparse", "source_path": 
"yuancloud/plugin/account_budget_department/models/account_account.py", "source_repo": "cash2one/yuancloud", "split": "val", "star_events_count": 0} {"blob_id": "4dcac7c8e6539a65aca63901807903f1c823858b", "bodies": ["ctx = self.server.context\nres = {}\nres['method'] = self.command\nres['path'] = self.path\nres['headers'] = self.headers.items()\nres['request_version'] = self.request_version\nif self.headers.get('Content-Length') is not None:\n body_length = int(self.headers.get('Content-Length'))\n res['request_body'] = self.rfile.read(body_length).decode('utf-8')\nelse:\n res['request_body'] = None\nres['request_time'] = time.time()\nwith ctx.lock:\n ctx.data['requests'].append(res)\nmsg_fmt = '[Endpoint `%s`] Request recorded: `%s`'\nlog.debug(msg_fmt, ctx.data['endpoint_id'], res)", "ctx = self.server.context\nif ctx.data['record_requests']:\n self._record_request()\nif ctx.data['encoded_response']:\n msg_fmt = 'Endpoint `%s` sending encoded response `%s` as requested'\n log.debug(msg_fmt, ctx.data['endpoint_id'], ctx.data['encoded_response'])\n self._finalize_request(200, 'text/plain; charset=utf-8', ctx.data['encoded_response'])\n return True\nreturn super()._process_commands(content_type, blob)"], "bodies_text": "<|body_start_0|>\n ctx = self.server.context\n res = {}\n res['method'] = self.command\n res['path'] = self.path\n res['headers'] = self.headers.items()\n res['request_version'] = self.request_version\n if self.headers.get('Content-Length') is not None:\n body_length = int(self.headers.get('Content-Length'))\n res['request_body'] = self.rfile.read(body_length).decode('utf-8')\n else:\n res['request_body'] = None\n res['request_time'] = time.time()\n with ctx.lock:\n ctx.data['requests'].append(res)\n msg_fmt = '[Endpoint `%s`] Request recorded: `%s`'\n log.debug(msg_fmt, ctx.data['endpoint_id'], res)\n<|end_body_0|>\n\n<|body_start_1|>\n ctx = self.server.context\n if ctx.data['record_requests']:\n self._record_request()\n if ctx.data['encoded_response']:\n msg_fmt = 'Endpoint `%s` sending encoded response `%s` as requested'\n log.debug(msg_fmt, ctx.data['endpoint_id'], ctx.data['encoded_response'])\n self._finalize_request(200, 'text/plain; charset=utf-8', ctx.data['encoded_response'])\n return True\n return super()._process_commands(content_type, blob)\n<|end_body_1|>\n", "class_docstring": "A request hander class implementing recording all the requests&request data made to given endpoint. This class will most likely be inherited from and extended with some extra code that actually processes the requests because on itself it just returns some sample text.", "class_name": "RecordingHTTPRequestHandler", "detected_licenses": ["Apache-2.0", "MIT", "LicenseRef-scancode-oracle-bcl-javase-javafx-2012", "ErlPL-1.1", "MPL-2.0", "ISC", "BSL-1.0", "Python-2.0", "BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RecordingHTTPRequestHandler:\n \"\"\"A request hander class implementing recording all the requests&request data made to given endpoint. This class will most likely be inherited from and extended with some extra code that actually processes the requests because on itself it just returns some sample text.\"\"\"\n\n def _record_request(self):\n \"\"\"Store all the relevant data of the request into the endpoint context.\"\"\"\n <|body_0|>\n\n def _process_commands(self, content_type, blob):\n \"\"\"Process all the endpoint configuration and execute things that user requested. 
Please refer to the description of the BaseHTTPRequestHandler class for details on the arguments of this method.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ctx = self.server.context\n res = {}\n res['method'] = self.command\n res['path'] = self.path\n res['headers'] = self.headers.items()\n res['request_version'] = self.request_version\n if self.headers.get('Content-Length') is not None:\n body_length = int(self.headers.get('Content-Length'))\n res['request_body'] = self.rfile.read(body_length).decode('utf-8')\n else:\n res['request_body'] = None\n res['request_time'] = time.time()\n with ctx.lock:\n ctx.data['requests'].append(res)\n msg_fmt = '[Endpoint `%s`] Request recorded: `%s`'\n log.debug(msg_fmt, ctx.data['endpoint_id'], res)\n<|end_body_0|>\n\n<|body_start_1|>\n ctx = self.server.context\n if ctx.data['record_requests']:\n self._record_request()\n if ctx.data['encoded_response']:\n msg_fmt = 'Endpoint `%s` sending encoded response `%s` as requested'\n log.debug(msg_fmt, ctx.data['endpoint_id'], ctx.data['encoded_response'])\n self._finalize_request(200, 'text/plain; charset=utf-8', ctx.data['encoded_response'])\n return True\n return super()._process_commands(content_type, blob)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000278", "length_bytes": 4611, "license_type": "permissive", "methods": [{"docstring": "Store all the relevant data of the request into the endpoint context.", "name": "_record_request", "signature": "def _record_request(self)"}, {"docstring": "Process all the endpoint configuration and execute things that user requested. Please refer to the description of the BaseHTTPRequestHandler class for details on the arguments of this method.", "name": "_process_commands", "signature": "def _process_commands(self, content_type, blob)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000009", "prompt": "Implement the Python class `RecordingHTTPRequestHandler` described below.\n\nClass description:\nA request hander class implementing recording all the requests&request data made to given endpoint. This class will most likely be inherited from and extended with some extra code that actually processes the requests because on itself it just returns some sample text.\n\nMethod signatures and docstrings:\n- def _record_request(self): Store all the relevant data of the request into the endpoint context.\n- def _process_commands(self, content_type, blob): Process all the endpoint configuration and execute things that user requested. Please refer to the description of the BaseHTTPRequestHandler class for details on the arguments of this method.", "prompted_full_text": "Implement the Python class `RecordingHTTPRequestHandler` described below.\n\nClass description:\nA request hander class implementing recording all the requests&request data made to given endpoint. This class will most likely be inherited from and extended with some extra code that actually processes the requests because on itself it just returns some sample text.\n\nMethod signatures and docstrings:\n- def _record_request(self): Store all the relevant data of the request into the endpoint context.\n- def _process_commands(self, content_type, blob): Process all the endpoint configuration and execute things that user requested. 
Please refer to the description of the BaseHTTPRequestHandler class for details on the arguments of this method.\n\n<|skeleton|>\nclass RecordingHTTPRequestHandler:\n \"\"\"A request hander class implementing recording all the requests&request data made to given endpoint. This class will most likely be inherited from and extended with some extra code that actually processes the requests because on itself it just returns some sample text.\"\"\"\n\n def _record_request(self):\n \"\"\"Store all the relevant data of the request into the endpoint context.\"\"\"\n <|body_0|>\n\n def _process_commands(self, content_type, blob):\n \"\"\"Process all the endpoint configuration and execute things that user requested. Please refer to the description of the BaseHTTPRequestHandler class for details on the arguments of this method.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ctx = self.server.context\n res = {}\n res['method'] = self.command\n res['path'] = self.path\n res['headers'] = self.headers.items()\n res['request_version'] = self.request_version\n if self.headers.get('Content-Length') is not None:\n body_length = int(self.headers.get('Content-Length'))\n res['request_body'] = self.rfile.read(body_length).decode('utf-8')\n else:\n res['request_body'] = None\n res['request_time'] = time.time()\n with ctx.lock:\n ctx.data['requests'].append(res)\n msg_fmt = '[Endpoint `%s`] Request recorded: `%s`'\n log.debug(msg_fmt, ctx.data['endpoint_id'], res)\n<|end_body_0|>\n\n<|body_start_1|>\n ctx = self.server.context\n if ctx.data['record_requests']:\n self._record_request()\n if ctx.data['encoded_response']:\n msg_fmt = 'Endpoint `%s` sending encoded response `%s` as requested'\n log.debug(msg_fmt, ctx.data['endpoint_id'], ctx.data['encoded_response'])\n self._finalize_request(200, 'text/plain; charset=utf-8', ctx.data['encoded_response'])\n return True\n return super()._process_commands(content_type, blob)\n<|end_body_1|>\n", "revision_id": "79b9a39b4e639dc2c9435a869918399b50bfaf24", "skeleton": "<|skeleton|>\nclass RecordingHTTPRequestHandler:\n \"\"\"A request hander class implementing recording all the requests&request data made to given endpoint. This class will most likely be inherited from and extended with some extra code that actually processes the requests because on itself it just returns some sample text.\"\"\"\n\n def _record_request(self):\n \"\"\"Store all the relevant data of the request into the endpoint context.\"\"\"\n <|body_0|>\n\n def _process_commands(self, content_type, blob):\n \"\"\"Process all the endpoint configuration and execute things that user requested. Please refer to the description of the BaseHTTPRequestHandler class for details on the arguments of this method.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RecordingHTTPRequestHandler:\n \"\"\"A request hander class implementing recording all the requests&request data made to given endpoint. 
This class will most likely be inherited from and extended with some extra code that actually processes the requests because on itself it just returns some sample text.\"\"\"\n\n def _record_request(self):\n \"\"\"Store all the relevant data of the request into the endpoint context.\"\"\"\n ctx = self.server.context\n res = {}\n res['method'] = self.command\n res['path'] = self.path\n res['headers'] = self.headers.items()\n res['request_version'] = self.request_version\n if self.headers.get('Content-Length') is not None:\n body_length = int(self.headers.get('Content-Length'))\n res['request_body'] = self.rfile.read(body_length).decode('utf-8')\n else:\n res['request_body'] = None\n res['request_time'] = time.time()\n with ctx.lock:\n ctx.data['requests'].append(res)\n msg_fmt = '[Endpoint `%s`] Request recorded: `%s`'\n log.debug(msg_fmt, ctx.data['endpoint_id'], res)\n\n def _process_commands(self, content_type, blob):\n \"\"\"Process all the endpoint configuration and execute things that user requested. Please refer to the description of the BaseHTTPRequestHandler class for details on the arguments of this method.\"\"\"\n ctx = self.server.context\n if ctx.data['record_requests']:\n self._record_request()\n if ctx.data['encoded_response']:\n msg_fmt = 'Endpoint `%s` sending encoded response `%s` as requested'\n log.debug(msg_fmt, ctx.data['endpoint_id'], ctx.data['encoded_response'])\n self._finalize_request(200, 'text/plain; charset=utf-8', ctx.data['encoded_response'])\n return True\n return super()._process_commands(content_type, blob)\n", "source": "the_stack_v2_python_sparse", "source_path": "packages/adminrouter/extra/src/test-harness/modules/mocker/endpoints/recording.py", "source_repo": "dcos/dcos", "split": "val", "star_events_count": 2613} {"blob_id": "3c54577a58ae0980ae7e50bba98f4e78f9be434d", "bodies": ["query, session = context[:2]\ncondition = True\nif 's' in query:\n condition = Plugin.name.matches(query.s) | Plugin.description.matches(query.s)\nplugins = session.query(Plugin, condition)\nreturn cv.create_json('success', cv.dictize(plugins))", "request, session = context[:2]\nzip_file = ZipFile(BytesIO(request.data))\ntry:\n meta_json = cv.deserialize_json(zip_file.open('.canvas/meta.json').read())\n name, description = (meta_json['name'], meta_json['description'])\n repo_url = meta_json['repo_url']\n dependencies = meta_json['dependencies']\nexcept KeyError:\n raise cv.BadRequest('Invalid plugin package (bad meta info)')\nplugin = Plugin(name, description, repo_url)\nsession.save(plugin)\nfor dependency_name in dependencies:\n dependency = session.query(Plugin, Plugin.name == dependency_name, one=True)\n if not dependency:\n raise cv.UnprocessableEntity('Invalid dependency \"%s\"' % dependency_name)\n session.save(PluginDependency(plugin, dependency))\nsession.commit()\nreturn cv.create_json('success', {'created_id': plugin.id})"], "bodies_text": "<|body_start_0|>\n query, session = context[:2]\n condition = True\n if 's' in query:\n condition = Plugin.name.matches(query.s) | Plugin.description.matches(query.s)\n plugins = session.query(Plugin, condition)\n return cv.create_json('success', cv.dictize(plugins))\n<|end_body_0|>\n\n<|body_start_1|>\n request, session = context[:2]\n zip_file = ZipFile(BytesIO(request.data))\n try:\n meta_json = cv.deserialize_json(zip_file.open('.canvas/meta.json').read())\n name, description = (meta_json['name'], meta_json['description'])\n repo_url = meta_json['repo_url']\n dependencies = meta_json['dependencies']\n except KeyError:\n 
raise cv.BadRequest('Invalid plugin package (bad meta info)')\n plugin = Plugin(name, description, repo_url)\n session.save(plugin)\n for dependency_name in dependencies:\n dependency = session.query(Plugin, Plugin.name == dependency_name, one=True)\n if not dependency:\n raise cv.UnprocessableEntity('Invalid dependency \"%s\"' % dependency_name)\n session.save(PluginDependency(plugin, dependency))\n session.commit()\n return cv.create_json('success', {'created_id': plugin.id})\n<|end_body_1|>\n", "class_docstring": "", "class_name": "PluginCollectionEndpoint", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PluginCollectionEndpoint:\n\n def on_get(self, context):\n \"\"\"Retrieve a list of plugins.\"\"\"\n <|body_0|>\n\n def on_post(self, context):\n \"\"\"Upload a plugin.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n query, session = context[:2]\n condition = True\n if 's' in query:\n condition = Plugin.name.matches(query.s) | Plugin.description.matches(query.s)\n plugins = session.query(Plugin, condition)\n return cv.create_json('success', cv.dictize(plugins))\n<|end_body_0|>\n\n<|body_start_1|>\n request, session = context[:2]\n zip_file = ZipFile(BytesIO(request.data))\n try:\n meta_json = cv.deserialize_json(zip_file.open('.canvas/meta.json').read())\n name, description = (meta_json['name'], meta_json['description'])\n repo_url = meta_json['repo_url']\n dependencies = meta_json['dependencies']\n except KeyError:\n raise cv.BadRequest('Invalid plugin package (bad meta info)')\n plugin = Plugin(name, description, repo_url)\n session.save(plugin)\n for dependency_name in dependencies:\n dependency = session.query(Plugin, Plugin.name == dependency_name, one=True)\n if not dependency:\n raise cv.UnprocessableEntity('Invalid dependency \"%s\"' % dependency_name)\n session.save(PluginDependency(plugin, dependency))\n session.commit()\n return cv.create_json('success', {'created_id': plugin.id})\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000279", "length_bytes": 1550, "license_type": "permissive", "methods": [{"docstring": "Retrieve a list of plugins.", "name": "on_get", "signature": "def on_get(self, context)"}, {"docstring": "Upload a plugin.", "name": "on_post", "signature": "def on_post(self, context)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006666", "prompt": "Implement the Python class `PluginCollectionEndpoint` described below.\n\nClass description:\nImplement the PluginCollectionEndpoint class.\n\nMethod signatures and docstrings:\n- def on_get(self, context): Retrieve a list of plugins.\n- def on_post(self, context): Upload a plugin.", "prompted_full_text": "Implement the Python class `PluginCollectionEndpoint` described below.\n\nClass description:\nImplement the PluginCollectionEndpoint class.\n\nMethod signatures and docstrings:\n- def on_get(self, context): Retrieve a list of plugins.\n- def on_post(self, context): Upload a plugin.\n\n<|skeleton|>\nclass PluginCollectionEndpoint:\n\n def on_get(self, context):\n \"\"\"Retrieve a list of plugins.\"\"\"\n <|body_0|>\n\n def on_post(self, context):\n \"\"\"Upload a plugin.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n query, session = context[:2]\n condition = True\n if 's' in query:\n condition = Plugin.name.matches(query.s) | Plugin.description.matches(query.s)\n plugins = session.query(Plugin, condition)\n return cv.create_json('success', 
cv.dictize(plugins))\n<|end_body_0|>\n\n<|body_start_1|>\n request, session = context[:2]\n zip_file = ZipFile(BytesIO(request.data))\n try:\n meta_json = cv.deserialize_json(zip_file.open('.canvas/meta.json').read())\n name, description = (meta_json['name'], meta_json['description'])\n repo_url = meta_json['repo_url']\n dependencies = meta_json['dependencies']\n except KeyError:\n raise cv.BadRequest('Invalid plugin package (bad meta info)')\n plugin = Plugin(name, description, repo_url)\n session.save(plugin)\n for dependency_name in dependencies:\n dependency = session.query(Plugin, Plugin.name == dependency_name, one=True)\n if not dependency:\n raise cv.UnprocessableEntity('Invalid dependency \"%s\"' % dependency_name)\n session.save(PluginDependency(plugin, dependency))\n session.commit()\n return cv.create_json('success', {'created_id': plugin.id})\n<|end_body_1|>\n", "revision_id": "20fd6a3cc42af5f2cde73e3b100d3edeb4e50c01", "skeleton": "<|skeleton|>\nclass PluginCollectionEndpoint:\n\n def on_get(self, context):\n \"\"\"Retrieve a list of plugins.\"\"\"\n <|body_0|>\n\n def on_post(self, context):\n \"\"\"Upload a plugin.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PluginCollectionEndpoint:\n def on_get(self, context):\n \"\"\"Retrieve a list of plugins.\"\"\"\n query, session = context[:2]\n condition = True\n if 's' in query:\n condition = Plugin.name.matches(query.s) | Plugin.description.matches(query.s)\n plugins = session.query(Plugin, condition)\n return cv.create_json('success', cv.dictize(plugins))\n\n def on_post(self, context):\n \"\"\"Upload a plugin.\"\"\"\n request, session = context[:2]\n zip_file = ZipFile(BytesIO(request.data))\n try:\n meta_json = cv.deserialize_json(zip_file.open('.canvas/meta.json').read())\n name, description = (meta_json['name'], meta_json['description'])\n repo_url = meta_json['repo_url']\n dependencies = meta_json['dependencies']\n except KeyError:\n raise cv.BadRequest('Invalid plugin package (bad meta info)')\n plugin = Plugin(name, description, repo_url)\n session.save(plugin)\n for dependency_name in dependencies:\n dependency = session.query(Plugin, Plugin.name == dependency_name, one=True)\n if not dependency:\n raise cv.UnprocessableEntity('Invalid dependency \"%s\"' % dependency_name)\n session.save(PluginDependency(plugin, dependency))\n session.commit()\n return cv.create_json('success', {'created_id': plugin.id})\n", "source": "the_stack_v2_python_sparse", "source_path": "cvpl-homepage/homepage/api.py", "source_repo": "robinsax/canvas-plugin-multirepo", "split": "val", "star_events_count": 0} {"blob_id": "c5fd1b577997ab14158787ed75b35152ac9ff12a", "bodies": ["if lstm_size is None and rnn_construction_fn is None:\n raise ValueError('Need to provide either custom rnn_construction_fn or lstm_size.')\nif lstm_size and rnn_construction_fn:\n raise ValueError('Cannot provide both custom rnn_construction_fn and lstm_size.')\nkernel_initializer = tf.compat.v1.variance_scaling_initializer(scale=2.0, mode='fan_in', distribution='truncated_normal')\ninput_encoder = encoding_network.EncodingNetwork(input_tensor_spec, preprocessing_layers=preprocessing_layers, preprocessing_combiner=preprocessing_combiner, conv_layer_params=conv_layer_params, fc_layer_params=input_fc_layer_params, activation_fn=activation_fn, kernel_initializer=kernel_initializer, dtype=dtype)\nif rnn_construction_fn:\n rnn_construction_kwargs = 
rnn_construction_kwargs or {}\n lstm_network = rnn_construction_fn(**rnn_construction_kwargs)\nelse:\n if len(lstm_size) == 1:\n cell = tf.keras.layers.LSTMCell(lstm_size[0], dtype=dtype, implementation=KERAS_LSTM_FUSED)\n else:\n cell = tf.keras.layers.StackedRNNCells([tf.keras.layers.LSTMCell(size, dtype=dtype, implementation=KERAS_LSTM_FUSED) for size in lstm_size])\n lstm_network = dynamic_unroll_layer.DynamicUnroll(cell)\noutput_encoder = []\nif output_fc_layer_params:\n output_encoder = [tf.keras.layers.Dense(num_units, activation=activation_fn, kernel_initializer=kernel_initializer, dtype=dtype) for num_units in output_fc_layer_params]\ncounter = [-1]\n\ndef create_spec(size):\n counter[0] += 1\n return tensor_spec.TensorSpec(size, dtype=dtype, name='network_state_%d' % counter[0])\nstate_spec = tf.nest.map_structure(create_spec, lstm_network.cell.state_size)\nsuper(LSTMEncodingNetwork, self).__init__(input_tensor_spec=input_tensor_spec, state_spec=state_spec, name=name)\nself._conv_layer_params = conv_layer_params\nself._input_encoder = input_encoder\nself._lstm_network = lstm_network\nself._output_encoder = output_encoder", "num_outer_dims = nest_utils.get_outer_rank(observation, self.input_tensor_spec)\nif num_outer_dims not in (1, 2):\n raise ValueError('Input observation must have a batch or batch x time outer shape.')\nhas_time_dim = num_outer_dims == 2\nif not has_time_dim:\n observation = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1), observation)\n step_type = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1), step_type)\nstate, _ = self._input_encoder(observation, step_type=step_type, network_state=(), training=training)\nnetwork_kwargs = {}\nif isinstance(self._lstm_network, dynamic_unroll_layer.DynamicUnroll):\n network_kwargs['reset_mask'] = tf.equal(step_type, time_step.StepType.FIRST, name='mask')\noutput = self._lstm_network(inputs=state, initial_state=network_state, training=training, **network_kwargs)\nif isinstance(self._lstm_network, dynamic_unroll_layer.DynamicUnroll):\n state, network_state = output\nelse:\n state = output[0]\n network_state = tf.nest.pack_sequence_as(self._lstm_network.cell.state_size, tf.nest.flatten(output[1:]))\nfor layer in self._output_encoder:\n state = layer(state, training=training)\nif not has_time_dim:\n state = tf.squeeze(state, [1])\nreturn (state, network_state)"], "bodies_text": "<|body_start_0|>\n if lstm_size is None and rnn_construction_fn is None:\n raise ValueError('Need to provide either custom rnn_construction_fn or lstm_size.')\n if lstm_size and rnn_construction_fn:\n raise ValueError('Cannot provide both custom rnn_construction_fn and lstm_size.')\n kernel_initializer = tf.compat.v1.variance_scaling_initializer(scale=2.0, mode='fan_in', distribution='truncated_normal')\n input_encoder = encoding_network.EncodingNetwork(input_tensor_spec, preprocessing_layers=preprocessing_layers, preprocessing_combiner=preprocessing_combiner, conv_layer_params=conv_layer_params, fc_layer_params=input_fc_layer_params, activation_fn=activation_fn, kernel_initializer=kernel_initializer, dtype=dtype)\n if rnn_construction_fn:\n rnn_construction_kwargs = rnn_construction_kwargs or {}\n lstm_network = rnn_construction_fn(**rnn_construction_kwargs)\n else:\n if len(lstm_size) == 1:\n cell = tf.keras.layers.LSTMCell(lstm_size[0], dtype=dtype, implementation=KERAS_LSTM_FUSED)\n else:\n cell = tf.keras.layers.StackedRNNCells([tf.keras.layers.LSTMCell(size, dtype=dtype, implementation=KERAS_LSTM_FUSED) for size in lstm_size])\n 
lstm_network = dynamic_unroll_layer.DynamicUnroll(cell)\n output_encoder = []\n if output_fc_layer_params:\n output_encoder = [tf.keras.layers.Dense(num_units, activation=activation_fn, kernel_initializer=kernel_initializer, dtype=dtype) for num_units in output_fc_layer_params]\n counter = [-1]\n\n def create_spec(size):\n counter[0] += 1\n return tensor_spec.TensorSpec(size, dtype=dtype, name='network_state_%d' % counter[0])\n state_spec = tf.nest.map_structure(create_spec, lstm_network.cell.state_size)\n super(LSTMEncodingNetwork, self).__init__(input_tensor_spec=input_tensor_spec, state_spec=state_spec, name=name)\n self._conv_layer_params = conv_layer_params\n self._input_encoder = input_encoder\n self._lstm_network = lstm_network\n self._output_encoder = output_encoder\n<|end_body_0|>\n\n<|body_start_1|>\n num_outer_dims = nest_utils.get_outer_rank(observation, self.input_tensor_spec)\n if num_outer_dims not in (1, 2):\n raise ValueError('Input observation must have a batch or batch x time outer shape.')\n has_time_dim = num_outer_dims == 2\n if not has_time_dim:\n observation = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1), observation)\n step_type = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1), step_type)\n state, _ = self._input_encoder(observation, step_type=step_type, network_state=(), training=training)\n network_kwargs = {}\n if isinstance(self._lstm_network, dynamic_unroll_layer.DynamicUnroll):\n network_kwargs['reset_mask'] = tf.equal(step_type, time_step.StepType.FIRST, name='mask')\n output = self._lstm_network(inputs=state, initial_state=network_state, training=training, **network_kwargs)\n if isinstance(self._lstm_network, dynamic_unroll_layer.DynamicUnroll):\n state, network_state = output\n else:\n state = output[0]\n network_state = tf.nest.pack_sequence_as(self._lstm_network.cell.state_size, tf.nest.flatten(output[1:]))\n for layer in self._output_encoder:\n state = layer(state, training=training)\n if not has_time_dim:\n state = tf.squeeze(state, [1])\n return (state, network_state)\n<|end_body_1|>\n", "class_docstring": "Recurrent network.", "class_name": "LSTMEncodingNetwork", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LSTMEncodingNetwork:\n \"\"\"Recurrent network.\"\"\"\n\n def __init__(self, input_tensor_spec, preprocessing_layers=None, preprocessing_combiner=None, conv_layer_params=None, input_fc_layer_params=(75, 40), lstm_size=None, output_fc_layer_params=(75, 40), activation_fn=tf.keras.activations.relu, rnn_construction_fn=None, rnn_construction_kwargs=None, dtype=tf.float32, name='LSTMEncodingNetwork'):\n \"\"\"Creates an instance of `LSTMEncodingNetwork`. Input preprocessing is possible via `preprocessing_layers` and `preprocessing_combiner` Layers. If the `preprocessing_layers` nest is shallower than `input_tensor_spec`, then the layers will get the subnests. For example, if: ```python input_tensor_spec = ([TensorSpec(3)] * 2, [TensorSpec(3)] * 5) preprocessing_layers = (Layer1(), Layer2()) ``` then preprocessing will call: ```python preprocessed = [preprocessing_layers[0](observations[0]), preprocessing_layers[1](observations[1])] ``` However if ```python preprocessing_layers = ([Layer1() for _ in range(2)], [Layer2() for _ in range(5)]) ``` then preprocessing will call: ```python preprocessed =\"\"\"\n <|body_0|>\n\n def call(self, observation, step_type, network_state=(), training=False):\n \"\"\"Apply the network. 
Args: observation: A tuple of tensors matching `input_tensor_spec`. step_type: A tensor of `StepType. network_state: (optional.) The network state. training: Whether the output is being used for training. Returns: `(outputs, network_state)` - the network output and next network state. Raises: ValueError: If observation tensors lack outer `(batch,)` or `(batch, time)` axes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if lstm_size is None and rnn_construction_fn is None:\n raise ValueError('Need to provide either custom rnn_construction_fn or lstm_size.')\n if lstm_size and rnn_construction_fn:\n raise ValueError('Cannot provide both custom rnn_construction_fn and lstm_size.')\n kernel_initializer = tf.compat.v1.variance_scaling_initializer(scale=2.0, mode='fan_in', distribution='truncated_normal')\n input_encoder = encoding_network.EncodingNetwork(input_tensor_spec, preprocessing_layers=preprocessing_layers, preprocessing_combiner=preprocessing_combiner, conv_layer_params=conv_layer_params, fc_layer_params=input_fc_layer_params, activation_fn=activation_fn, kernel_initializer=kernel_initializer, dtype=dtype)\n if rnn_construction_fn:\n rnn_construction_kwargs = rnn_construction_kwargs or {}\n lstm_network = rnn_construction_fn(**rnn_construction_kwargs)\n else:\n if len(lstm_size) == 1:\n cell = tf.keras.layers.LSTMCell(lstm_size[0], dtype=dtype, implementation=KERAS_LSTM_FUSED)\n else:\n cell = tf.keras.layers.StackedRNNCells([tf.keras.layers.LSTMCell(size, dtype=dtype, implementation=KERAS_LSTM_FUSED) for size in lstm_size])\n lstm_network = dynamic_unroll_layer.DynamicUnroll(cell)\n output_encoder = []\n if output_fc_layer_params:\n output_encoder = [tf.keras.layers.Dense(num_units, activation=activation_fn, kernel_initializer=kernel_initializer, dtype=dtype) for num_units in output_fc_layer_params]\n counter = [-1]\n\n def create_spec(size):\n counter[0] += 1\n return tensor_spec.TensorSpec(size, dtype=dtype, name='network_state_%d' % counter[0])\n state_spec = tf.nest.map_structure(create_spec, lstm_network.cell.state_size)\n super(LSTMEncodingNetwork, self).__init__(input_tensor_spec=input_tensor_spec, state_spec=state_spec, name=name)\n self._conv_layer_params = conv_layer_params\n self._input_encoder = input_encoder\n self._lstm_network = lstm_network\n self._output_encoder = output_encoder\n<|end_body_0|>\n\n<|body_start_1|>\n num_outer_dims = nest_utils.get_outer_rank(observation, self.input_tensor_spec)\n if num_outer_dims not in (1, 2):\n raise ValueError('Input observation must have a batch or batch x time outer shape.')\n has_time_dim = num_outer_dims == 2\n if not has_time_dim:\n observation = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1), observation)\n step_type = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1), step_type)\n state, _ = self._input_encoder(observation, step_type=step_type, network_state=(), training=training)\n network_kwargs = {}\n if isinstance(self._lstm_network, dynamic_unroll_layer.DynamicUnroll):\n network_kwargs['reset_mask'] = tf.equal(step_type, time_step.StepType.FIRST, name='mask')\n output = self._lstm_network(inputs=state, initial_state=network_state, training=training, **network_kwargs)\n if isinstance(self._lstm_network, dynamic_unroll_layer.DynamicUnroll):\n state, network_state = output\n else:\n state = output[0]\n network_state = tf.nest.pack_sequence_as(self._lstm_network.cell.state_size, tf.nest.flatten(output[1:]))\n for layer in self._output_encoder:\n state = layer(state, training=training)\n if not 
has_time_dim:\n state = tf.squeeze(state, [1])\n return (state, network_state)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000280", "length_bytes": 9710, "license_type": "permissive", "methods": [{"docstring": "Creates an instance of `LSTMEncodingNetwork`. Input preprocessing is possible via `preprocessing_layers` and `preprocessing_combiner` Layers. If the `preprocessing_layers` nest is shallower than `input_tensor_spec`, then the layers will get the subnests. For example, if: ```python input_tensor_spec = ([TensorSpec(3)] * 2, [TensorSpec(3)] * 5) preprocessing_layers = (Layer1(), Layer2()) ``` then preprocessing will call: ```python preprocessed = [preprocessing_layers[0](observations[0]), preprocessing_layers[1](observations[1])] ``` However if ```python preprocessing_layers = ([Layer1() for _ in range(2)], [Layer2() for _ in range(5)]) ``` then preprocessing will call: ```python preprocessed =", "name": "__init__", "signature": "def __init__(self, input_tensor_spec, preprocessing_layers=None, preprocessing_combiner=None, conv_layer_params=None, input_fc_layer_params=(75, 40), lstm_size=None, output_fc_layer_params=(75, 40), activation_fn=tf.keras.activations.relu, rnn_construction_fn=None, rnn_construction_kwargs=None, dtype=tf.float32, name='LSTMEncodingNetwork')"}, {"docstring": "Apply the network. Args: observation: A tuple of tensors matching `input_tensor_spec`. step_type: A tensor of `StepType. network_state: (optional.) The network state. training: Whether the output is being used for training. Returns: `(outputs, network_state)` - the network output and next network state. Raises: ValueError: If observation tensors lack outer `(batch,)` or `(batch, time)` axes.", "name": "call", "signature": "def call(self, observation, step_type, network_state=(), training=False)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002587", "prompt": "Implement the Python class `LSTMEncodingNetwork` described below.\n\nClass description:\nRecurrent network.\n\nMethod signatures and docstrings:\n- def __init__(self, input_tensor_spec, preprocessing_layers=None, preprocessing_combiner=None, conv_layer_params=None, input_fc_layer_params=(75, 40), lstm_size=None, output_fc_layer_params=(75, 40), activation_fn=tf.keras.activations.relu, rnn_construction_fn=None, rnn_construction_kwargs=None, dtype=tf.float32, name='LSTMEncodingNetwork'): Creates an instance of `LSTMEncodingNetwork`. Input preprocessing is possible via `preprocessing_layers` and `preprocessing_combiner` Layers. If the `preprocessing_layers` nest is shallower than `input_tensor_spec`, then the layers will get the subnests. For example, if: ```python input_tensor_spec = ([TensorSpec(3)] * 2, [TensorSpec(3)] * 5) preprocessing_layers = (Layer1(), Layer2()) ``` then preprocessing will call: ```python preprocessed = [preprocessing_layers[0](observations[0]), preprocessing_layers[1](observations[1])] ``` However if ```python preprocessing_layers = ([Layer1() for _ in range(2)], [Layer2() for _ in range(5)]) ``` then preprocessing will call: ```python preprocessed =\n- def call(self, observation, step_type, network_state=(), training=False): Apply the network. Args: observation: A tuple of tensors matching `input_tensor_spec`. step_type: A tensor of `StepType. network_state: (optional.) The network state. training: Whether the output is being used for training. Returns: `(outputs, network_state)` - the network output and next network state. 
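The cell-construction branch of `__init__` above reduces to a small amount of stock Keras code. The sketch below reproduces just that branch, with `tf.keras.layers.RNN` standing in for tf_agents' `dynamic_unroll_layer.DynamicUnroll` — an assumption (DynamicUnroll additionally supports reset masks) made so the snippet runs without tf_agents installed:

```python
# Single cell for one size, StackedRNNCells otherwise, as in the record's
# __init__; tf.keras.layers.RNN is a stand-in for DynamicUnroll.
import tensorflow as tf


def build_lstm(lstm_size, dtype=tf.float32):
    if len(lstm_size) == 1:
        cell = tf.keras.layers.LSTMCell(lstm_size[0], dtype=dtype)
    else:
        cell = tf.keras.layers.StackedRNNCells(
            [tf.keras.layers.LSTMCell(size, dtype=dtype) for size in lstm_size])
    return tf.keras.layers.RNN(cell, return_sequences=True, return_state=True)


rnn = build_lstm((75, 40))            # two stacked cells, 75 and 40 units
outputs = rnn(tf.zeros([4, 10, 8]))   # batch=4, time=10, features=8
print(outputs[0].shape)               # (4, 10, 40): sequence output of the top cell
print(rnn.cell.state_size)            # the nested sizes that create_spec walks over
```

The final `print` shows why the record builds `state_spec` with `tf.nest.map_structure(create_spec, ...)`: `state_size` is a nested structure (one `[h, c]` pair per stacked cell), and each leaf gets its own named `TensorSpec`.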
Raises: ValueError: If observation tensors lack outer `(batch,)` or `(batch, time)` axes.", "prompted_full_text": "Implement the Python class `LSTMEncodingNetwork` described below.\n\nClass description:\nRecurrent network.\n\nMethod signatures and docstrings:\n- def __init__(self, input_tensor_spec, preprocessing_layers=None, preprocessing_combiner=None, conv_layer_params=None, input_fc_layer_params=(75, 40), lstm_size=None, output_fc_layer_params=(75, 40), activation_fn=tf.keras.activations.relu, rnn_construction_fn=None, rnn_construction_kwargs=None, dtype=tf.float32, name='LSTMEncodingNetwork'): Creates an instance of `LSTMEncodingNetwork`. Input preprocessing is possible via `preprocessing_layers` and `preprocessing_combiner` Layers. If the `preprocessing_layers` nest is shallower than `input_tensor_spec`, then the layers will get the subnests. For example, if: ```python input_tensor_spec = ([TensorSpec(3)] * 2, [TensorSpec(3)] * 5) preprocessing_layers = (Layer1(), Layer2()) ``` then preprocessing will call: ```python preprocessed = [preprocessing_layers[0](observations[0]), preprocessing_layers[1](observations[1])] ``` However if ```python preprocessing_layers = ([Layer1() for _ in range(2)], [Layer2() for _ in range(5)]) ``` then preprocessing will call: ```python preprocessed =\n- def call(self, observation, step_type, network_state=(), training=False): Apply the network. Args: observation: A tuple of tensors matching `input_tensor_spec`. step_type: A tensor of `StepType. network_state: (optional.) The network state. training: Whether the output is being used for training. Returns: `(outputs, network_state)` - the network output and next network state. Raises: ValueError: If observation tensors lack outer `(batch,)` or `(batch, time)` axes.\n\n<|skeleton|>\nclass LSTMEncodingNetwork:\n \"\"\"Recurrent network.\"\"\"\n\n def __init__(self, input_tensor_spec, preprocessing_layers=None, preprocessing_combiner=None, conv_layer_params=None, input_fc_layer_params=(75, 40), lstm_size=None, output_fc_layer_params=(75, 40), activation_fn=tf.keras.activations.relu, rnn_construction_fn=None, rnn_construction_kwargs=None, dtype=tf.float32, name='LSTMEncodingNetwork'):\n \"\"\"Creates an instance of `LSTMEncodingNetwork`. Input preprocessing is possible via `preprocessing_layers` and `preprocessing_combiner` Layers. If the `preprocessing_layers` nest is shallower than `input_tensor_spec`, then the layers will get the subnests. For example, if: ```python input_tensor_spec = ([TensorSpec(3)] * 2, [TensorSpec(3)] * 5) preprocessing_layers = (Layer1(), Layer2()) ``` then preprocessing will call: ```python preprocessed = [preprocessing_layers[0](observations[0]), preprocessing_layers[1](observations[1])] ``` However if ```python preprocessing_layers = ([Layer1() for _ in range(2)], [Layer2() for _ in range(5)]) ``` then preprocessing will call: ```python preprocessed =\"\"\"\n <|body_0|>\n\n def call(self, observation, step_type, network_state=(), training=False):\n \"\"\"Apply the network. Args: observation: A tuple of tensors matching `input_tensor_spec`. step_type: A tensor of `StepType. network_state: (optional.) The network state. training: Whether the output is being used for training. Returns: `(outputs, network_state)` - the network output and next network state. 
Raises: ValueError: If observation tensors lack outer `(batch,)` or `(batch, time)` axes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if lstm_size is None and rnn_construction_fn is None:\n raise ValueError('Need to provide either custom rnn_construction_fn or lstm_size.')\n if lstm_size and rnn_construction_fn:\n raise ValueError('Cannot provide both custom rnn_construction_fn and lstm_size.')\n kernel_initializer = tf.compat.v1.variance_scaling_initializer(scale=2.0, mode='fan_in', distribution='truncated_normal')\n input_encoder = encoding_network.EncodingNetwork(input_tensor_spec, preprocessing_layers=preprocessing_layers, preprocessing_combiner=preprocessing_combiner, conv_layer_params=conv_layer_params, fc_layer_params=input_fc_layer_params, activation_fn=activation_fn, kernel_initializer=kernel_initializer, dtype=dtype)\n if rnn_construction_fn:\n rnn_construction_kwargs = rnn_construction_kwargs or {}\n lstm_network = rnn_construction_fn(**rnn_construction_kwargs)\n else:\n if len(lstm_size) == 1:\n cell = tf.keras.layers.LSTMCell(lstm_size[0], dtype=dtype, implementation=KERAS_LSTM_FUSED)\n else:\n cell = tf.keras.layers.StackedRNNCells([tf.keras.layers.LSTMCell(size, dtype=dtype, implementation=KERAS_LSTM_FUSED) for size in lstm_size])\n lstm_network = dynamic_unroll_layer.DynamicUnroll(cell)\n output_encoder = []\n if output_fc_layer_params:\n output_encoder = [tf.keras.layers.Dense(num_units, activation=activation_fn, kernel_initializer=kernel_initializer, dtype=dtype) for num_units in output_fc_layer_params]\n counter = [-1]\n\n def create_spec(size):\n counter[0] += 1\n return tensor_spec.TensorSpec(size, dtype=dtype, name='network_state_%d' % counter[0])\n state_spec = tf.nest.map_structure(create_spec, lstm_network.cell.state_size)\n super(LSTMEncodingNetwork, self).__init__(input_tensor_spec=input_tensor_spec, state_spec=state_spec, name=name)\n self._conv_layer_params = conv_layer_params\n self._input_encoder = input_encoder\n self._lstm_network = lstm_network\n self._output_encoder = output_encoder\n<|end_body_0|>\n\n<|body_start_1|>\n num_outer_dims = nest_utils.get_outer_rank(observation, self.input_tensor_spec)\n if num_outer_dims not in (1, 2):\n raise ValueError('Input observation must have a batch or batch x time outer shape.')\n has_time_dim = num_outer_dims == 2\n if not has_time_dim:\n observation = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1), observation)\n step_type = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1), step_type)\n state, _ = self._input_encoder(observation, step_type=step_type, network_state=(), training=training)\n network_kwargs = {}\n if isinstance(self._lstm_network, dynamic_unroll_layer.DynamicUnroll):\n network_kwargs['reset_mask'] = tf.equal(step_type, time_step.StepType.FIRST, name='mask')\n output = self._lstm_network(inputs=state, initial_state=network_state, training=training, **network_kwargs)\n if isinstance(self._lstm_network, dynamic_unroll_layer.DynamicUnroll):\n state, network_state = output\n else:\n state = output[0]\n network_state = tf.nest.pack_sequence_as(self._lstm_network.cell.state_size, tf.nest.flatten(output[1:]))\n for layer in self._output_encoder:\n state = layer(state, training=training)\n if not has_time_dim:\n state = tf.squeeze(state, [1])\n return (state, network_state)\n<|end_body_1|>\n", "revision_id": "eca1093d3a047e538f17f6ab92ab4d8144284f23", "skeleton": "<|skeleton|>\nclass LSTMEncodingNetwork:\n \"\"\"Recurrent network.\"\"\"\n\n def __init__(self, input_tensor_spec, 
preprocessing_layers=None, preprocessing_combiner=None, conv_layer_params=None, input_fc_layer_params=(75, 40), lstm_size=None, output_fc_layer_params=(75, 40), activation_fn=tf.keras.activations.relu, rnn_construction_fn=None, rnn_construction_kwargs=None, dtype=tf.float32, name='LSTMEncodingNetwork'):\n \"\"\"Creates an instance of `LSTMEncodingNetwork`. Input preprocessing is possible via `preprocessing_layers` and `preprocessing_combiner` Layers. If the `preprocessing_layers` nest is shallower than `input_tensor_spec`, then the layers will get the subnests. For example, if: ```python input_tensor_spec = ([TensorSpec(3)] * 2, [TensorSpec(3)] * 5) preprocessing_layers = (Layer1(), Layer2()) ``` then preprocessing will call: ```python preprocessed = [preprocessing_layers[0](observations[0]), preprocessing_layers[1](observations[1])] ``` However if ```python preprocessing_layers = ([Layer1() for _ in range(2)], [Layer2() for _ in range(5)]) ``` then preprocessing will call: ```python preprocessed =\"\"\"\n <|body_0|>\n\n def call(self, observation, step_type, network_state=(), training=False):\n \"\"\"Apply the network. Args: observation: A tuple of tensors matching `input_tensor_spec`. step_type: A tensor of `StepType. network_state: (optional.) The network state. training: Whether the output is being used for training. Returns: `(outputs, network_state)` - the network output and next network state. Raises: ValueError: If observation tensors lack outer `(batch,)` or `(batch, time)` axes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LSTMEncodingNetwork:\n \"\"\"Recurrent network.\"\"\"\n\n def __init__(self, input_tensor_spec, preprocessing_layers=None, preprocessing_combiner=None, conv_layer_params=None, input_fc_layer_params=(75, 40), lstm_size=None, output_fc_layer_params=(75, 40), activation_fn=tf.keras.activations.relu, rnn_construction_fn=None, rnn_construction_kwargs=None, dtype=tf.float32, name='LSTMEncodingNetwork'):\n \"\"\"Creates an instance of `LSTMEncodingNetwork`. Input preprocessing is possible via `preprocessing_layers` and `preprocessing_combiner` Layers. If the `preprocessing_layers` nest is shallower than `input_tensor_spec`, then the layers will get the subnests. 
For example, if: ```python input_tensor_spec = ([TensorSpec(3)] * 2, [TensorSpec(3)] * 5) preprocessing_layers = (Layer1(), Layer2()) ``` then preprocessing will call: ```python preprocessed = [preprocessing_layers[0](observations[0]), preprocessing_layers[1](observations[1])] ``` However if ```python preprocessing_layers = ([Layer1() for _ in range(2)], [Layer2() for _ in range(5)]) ``` then preprocessing will call: ```python preprocessed =\"\"\"\n if lstm_size is None and rnn_construction_fn is None:\n raise ValueError('Need to provide either custom rnn_construction_fn or lstm_size.')\n if lstm_size and rnn_construction_fn:\n raise ValueError('Cannot provide both custom rnn_construction_fn and lstm_size.')\n kernel_initializer = tf.compat.v1.variance_scaling_initializer(scale=2.0, mode='fan_in', distribution='truncated_normal')\n input_encoder = encoding_network.EncodingNetwork(input_tensor_spec, preprocessing_layers=preprocessing_layers, preprocessing_combiner=preprocessing_combiner, conv_layer_params=conv_layer_params, fc_layer_params=input_fc_layer_params, activation_fn=activation_fn, kernel_initializer=kernel_initializer, dtype=dtype)\n if rnn_construction_fn:\n rnn_construction_kwargs = rnn_construction_kwargs or {}\n lstm_network = rnn_construction_fn(**rnn_construction_kwargs)\n else:\n if len(lstm_size) == 1:\n cell = tf.keras.layers.LSTMCell(lstm_size[0], dtype=dtype, implementation=KERAS_LSTM_FUSED)\n else:\n cell = tf.keras.layers.StackedRNNCells([tf.keras.layers.LSTMCell(size, dtype=dtype, implementation=KERAS_LSTM_FUSED) for size in lstm_size])\n lstm_network = dynamic_unroll_layer.DynamicUnroll(cell)\n output_encoder = []\n if output_fc_layer_params:\n output_encoder = [tf.keras.layers.Dense(num_units, activation=activation_fn, kernel_initializer=kernel_initializer, dtype=dtype) for num_units in output_fc_layer_params]\n counter = [-1]\n\n def create_spec(size):\n counter[0] += 1\n return tensor_spec.TensorSpec(size, dtype=dtype, name='network_state_%d' % counter[0])\n state_spec = tf.nest.map_structure(create_spec, lstm_network.cell.state_size)\n super(LSTMEncodingNetwork, self).__init__(input_tensor_spec=input_tensor_spec, state_spec=state_spec, name=name)\n self._conv_layer_params = conv_layer_params\n self._input_encoder = input_encoder\n self._lstm_network = lstm_network\n self._output_encoder = output_encoder\n\n def call(self, observation, step_type, network_state=(), training=False):\n \"\"\"Apply the network. Args: observation: A tuple of tensors matching `input_tensor_spec`. step_type: A tensor of `StepType. network_state: (optional.) The network state. training: Whether the output is being used for training. Returns: `(outputs, network_state)` - the network output and next network state. 
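The rank handling in `call` (continued just below) is easy to probe in isolation: a rank-2 `(batch, features)` observation gains a singleton time axis before the recurrent layer and loses it afterwards. A minimal standalone check, using a plain Keras RNN rather than the network class:

```python
# Isolated check of the expand_dims/squeeze dance in `call`: inputs without a
# time axis get one added for the RNN and removed again on the way out.
import tensorflow as tf

obs = tf.zeros([4, 8])                                   # (batch, features)
obs = tf.expand_dims(obs, 1)                             # (batch, 1, features)
rnn = tf.keras.layers.RNN(tf.keras.layers.LSTMCell(16),
                          return_sequences=True)
out = tf.squeeze(rnn(obs), [1])                          # back to (batch, units)
print(out.shape)                                         # (4, 16)
```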
Raises: ValueError: If observation tensors lack outer `(batch,)` or `(batch, time)` axes.\"\"\"\n num_outer_dims = nest_utils.get_outer_rank(observation, self.input_tensor_spec)\n if num_outer_dims not in (1, 2):\n raise ValueError('Input observation must have a batch or batch x time outer shape.')\n has_time_dim = num_outer_dims == 2\n if not has_time_dim:\n observation = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1), observation)\n step_type = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1), step_type)\n state, _ = self._input_encoder(observation, step_type=step_type, network_state=(), training=training)\n network_kwargs = {}\n if isinstance(self._lstm_network, dynamic_unroll_layer.DynamicUnroll):\n network_kwargs['reset_mask'] = tf.equal(step_type, time_step.StepType.FIRST, name='mask')\n output = self._lstm_network(inputs=state, initial_state=network_state, training=training, **network_kwargs)\n if isinstance(self._lstm_network, dynamic_unroll_layer.DynamicUnroll):\n state, network_state = output\n else:\n state = output[0]\n network_state = tf.nest.pack_sequence_as(self._lstm_network.cell.state_size, tf.nest.flatten(output[1:]))\n for layer in self._output_encoder:\n state = layer(state, training=training)\n if not has_time_dim:\n state = tf.squeeze(state, [1])\n return (state, network_state)\n", "source": "the_stack_v2_python_sparse", "source_path": "tf_agents/networks/lstm_encoding_network.py", "source_repo": "tensorflow/agents", "split": "val", "star_events_count": 2755} {"blob_id": "c9328796db21c5430ef629e3d2009f839837cd3f", "bodies": ["i = 0\nj = len(height) - 1\nleft_bound = 0\nright_bound = 0\ntotal = 0\nwhile i < len(height) and j > 0 and (i <= j):\n if height[i] <= height[j]:\n total += max(0, left_bound - height[i])\n left_bound = max(left_bound, height[i])\n i += 1\n elif height[i] > height[j]:\n total += max(0, right_bound - height[j])\n right_bound = max(right_bound, height[j])\n j -= 1\nreturn total", "left = [0] * len(height)\nright = [0] * len(height)\ntotal = 0\nfor i in range(1, len(height)):\n left[i] = max(left[i - 1], height[i - 1])\nfor j in range(len(height) - 2, -1, -1):\n right[j] = max(right[j + 1], height[j + 1])\nfor k in range(len(height)):\n lower_bound = min(left[k], right[k])\n water = max(0, lower_bound - height[k])\n total += water\nreturn total"], "bodies_text": "<|body_start_0|>\n i = 0\n j = len(height) - 1\n left_bound = 0\n right_bound = 0\n total = 0\n while i < len(height) and j > 0 and (i <= j):\n if height[i] <= height[j]:\n total += max(0, left_bound - height[i])\n left_bound = max(left_bound, height[i])\n i += 1\n elif height[i] > height[j]:\n total += max(0, right_bound - height[j])\n right_bound = max(right_bound, height[j])\n j -= 1\n return total\n<|end_body_0|>\n\n<|body_start_1|>\n left = [0] * len(height)\n right = [0] * len(height)\n total = 0\n for i in range(1, len(height)):\n left[i] = max(left[i - 1], height[i - 1])\n for j in range(len(height) - 2, -1, -1):\n right[j] = max(right[j + 1], height[j + 1])\n for k in range(len(height)):\n lower_bound = min(left[k], right[k])\n water = max(0, lower_bound - height[k])\n total += water\n return total\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def trap(self, height):\n \"\"\":type height: List[int] :rtype: int **Thought: Q: which bound matter? 
- only the lower of (left_bound, right_bound) matter to how much water trap at a piont - so, you have two pointers: i, j, keep track of left-bound and right bound respectively. - move i or j whichever one is the lower bound. (i++, j--) Q: How much water trap: - water = max(0, lower_bound - height) - if height is higher than lower_bound, no water trap Q: When do you update the amount of water trap: - you update the amoutn of water trap at height[i] when you pointer is that that place. - so, after you update the water trap at height[i], you move the pointer forward or backward depend on which point you move (i++, j--) Q: Whe\"\"\"\n <|body_0|>\n\n def trap1(self, height):\n \"\"\":type height: List[int] :rtype: int time complexity: O(N) = (O(N) + O(N) + O(N))\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n i = 0\n j = len(height) - 1\n left_bound = 0\n right_bound = 0\n total = 0\n while i < len(height) and j > 0 and (i <= j):\n if height[i] <= height[j]:\n total += max(0, left_bound - height[i])\n left_bound = max(left_bound, height[i])\n i += 1\n elif height[i] > height[j]:\n total += max(0, right_bound - height[j])\n right_bound = max(right_bound, height[j])\n j -= 1\n return total\n<|end_body_0|>\n\n<|body_start_1|>\n left = [0] * len(height)\n right = [0] * len(height)\n total = 0\n for i in range(1, len(height)):\n left[i] = max(left[i - 1], height[i - 1])\n for j in range(len(height) - 2, -1, -1):\n right[j] = max(right[j + 1], height[j + 1])\n for k in range(len(height)):\n lower_bound = min(left[k], right[k])\n water = max(0, lower_bound - height[k])\n total += water\n return total\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000281", "length_bytes": 2815, "license_type": "no_license", "methods": [{"docstring": ":type height: List[int] :rtype: int **Thought: Q: which bound matter? - only the lower of (left_bound, right_bound) matter to how much water trap at a piont - so, you have two pointers: i, j, keep track of left-bound and right bound respectively. - move i or j whichever one is the lower bound. (i++, j--) Q: How much water trap: - water = max(0, lower_bound - height) - if height is higher than lower_bound, no water trap Q: When do you update the amount of water trap: - you update the amoutn of water trap at height[i] when you pointer is that that place. - so, after you update the water trap at height[i], you move the pointer forward or backward depend on which point you move (i++, j--) Q: Whe", "name": "trap", "signature": "def trap(self, height)"}, {"docstring": ":type height: List[int] :rtype: int time complexity: O(N) = (O(N) + O(N) + O(N))", "name": "trap1", "signature": "def trap1(self, height)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def trap(self, height): :type height: List[int] :rtype: int **Thought: Q: which bound matter? - only the lower of (left_bound, right_bound) matter to how much water trap at a piont - so, you have two pointers: i, j, keep track of left-bound and right bound respectively. - move i or j whichever one is the lower bound. (i++, j--) Q: How much water trap: - water = max(0, lower_bound - height) - if height is higher than lower_bound, no water trap Q: When do you update the amount of water trap: - you update the amoutn of water trap at height[i] when you pointer is that that place. 
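The Q&A reasoning in this record's docstring boils down to about a dozen lines. Here is a standalone rendering of the two-pointer answer — a sketch rather than the class method verbatim — checked against the two standard LeetCode 42 fixtures:

```python
# Two-pointer trapping-rain-water, as reasoned out above: always advance the
# side whose running bound is lower, since that bound caps the trapped water.
def trap(height):
    i, j = 0, len(height) - 1
    left_bound = right_bound = total = 0
    while i <= j:
        if height[i] <= height[j]:
            total += max(0, left_bound - height[i])   # water held at position i
            left_bound = max(left_bound, height[i])
            i += 1
        else:
            total += max(0, right_bound - height[j])  # water held at position j
            right_bound = max(right_bound, height[j])
            j -= 1
    return total


assert trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]) == 6
assert trap([4, 2, 0, 3, 2, 5]) == 9
```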
- so, after you update the water trap at height[i], you move the pointer forward or backward depend on which point you move (i++, j--) Q: Whe\n- def trap1(self, height): :type height: List[int] :rtype: int time complexity: O(N) = (O(N) + O(N) + O(N))", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def trap(self, height): :type height: List[int] :rtype: int **Thought: Q: which bound matter? - only the lower of (left_bound, right_bound) matter to how much water trap at a piont - so, you have two pointers: i, j, keep track of left-bound and right bound respectively. - move i or j whichever one is the lower bound. (i++, j--) Q: How much water trap: - water = max(0, lower_bound - height) - if height is higher than lower_bound, no water trap Q: When do you update the amount of water trap: - you update the amoutn of water trap at height[i] when you pointer is that that place. - so, after you update the water trap at height[i], you move the pointer forward or backward depend on which point you move (i++, j--) Q: Whe\n- def trap1(self, height): :type height: List[int] :rtype: int time complexity: O(N) = (O(N) + O(N) + O(N))\n\n<|skeleton|>\nclass Solution:\n\n def trap(self, height):\n \"\"\":type height: List[int] :rtype: int **Thought: Q: which bound matter? - only the lower of (left_bound, right_bound) matter to how much water trap at a piont - so, you have two pointers: i, j, keep track of left-bound and right bound respectively. - move i or j whichever one is the lower bound. (i++, j--) Q: How much water trap: - water = max(0, lower_bound - height) - if height is higher than lower_bound, no water trap Q: When do you update the amount of water trap: - you update the amoutn of water trap at height[i] when you pointer is that that place. - so, after you update the water trap at height[i], you move the pointer forward or backward depend on which point you move (i++, j--) Q: Whe\"\"\"\n <|body_0|>\n\n def trap1(self, height):\n \"\"\":type height: List[int] :rtype: int time complexity: O(N) = (O(N) + O(N) + O(N))\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n i = 0\n j = len(height) - 1\n left_bound = 0\n right_bound = 0\n total = 0\n while i < len(height) and j > 0 and (i <= j):\n if height[i] <= height[j]:\n total += max(0, left_bound - height[i])\n left_bound = max(left_bound, height[i])\n i += 1\n elif height[i] > height[j]:\n total += max(0, right_bound - height[j])\n right_bound = max(right_bound, height[j])\n j -= 1\n return total\n<|end_body_0|>\n\n<|body_start_1|>\n left = [0] * len(height)\n right = [0] * len(height)\n total = 0\n for i in range(1, len(height)):\n left[i] = max(left[i - 1], height[i - 1])\n for j in range(len(height) - 2, -1, -1):\n right[j] = max(right[j + 1], height[j + 1])\n for k in range(len(height)):\n lower_bound = min(left[k], right[k])\n water = max(0, lower_bound - height[k])\n total += water\n return total\n<|end_body_1|>\n", "revision_id": "bf98c8fa31043a45b3d21cfe78d4e08f9cac9de6", "skeleton": "<|skeleton|>\nclass Solution:\n\n def trap(self, height):\n \"\"\":type height: List[int] :rtype: int **Thought: Q: which bound matter? - only the lower of (left_bound, right_bound) matter to how much water trap at a piont - so, you have two pointers: i, j, keep track of left-bound and right bound respectively. - move i or j whichever one is the lower bound. 
(i++, j--) Q: How much water trap: - water = max(0, lower_bound - height) - if height is higher than lower_bound, no water trap Q: When do you update the amount of water trap: - you update the amoutn of water trap at height[i] when you pointer is that that place. - so, after you update the water trap at height[i], you move the pointer forward or backward depend on which point you move (i++, j--) Q: Whe\"\"\"\n <|body_0|>\n\n def trap1(self, height):\n \"\"\":type height: List[int] :rtype: int time complexity: O(N) = (O(N) + O(N) + O(N))\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def trap(self, height):\n \"\"\":type height: List[int] :rtype: int **Thought: Q: which bound matter? - only the lower of (left_bound, right_bound) matter to how much water trap at a piont - so, you have two pointers: i, j, keep track of left-bound and right bound respectively. - move i or j whichever one is the lower bound. (i++, j--) Q: How much water trap: - water = max(0, lower_bound - height) - if height is higher than lower_bound, no water trap Q: When do you update the amount of water trap: - you update the amoutn of water trap at height[i] when you pointer is that that place. - so, after you update the water trap at height[i], you move the pointer forward or backward depend on which point you move (i++, j--) Q: Whe\"\"\"\n i = 0\n j = len(height) - 1\n left_bound = 0\n right_bound = 0\n total = 0\n while i < len(height) and j > 0 and (i <= j):\n if height[i] <= height[j]:\n total += max(0, left_bound - height[i])\n left_bound = max(left_bound, height[i])\n i += 1\n elif height[i] > height[j]:\n total += max(0, right_bound - height[j])\n right_bound = max(right_bound, height[j])\n j -= 1\n return total\n\n def trap1(self, height):\n \"\"\":type height: List[int] :rtype: int time complexity: O(N) = (O(N) + O(N) + O(N))\"\"\"\n left = [0] * len(height)\n right = [0] * len(height)\n total = 0\n for i in range(1, len(height)):\n left[i] = max(left[i - 1], height[i - 1])\n for j in range(len(height) - 2, -1, -1):\n right[j] = max(right[j + 1], height[j + 1])\n for k in range(len(height)):\n lower_bound = min(left[k], right[k])\n water = max(0, lower_bound - height[k])\n total += water\n return total\n", "source": "the_stack_v2_python_sparse", "source_path": "dynamic_programming/42_trapping_rain_water.py", "source_repo": "mistrydarshan99/Leetcode-3", "split": "val", "star_events_count": 0} {"blob_id": "d4ab58c68de28b35e72a7c2a1c61603e502f909e", "bodies": ["genre = Genre.query.filter_by(id=id).first()\nif genre is None:\n return ({'message': 'Genre does not exist'}, 404)\nreturn genre_schema.dump(genre)", "req = api.payload\ngenre = Genre.query.filter_by(id=id).first()\nif genre is None:\n return ({'message': 'Genre does not exist'}, 404)\ntry:\n edit_genre = genre_schema.load(req)\nexcept ValidationError:\n return {'error': err.messages}\ngenre.name = edit_genre.name\ntry:\n db.session.commit()\nexcept Exception:\n return ({'message': 'Unable to edit Genre'}, 500)\nreturn {'message': 'Role updated successfully'}", "genre = Genre.query.filter_by(id=id).first()\nif genre is None:\n return ({'message': 'Genre does not exist'}, 404)\ntry:\n db.session.delete(genre)\n db.session.commit()\nexcept Exception:\n return ({'message': 'Unable to delete Genre'}, 500)\nreturn {'message': 'Genre deleted successfully'}"], "bodies_text": "<|body_start_0|>\n genre = 
Genre.query.filter_by(id=id).first()\n if genre is None:\n return ({'message': 'Genre does not exist'}, 404)\n return genre_schema.dump(genre)\n<|end_body_0|>\n\n<|body_start_1|>\n req = api.payload\n genre = Genre.query.filter_by(id=id).first()\n if genre is None:\n return ({'message': 'Genre does not exist'}, 404)\n try:\n edit_genre = genre_schema.load(req)\n except ValidationError:\n return {'error': err.messages}\n genre.name = edit_genre.name\n try:\n db.session.commit()\n except Exception:\n return ({'message': 'Unable to edit Genre'}, 500)\n return {'message': 'Role updated successfully'}\n<|end_body_1|>\n\n<|body_start_2|>\n genre = Genre.query.filter_by(id=id).first()\n if genre is None:\n return ({'message': 'Genre does not exist'}, 404)\n try:\n db.session.delete(genre)\n db.session.commit()\n except Exception:\n return ({'message': 'Unable to delete Genre'}, 500)\n return {'message': 'Genre deleted successfully'}\n<|end_body_2|>\n", "class_docstring": "", "class_name": "SingleGenre", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SingleGenre:\n\n def get(self, id):\n \"\"\"Get Genre by id\"\"\"\n <|body_0|>\n\n def put(self, id):\n \"\"\"Update a Genre\"\"\"\n <|body_1|>\n\n def delete(self, id):\n \"\"\"Delete a Genre by id\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n genre = Genre.query.filter_by(id=id).first()\n if genre is None:\n return ({'message': 'Genre does not exist'}, 404)\n return genre_schema.dump(genre)\n<|end_body_0|>\n\n<|body_start_1|>\n req = api.payload\n genre = Genre.query.filter_by(id=id).first()\n if genre is None:\n return ({'message': 'Genre does not exist'}, 404)\n try:\n edit_genre = genre_schema.load(req)\n except ValidationError:\n return {'error': err.messages}\n genre.name = edit_genre.name\n try:\n db.session.commit()\n except Exception:\n return ({'message': 'Unable to edit Genre'}, 500)\n return {'message': 'Role updated successfully'}\n<|end_body_1|>\n\n<|body_start_2|>\n genre = Genre.query.filter_by(id=id).first()\n if genre is None:\n return ({'message': 'Genre does not exist'}, 404)\n try:\n db.session.delete(genre)\n db.session.commit()\n except Exception:\n return ({'message': 'Unable to delete Genre'}, 500)\n return {'message': 'Genre deleted successfully'}\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000282", "length_bytes": 3163, "license_type": "no_license", "methods": [{"docstring": "Get Genre by id", "name": "get", "signature": "def get(self, id)"}, {"docstring": "Update a Genre", "name": "put", "signature": "def put(self, id)"}, {"docstring": "Delete a Genre by id", "name": "delete", "signature": "def delete(self, id)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000084", "prompt": "Implement the Python class `SingleGenre` described below.\n\nClass description:\nImplement the SingleGenre class.\n\nMethod signatures and docstrings:\n- def get(self, id): Get Genre by id\n- def put(self, id): Update a Genre\n- def delete(self, id): Delete a Genre by id", "prompted_full_text": "Implement the Python class `SingleGenre` described below.\n\nClass description:\nImplement the SingleGenre class.\n\nMethod signatures and docstrings:\n- def get(self, id): Get Genre by id\n- def put(self, id): Update a Genre\n- def delete(self, id): Delete a Genre by id\n\n<|skeleton|>\nclass SingleGenre:\n\n def get(self, id):\n \"\"\"Get Genre by id\"\"\"\n <|body_0|>\n\n def put(self, id):\n \"\"\"Update a Genre\"\"\"\n <|body_1|>\n\n 
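One detail worth flagging in the `put` body above: `err.messages` is returned from a bare `except ValidationError:`, so `err` is never bound and the error path would raise `NameError` at runtime. A corrected validation sketch follows — `GenreSchema` is a placeholder mirroring the record, not the project's actual `genre_schema`:

```python
# The record's error path reads `err` from a bare `except ValidationError:`,
# which leaves it unbound; binding the exception fixes it.
from marshmallow import Schema, fields, ValidationError


class GenreSchema(Schema):
    name = fields.String(required=True)


def validate_genre_payload(payload):
    try:
        return GenreSchema().load(payload), None
    except ValidationError as err:          # bind it so err.messages is usable
        return None, ({'error': err.messages}, 400)


print(validate_genre_payload({'name': 'Fantasy'}))  # ({'name': 'Fantasy'}, None)
print(validate_genre_payload({}))                   # (None, ({'error': ...}, 400))
```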
def delete(self, id):\n \"\"\"Delete a Genre by id\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n genre = Genre.query.filter_by(id=id).first()\n if genre is None:\n return ({'message': 'Genre does not exist'}, 404)\n return genre_schema.dump(genre)\n<|end_body_0|>\n\n<|body_start_1|>\n req = api.payload\n genre = Genre.query.filter_by(id=id).first()\n if genre is None:\n return ({'message': 'Genre does not exist'}, 404)\n try:\n edit_genre = genre_schema.load(req)\n except ValidationError:\n return {'error': err.messages}\n genre.name = edit_genre.name\n try:\n db.session.commit()\n except Exception:\n return ({'message': 'Unable to edit Genre'}, 500)\n return {'message': 'Role updated successfully'}\n<|end_body_1|>\n\n<|body_start_2|>\n genre = Genre.query.filter_by(id=id).first()\n if genre is None:\n return ({'message': 'Genre does not exist'}, 404)\n try:\n db.session.delete(genre)\n db.session.commit()\n except Exception:\n return ({'message': 'Unable to delete Genre'}, 500)\n return {'message': 'Genre deleted successfully'}\n<|end_body_2|>\n", "revision_id": "ae78fff9888b0f68d9403d7f65cba086dabb3802", "skeleton": "<|skeleton|>\nclass SingleGenre:\n\n def get(self, id):\n \"\"\"Get Genre by id\"\"\"\n <|body_0|>\n\n def put(self, id):\n \"\"\"Update a Genre\"\"\"\n <|body_1|>\n\n def delete(self, id):\n \"\"\"Delete a Genre by id\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SingleGenre:\n def get(self, id):\n \"\"\"Get Genre by id\"\"\"\n genre = Genre.query.filter_by(id=id).first()\n if genre is None:\n return ({'message': 'Genre does not exist'}, 404)\n return genre_schema.dump(genre)\n\n def put(self, id):\n \"\"\"Update a Genre\"\"\"\n req = api.payload\n genre = Genre.query.filter_by(id=id).first()\n if genre is None:\n return ({'message': 'Genre does not exist'}, 404)\n try:\n edit_genre = genre_schema.load(req)\n except ValidationError:\n return {'error': err.messages}\n genre.name = edit_genre.name\n try:\n db.session.commit()\n except Exception:\n return ({'message': 'Unable to edit Genre'}, 500)\n return {'message': 'Role updated successfully'}\n\n def delete(self, id):\n \"\"\"Delete a Genre by id\"\"\"\n genre = Genre.query.filter_by(id=id).first()\n if genre is None:\n return ({'message': 'Genre does not exist'}, 404)\n try:\n db.session.delete(genre)\n db.session.commit()\n except Exception:\n return ({'message': 'Unable to delete Genre'}, 500)\n return {'message': 'Genre deleted successfully'}\n", "source": "the_stack_v2_python_sparse", "source_path": "api/v1/genres.py", "source_repo": "mythril-io/flask-api", "split": "val", "star_events_count": 0} {"blob_id": "4ca725b2ceddda6c2718ea2e1b12f4a7cd92f451", "bodies": ["import itertools\nparts = []\nsets = []\nfor c in S:\n seen = False\n merge_start = -1\n for i in range(0, len(sets)):\n if c in sets[i]:\n seen = True\n merge_start = i\n break\n parts.append([c])\n sets.append({c})\n if seen:\n parts_tmp = []\n sets_tmp = []\n for i in range(merge_start):\n parts_tmp.append(parts[i])\n sets_tmp.append(sets[i])\n parts_tmp.append(list(itertools.chain(*parts[merge_start:])))\n sets_tmp.append(set(itertools.chain(*sets[merge_start:])))\n parts = parts_tmp\n sets = sets_tmp\nreturn [len(part) for part in parts]", "lasts = {c: index for index, c in enumerate(S)}\nparts = []\nend = anchor = 0\nfor i, c in enumerate(S):\n end = max(end, lasts[c])\n if end == i:\n parts.append(end - anchor + 1)\n 
anchor = end + 1\nreturn parts"], "bodies_text": "<|body_start_0|>\n import itertools\n parts = []\n sets = []\n for c in S:\n seen = False\n merge_start = -1\n for i in range(0, len(sets)):\n if c in sets[i]:\n seen = True\n merge_start = i\n break\n parts.append([c])\n sets.append({c})\n if seen:\n parts_tmp = []\n sets_tmp = []\n for i in range(merge_start):\n parts_tmp.append(parts[i])\n sets_tmp.append(sets[i])\n parts_tmp.append(list(itertools.chain(*parts[merge_start:])))\n sets_tmp.append(set(itertools.chain(*sets[merge_start:])))\n parts = parts_tmp\n sets = sets_tmp\n return [len(part) for part in parts]\n<|end_body_0|>\n\n<|body_start_1|>\n lasts = {c: index for index, c in enumerate(S)}\n parts = []\n end = anchor = 0\n for i, c in enumerate(S):\n end = max(end, lasts[c])\n if end == i:\n parts.append(end - anchor + 1)\n anchor = end + 1\n return parts\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def partitionLabels(self, S):\n \"\"\"Linear scan the input string, store the previous partitions into a list. If the current char belongs to any of the previous partitions, merge them with the current char into one partition. :type S: str :rtype: List[int]\"\"\"\n <|body_0|>\n\n def partitionLabels2(self, S):\n \"\"\"Keep a dict/map of each unique letter's last position index (anchor) in the input. Then greedily choose the smallest partition for the first letter 'c' :type S: str :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n import itertools\n parts = []\n sets = []\n for c in S:\n seen = False\n merge_start = -1\n for i in range(0, len(sets)):\n if c in sets[i]:\n seen = True\n merge_start = i\n break\n parts.append([c])\n sets.append({c})\n if seen:\n parts_tmp = []\n sets_tmp = []\n for i in range(merge_start):\n parts_tmp.append(parts[i])\n sets_tmp.append(sets[i])\n parts_tmp.append(list(itertools.chain(*parts[merge_start:])))\n sets_tmp.append(set(itertools.chain(*sets[merge_start:])))\n parts = parts_tmp\n sets = sets_tmp\n return [len(part) for part in parts]\n<|end_body_0|>\n\n<|body_start_1|>\n lasts = {c: index for index, c in enumerate(S)}\n parts = []\n end = anchor = 0\n for i, c in enumerate(S):\n end = max(end, lasts[c])\n if end == i:\n parts.append(end - anchor + 1)\n anchor = end + 1\n return parts\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000283", "length_bytes": 2059, "license_type": "no_license", "methods": [{"docstring": "Linear scan the input string, store the previous partitions into a list. If the current char belongs to any of the previous partitions, merge them with the current char into one partition. :type S: str :rtype: List[int]", "name": "partitionLabels", "signature": "def partitionLabels(self, S)"}, {"docstring": "Keep a dict/map of each unique letter's last position index (anchor) in the input. Then greedily choose the smallest partition for the first letter 'c' :type S: str :rtype: List[int]", "name": "partitionLabels2", "signature": "def partitionLabels2(self, S)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002252", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def partitionLabels(self, S): Linear scan the input string, store the previous partitions into a list. 
If the current char belongs to any of the previous partitions, merge them with the current char into one partition. :type S: str :rtype: List[int]\n- def partitionLabels2(self, S): Keep a dict/map of each unique letter's last position index (anchor) in the input. Then greedily choose the smallest partition for the first letter 'c' :type S: str :rtype: List[int]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def partitionLabels(self, S): Linear scan the input string, store the previous partitions into a list. If the current char belongs to any of the previous partitions, merge them with the current char into one partition. :type S: str :rtype: List[int]\n- def partitionLabels2(self, S): Keep a dict/map of each unique letter's last position index (anchor) in the input. Then greedily choose the smallest partition for the first letter 'c' :type S: str :rtype: List[int]\n\n<|skeleton|>\nclass Solution:\n\n def partitionLabels(self, S):\n \"\"\"Linear scan the input string, store the previous partitions into a list. If the current char belongs to any of the previous partitions, merge them with the current char into one partition. :type S: str :rtype: List[int]\"\"\"\n <|body_0|>\n\n def partitionLabels2(self, S):\n \"\"\"Keep a dict/map of each unique letter's last position index (anchor) in the input. Then greedily choose the smallest partition for the first letter 'c' :type S: str :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n import itertools\n parts = []\n sets = []\n for c in S:\n seen = False\n merge_start = -1\n for i in range(0, len(sets)):\n if c in sets[i]:\n seen = True\n merge_start = i\n break\n parts.append([c])\n sets.append({c})\n if seen:\n parts_tmp = []\n sets_tmp = []\n for i in range(merge_start):\n parts_tmp.append(parts[i])\n sets_tmp.append(sets[i])\n parts_tmp.append(list(itertools.chain(*parts[merge_start:])))\n sets_tmp.append(set(itertools.chain(*sets[merge_start:])))\n parts = parts_tmp\n sets = sets_tmp\n return [len(part) for part in parts]\n<|end_body_0|>\n\n<|body_start_1|>\n lasts = {c: index for index, c in enumerate(S)}\n parts = []\n end = anchor = 0\n for i, c in enumerate(S):\n end = max(end, lasts[c])\n if end == i:\n parts.append(end - anchor + 1)\n anchor = end + 1\n return parts\n<|end_body_1|>\n", "revision_id": "143aa25f92f3827aa379f29c67a9b7ec3757fef9", "skeleton": "<|skeleton|>\nclass Solution:\n\n def partitionLabels(self, S):\n \"\"\"Linear scan the input string, store the previous partitions into a list. If the current char belongs to any of the previous partitions, merge them with the current char into one partition. :type S: str :rtype: List[int]\"\"\"\n <|body_0|>\n\n def partitionLabels2(self, S):\n \"\"\"Keep a dict/map of each unique letter's last position index (anchor) in the input. Then greedily choose the smallest partition for the first letter 'c' :type S: str :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def partitionLabels(self, S):\n \"\"\"Linear scan the input string, store the previous partitions into a list. If the current char belongs to any of the previous partitions, merge them with the current char into one partition. 
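A quick standalone check of the greedy last-index technique described in this record; this is a sketch, and the sample string is the standard LeetCode 763 example rather than anything taken from this file:

    def partition_labels(s):
        last = {c: i for i, c in enumerate(s)}  # last index of every letter
        sizes, anchor, end = [], 0, 0
        for i, c in enumerate(s):
            end = max(end, last[c])  # a partition must reach each seen letter's last occurrence
            if i == end:  # no letter seen so far appears later, so it is safe to cut here
                sizes.append(end - anchor + 1)
                anchor = i + 1
        return sizes

    assert partition_labels('ababcbacadefegdehijhklij') == [9, 7, 8]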
:type S: str :rtype: List[int]\"\"\"\n import itertools\n parts = []\n sets = []\n for c in S:\n seen = False\n merge_start = -1\n for i in range(0, len(sets)):\n if c in sets[i]:\n seen = True\n merge_start = i\n break\n parts.append([c])\n sets.append({c})\n if seen:\n parts_tmp = []\n sets_tmp = []\n for i in range(merge_start):\n parts_tmp.append(parts[i])\n sets_tmp.append(sets[i])\n parts_tmp.append(list(itertools.chain(*parts[merge_start:])))\n sets_tmp.append(set(itertools.chain(*sets[merge_start:])))\n parts = parts_tmp\n sets = sets_tmp\n return [len(part) for part in parts]\n\n def partitionLabels2(self, S):\n \"\"\"Keep a dict/map of each unique letter's last position index (anchor) in the input. Then greedily choose the smallest partition for the first letter 'c' :type S: str :rtype: List[int]\"\"\"\n lasts = {c: index for index, c in enumerate(S)}\n parts = []\n end = anchor = 0\n for i, c in enumerate(S):\n end = max(end, lasts[c])\n if end == i:\n parts.append(end - anchor + 1)\n anchor = end + 1\n return parts\n", "source": "the_stack_v2_python_sparse", "source_path": "py/leetcode_py/763.py", "source_repo": "imsure/tech-interview-prep", "split": "val", "star_events_count": 0} {"blob_id": "503c72178d8d2931ae0e3700e7ee3a0b5478a821", "bodies": ["result = {'result': 'NG'}\ndata = request.get_json(force=True)\nif data:\n success, message = CtrlQuotations().add_option_by_quotation_id2(quotation_id, data)\n if success:\n result = {'result': 'OK', 'content': message}\n else:\n result['error'] = message\nelse:\n result['error'] = 'Please do not send empty data'\nreturn result", "result = {'result': 'NG', 'content': []}\nif quotation_id:\n res = CtrlQuotations().get_option_by_quotation_id(quotation_id)\n if res:\n result['result'] = 'OK'\n result['content'] = res\nreturn result"], "bodies_text": "<|body_start_0|>\n result = {'result': 'NG'}\n data = request.get_json(force=True)\n if data:\n success, message = CtrlQuotations().add_option_by_quotation_id2(quotation_id, data)\n if success:\n result = {'result': 'OK', 'content': message}\n else:\n result['error'] = message\n else:\n result['error'] = 'Please do not send empty data'\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n result = {'result': 'NG', 'content': []}\n if quotation_id:\n res = CtrlQuotations().get_option_by_quotation_id(quotation_id)\n if res:\n result['result'] = 'OK'\n result['content'] = res\n return result\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ApiOptionInfo", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ApiOptionInfo:\n\n def post(self, quotation_id):\n \"\"\"Update/add the Options under this quotation :return:\"\"\"\n <|body_0|>\n\n def get(self, quotation_id):\n \"\"\"Get all the Options under this quotation :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = {'result': 'NG'}\n data = request.get_json(force=True)\n if data:\n success, message = CtrlQuotations().add_option_by_quotation_id2(quotation_id, data)\n if success:\n result = {'result': 'OK', 'content': message}\n else:\n result['error'] = message\n else:\n result['error'] = 'Please do not send empty data'\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n result = {'result': 'NG', 'content': []}\n if quotation_id:\n res = CtrlQuotations().get_option_by_quotation_id(quotation_id)\n if res:\n result['result'] = 'OK'\n result['content'] = res\n return result\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000284", "length_bytes": 10406, "license_type": "no_license", "methods": [{"docstring": "Update/add the Options under this quotation :return:", "name": "post", 
"signature": "def post(self, quotation_id)"}, {"docstring": "Get all the Options under this quotation :return:", "name": "get", "signature": "def get(self, quotation_id)"}], "n_methods": 2, "prompt": "Implement the Python class `ApiOptionInfo` described below.\n\nClass description:\nImplement the ApiOptionInfo class.\n\nMethod signatures and docstrings:\n- def post(self, quotation_id): Update/add the Options under this quotation :return:\n- def get(self, quotation_id): Get all the Options under this quotation :return:", "prompted_full_text": "Implement the Python class `ApiOptionInfo` described below.\n\nClass description:\nImplement the ApiOptionInfo class.\n\nMethod signatures and docstrings:\n- def post(self, quotation_id): Update/add the Options under this quotation :return:\n- def get(self, quotation_id): Get all the Options under this quotation :return:\n\n<|skeleton|>\nclass ApiOptionInfo:\n\n def post(self, quotation_id):\n \"\"\"Update/add the Options under this quotation :return:\"\"\"\n <|body_0|>\n\n def get(self, quotation_id):\n \"\"\"Get all the Options under this quotation :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = {'result': 'NG'}\n data = request.get_json(force=True)\n if data:\n success, message = CtrlQuotations().add_option_by_quotation_id2(quotation_id, data)\n if success:\n result = {'result': 'OK', 'content': message}\n else:\n result['error'] = message\n else:\n result['error'] = 'Please do not send empty data'\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n result = {'result': 'NG', 'content': []}\n if quotation_id:\n res = CtrlQuotations().get_option_by_quotation_id(quotation_id)\n if res:\n result['result'] = 'OK'\n result['content'] = res\n return result\n<|end_body_1|>\n", "revision_id": "64b31e7bdfcb8a4c95f0a8a607f0bcff576cec11", "skeleton": "<|skeleton|>\nclass ApiOptionInfo:\n\n def post(self, quotation_id):\n \"\"\"Update/add the Options under this quotation :return:\"\"\"\n <|body_0|>\n\n def get(self, quotation_id):\n \"\"\"Get all the Options under this quotation :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ApiOptionInfo:\n def post(self, quotation_id):\n \"\"\"Update/add the Options under this quotation :return:\"\"\"\n result = {'result': 'NG'}\n data = request.get_json(force=True)\n if data:\n success, message = CtrlQuotations().add_option_by_quotation_id2(quotation_id, data)\n if success:\n result = {'result': 'OK', 'content': message}\n else:\n result['error'] = message\n else:\n result['error'] = 'Please do not send empty data'\n return result\n\n def get(self, quotation_id):\n \"\"\"Get all the Options under this quotation :return:\"\"\"\n result = {'result': 'NG', 'content': []}\n if quotation_id:\n res = CtrlQuotations().get_option_by_quotation_id(quotation_id)\n if res:\n result['result'] = 'OK'\n result['content'] = res\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "koala/koala_server/app/api_1_0/api_quotations.py", "source_repo": "lsn1183/web_project", "split": "val", "star_events_count": 0} {"blob_id": "b5558d4c02340a7240356d68b7833e1dcf71dda0", "bodies": ["self._graph = graph\nself._type = t\nself._batch_size = batch_size\nself._strategy = strategy\nself._client = self._graph.get_client()\nself._node_from = node_from\nif self._node_from == pywrap.NodeFrom.NODE:\n if self._type not in self._graph.get_node_decoders().keys():\n raise ValueError('Graph has no node type of {}'.format(self._type))\n self._node_type = self._type\nelse:\n topology = self._graph.get_topology()\n src_type, dst_type = (topology.get_src_type(self._type), topology.get_dst_type(self._type))\n self._src_type, self._dst_type = (src_type, dst_type)\n if self._node_from == 
pywrap.NodeFrom.EDGE_SRC:\n self._node_type = src_type\n else:\n self._node_type = dst_type", "state = self._graph.node_state.get(self._type)\nreq = pywrap.new_get_nodes_request(self._type, self._strategy, self._node_from, self._batch_size, state)\nres = pywrap.new_get_nodes_response()\nstatus = self._client.get_nodes(req, res)\nif not status.ok():\n self._graph.edge_state.inc(self._type)\nelse:\n ids = pywrap.get_node_ids(res)\npywrap.del_op_response(res)\npywrap.del_op_request(req)\nraise_exception_on_not_ok_status(status)\nnodes = self._graph.get_nodes(self._node_type, ids)\nreturn nodes"], "bodies_text": "<|body_start_0|>\n self._graph = graph\n self._type = t\n self._batch_size = batch_size\n self._strategy = strategy\n self._client = self._graph.get_client()\n self._node_from = node_from\n if self._node_from == pywrap.NodeFrom.NODE:\n if self._type not in self._graph.get_node_decoders().keys():\n raise ValueError('Graph has no node type of {}'.format(self._type))\n self._node_type = self._type\n else:\n topology = self._graph.get_topology()\n src_type, dst_type = (topology.get_src_type(self._type), topology.get_dst_type(self._type))\n self._src_type, self._dst_type = (src_type, dst_type)\n if self._node_from == pywrap.NodeFrom.EDGE_SRC:\n self._node_type = src_type\n else:\n self._node_type = dst_type\n<|end_body_0|>\n\n<|body_start_1|>\n state = self._graph.node_state.get(self._type)\n req = pywrap.new_get_nodes_request(self._type, self._strategy, self._node_from, self._batch_size, state)\n res = pywrap.new_get_nodes_response()\n status = self._client.get_nodes(req, res)\n if not status.ok():\n self._graph.edge_state.inc(self._type)\n else:\n ids = pywrap.get_node_ids(res)\n pywrap.del_op_response(res)\n pywrap.del_op_request(req)\n raise_exception_on_not_ok_status(status)\n nodes = self._graph.get_nodes(self._node_type, ids)\n return nodes\n<|end_body_1|>\n", "class_docstring": "Sampling nodes from graph.", "class_name": "NodeSampler", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NodeSampler:\n \"\"\"Sampling nodes from graph.\"\"\"\n\n def __init__(self, graph, t, batch_size, strategy='by_order', node_from=pywrap.NodeFrom.NODE):\n \"\"\"Create a Base NodeSampler. Args: graph (`Graph` object): The graph to sample from. t (string): type of node or edge. If t is a type of node, then `NodeSampler` will sample from node source. Else if `t` is a type of edge, then `node_from=EDGE_SRC` indicates that the nodes will be sampled from edges' source nodes, `node_from=EDGE_DST` indicates that the nodes will be sampled from edges' dst nodes. batch_size (int): How many nodes will be returned for `get()`. strategy (string, Optional): Sampling strategy. \"by_order\", \"random\" and \"shuffle\" are supported. \"by_order\": get nodes by order of how the specified node is stored, if the specified type of nodes are totally visited, `graphlearn.Ou\"\"\"\n <|body_0|>\n\n def get(self):\n \"\"\"Get batched sampled nodes. 
Return: A `Nodes` object, shape=[`batch_size`] Raise: `graphlearn.OutOfRangeError`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._graph = graph\n self._type = t\n self._batch_size = batch_size\n self._strategy = strategy\n self._client = self._graph.get_client()\n self._node_from = node_from\n if self._node_from == pywrap.NodeFrom.NODE:\n if self._type not in self._graph.get_node_decoders().keys():\n raise ValueError('Graph has no node type of {}'.format(self._type))\n self._node_type = self._type\n else:\n topology = self._graph.get_topology()\n src_type, dst_type = (topology.get_src_type(self._type), topology.get_dst_type(self._type))\n self._src_type, self._dst_type = (src_type, dst_type)\n if self._node_from == pywrap.NodeFrom.EDGE_SRC:\n self._node_type = src_type\n else:\n self._node_type = dst_type\n<|end_body_0|>\n\n<|body_start_1|>\n state = self._graph.node_state.get(self._type)\n req = pywrap.new_get_nodes_request(self._type, self._strategy, self._node_from, self._batch_size, state)\n res = pywrap.new_get_nodes_response()\n status = self._client.get_nodes(req, res)\n if not status.ok():\n self._graph.edge_state.inc(self._type)\n else:\n ids = pywrap.get_node_ids(res)\n pywrap.del_op_response(res)\n pywrap.del_op_request(req)\n raise_exception_on_not_ok_status(status)\n nodes = self._graph.get_nodes(self._node_type, ids)\n return nodes\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000285", "length_bytes": 4632, "license_type": "permissive", "methods": [{"docstring": "Create a Base NodeSampler. Args: graph (`Graph` object): The graph to sample from. t (string): type of node or edge. If t is a type of node, then `NodeSampler` will sample from node source. Else if `t` is a type of edge, then `node_from=EDGE_SRC` indicates that the nodes will be sampled from edges' source nodes, `node_from=EDGE_DST` indicates that the nodes will be sampled from edges' dst nodes. batch_size (int): How many nodes will be returned for `get()`. strategy (string, Optional): Sampling strategy. \"by_order\", \"random\" and \"shuffle\" are supported. \"by_order\": get nodes by order of how the specified node is stored, if the specified type of nodes are totally visited, `graphlearn.Ou", "name": "__init__", "signature": "def __init__(self, graph, t, batch_size, strategy='by_order', node_from=pywrap.NodeFrom.NODE)"}, {"docstring": "Get batched sampled nodes. Return: A `Nodes` object, shape=[`batch_size`] Raise: `graphlearn.OutOfRangeError`", "name": "get", "signature": "def get(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000365", "prompt": "Implement the Python class `NodeSampler` described below.\n\nClass description:\nSampling nodes from graph.\n\nMethod signatures and docstrings:\n- def __init__(self, graph, t, batch_size, strategy='by_order', node_from=pywrap.NodeFrom.NODE): Create a Base NodeSampler. Args: graph (`Graph` object): The graph to sample from. t (string): type of node or edge. If t is a type of node, then `NodeSampler` will sample from node source. Else if `t` is a type of edge, then `node_from=EDGE_SRC` indicates that the nodes will be sampled from edges' source nodes, `node_from=EDGE_DST` indicates that the nodes will be sampled from edges' dst nodes. batch_size (int): How many nodes will be returned for `get()`. strategy (string, Optional): Sampling strategy. \"by_order\", \"random\" and \"shuffle\" are supported. 
\"by_order\": get nodes by order of how the specified node is stored, if the specified type of nodes are totally visited, `graphlearn.Ou\n- def get(self): Get batched sampled nodes. Return: A `Nodes` object, shape=[`batch_size`] Raise: `graphlearn.OutOfRangeError`", "prompted_full_text": "Implement the Python class `NodeSampler` described below.\n\nClass description:\nSampling nodes from graph.\n\nMethod signatures and docstrings:\n- def __init__(self, graph, t, batch_size, strategy='by_order', node_from=pywrap.NodeFrom.NODE): Create a Base NodeSampler. Args: graph (`Graph` object): The graph to sample from. t (string): type of node or edge. If t is a type of node, then `NodeSampler` will sample from node source. Else if `t` is a type of edge, then `node_from=EDGE_SRC` indicates that the nodes will be sampled from edges' source nodes, `node_from=EDGE_DST` indicates that the nodes will be sampled from edges' dst nodes. batch_size (int): How many nodes will be returned for `get()`. strategy (string, Optional): Sampling strategy. \"by_order\", \"random\" and \"shuffle\" are supported. \"by_order\": get nodes by order of how the specified node is stored, if the specified type of nodes are totally visited, `graphlearn.Ou\n- def get(self): Get batched sampled nodes. Return: A `Nodes` object, shape=[`batch_size`] Raise: `graphlearn.OutOfRangeError`\n\n<|skeleton|>\nclass NodeSampler:\n \"\"\"Sampling nodes from graph.\"\"\"\n\n def __init__(self, graph, t, batch_size, strategy='by_order', node_from=pywrap.NodeFrom.NODE):\n \"\"\"Create a Base NodeSampler. Args: graph (`Graph` object): The graph to sample from. t (string): type of node or edge. If t is a type of node, then `NodeSampler` will sample from node source. Else if `t` is a type of edge, then `node_from=EDGE_SRC` indicates that the nodes will be sampled from edges' source nodes, `node_from=EDGE_DST` indicates that the nodes will be sampled from edges' dst nodes. batch_size (int): How many nodes will be returned for `get()`. strategy (string, Optional): Sampling strategy. \"by_order\", \"random\" and \"shuffle\" are supported. \"by_order\": get nodes by order of how the specified node is stored, if the specified type of nodes are totally visited, `graphlearn.Ou\"\"\"\n <|body_0|>\n\n def get(self):\n \"\"\"Get batched sampled nodes. Return: A `Nodes` object, shape=[`batch_size`] Raise: `graphlearn.OutOfRangeError`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._graph = graph\n self._type = t\n self._batch_size = batch_size\n self._strategy = strategy\n self._client = self._graph.get_client()\n self._node_from = node_from\n if self._node_from == pywrap.NodeFrom.NODE:\n if self._type not in self._graph.get_node_decoders().keys():\n raise ValueError('Graph has no node type of {}'.format(self._type))\n self._node_type = self._type\n else:\n topology = self._graph.get_topology()\n src_type, dst_type = (topology.get_src_type(self._type), topology.get_dst_type(self._type))\n self._src_type, self._dst_type = (src_type, dst_type)\n if self._node_from == pywrap.NodeFrom.EDGE_SRC:\n self._node_type = src_type\n else:\n self._node_type = dst_type\n<|end_body_0|>\n\n<|body_start_1|>\n state = self._graph.node_state.get(self._type)\n req = pywrap.new_get_nodes_request(self._type, self._strategy, self._node_from, self._batch_size, state)\n res = pywrap.new_get_nodes_response()\n status = self._client.get_nodes(req, res)\n if not status.ok():\n self._graph.edge_state.inc(self._type)\n else:\n ids = pywrap.get_node_ids(res)\n pywrap.del_op_response(res)\n pywrap.del_op_request(req)\n raise_exception_on_not_ok_status(status)\n nodes = self._graph.get_nodes(self._node_type, ids)\n return nodes\n<|end_body_1|>\n", "revision_id": "1827c28c570c355e513f24b6a61a88457cfecdaf", "skeleton": "<|skeleton|>\nclass NodeSampler:\n \"\"\"Sampling nodes from graph.\"\"\"\n\n def __init__(self, graph, t, batch_size, strategy='by_order', node_from=pywrap.NodeFrom.NODE):\n \"\"\"Create a Base NodeSampler. Args: graph (`Graph` object): The graph to sample from. t (string): type of node or edge. If t is a type of node, then `NodeSampler` will sample from node source. Else if `t` is a type of edge, then `node_from=EDGE_SRC` indicates that the nodes will be sampled from edges' source nodes, `node_from=EDGE_DST` indicates that the nodes will be sampled from edges' dst nodes. batch_size (int): How many nodes will be returned for `get()`. strategy (string, Optional): Sampling strategy. \"by_order\", \"random\" and \"shuffle\" are supported. \"by_order\": get nodes by order of how the specified node is stored, if the specified type of nodes are totally visited, `graphlearn.Ou\"\"\"\n <|body_0|>\n\n def get(self):\n \"\"\"Get batched sampled nodes. Return: A `Nodes` object, shape=[`batch_size`] Raise: `graphlearn.OutOfRangeError`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NodeSampler:\n \"\"\"Sampling nodes from graph.\"\"\"\n\n def __init__(self, graph, t, batch_size, strategy='by_order', node_from=pywrap.NodeFrom.NODE):\n \"\"\"Create a Base NodeSampler. Args: graph (`Graph` object): The graph to sample from. t (string): type of node or edge. If t is a type of node, then `NodeSampler` will sample from node source. Else if `t` is a type of edge, then `node_from=EDGE_SRC` indicates that the nodes will be sampled from edges' source nodes, `node_from=EDGE_DST` indicates that the nodes will be sampled from edges' dst nodes. batch_size (int): How many nodes will be returned for `get()`. strategy (string, Optional): Sampling strategy. \"by_order\", \"random\" and \"shuffle\" are supported. 
\"by_order\": get nodes by order of how the specified node is stored, if the specified type of nodes are totally visited, `graphlearn.Ou\"\"\"\n self._graph = graph\n self._type = t\n self._batch_size = batch_size\n self._strategy = strategy\n self._client = self._graph.get_client()\n self._node_from = node_from\n if self._node_from == pywrap.NodeFrom.NODE:\n if self._type not in self._graph.get_node_decoders().keys():\n raise ValueError('Graph has no node type of {}'.format(self._type))\n self._node_type = self._type\n else:\n topology = self._graph.get_topology()\n src_type, dst_type = (topology.get_src_type(self._type), topology.get_dst_type(self._type))\n self._src_type, self._dst_type = (src_type, dst_type)\n if self._node_from == pywrap.NodeFrom.EDGE_SRC:\n self._node_type = src_type\n else:\n self._node_type = dst_type\n\n def get(self):\n \"\"\"Get batched sampled nodes. Return: A `Nodes` object, shape=[`batch_size`] Raise: `graphlearn.OutOfRangeError`\"\"\"\n state = self._graph.node_state.get(self._type)\n req = pywrap.new_get_nodes_request(self._type, self._strategy, self._node_from, self._batch_size, state)\n res = pywrap.new_get_nodes_response()\n status = self._client.get_nodes(req, res)\n if not status.ok():\n self._graph.edge_state.inc(self._type)\n else:\n ids = pywrap.get_node_ids(res)\n pywrap.del_op_response(res)\n pywrap.del_op_request(req)\n raise_exception_on_not_ok_status(status)\n nodes = self._graph.get_nodes(self._node_type, ids)\n return nodes\n", "source": "the_stack_v2_python_sparse", "source_path": "graphlearn/python/sampler/node_sampler.py", "source_repo": "lorinlee/graph-learn", "split": "val", "star_events_count": 0} {"blob_id": "790e1bc996d03b7d6f4aeee8cb1f4bb932117324", "bodies": ["self.total_commission = 0.0\nfor data in self:\n if data.commission == True:\n if data.commission_type == 'fixed':\n data.total_commission = data.total_rent * (data.fix_qty / 100.0)\n if data.commission_type == 'fixedcost':\n data.total_commission = data.fix_cost", "for data in self:\n if data.total_commission == 0.0:\n raise Warning(_('Total Commission must be greater than zero.'))\n line_vals = {'name': 'Commission', 'commission_type': data.commission_type, 'rent_amt': data.total_rent, 'date': data.date_start, 'end_date': data.date, 'amount': data.total_commission}\n vals = {'patner_id': data.tenant_id.id, 'tenancy': data.id, 'property_id': data.property_id.id, 'agent': data.agent.id, 'commission_line': [(0, 0, line_vals)]}\n self.env['commission.invoice'].create(vals)\n
data.write({'commission_create': True})", "if self.commission is False:\n self.agent = 0\n self.commission_type = ''\n self.fix_qty = 0.0\n self.fix_cost = 0.0"], "bodies_text": "<|body_start_0|>\n self.total_commission = 0.0\n for data in self:\n if data.commission == True:\n if data.commission_type == 'fixed':\n data.total_commission = data.total_rent * (data.fix_qty / 100.0)\n if data.commission_type == 'fixedcost':\n data.total_commission = data.fix_cost\n<|end_body_0|>\n\n<|body_start_1|>\n for data in self:\n if data.total_commission == 0.0:\n raise Warning(_('Total Commission must be greater than zero.'))\n line_vals = {'name': 'Commission', 'commission_type': data.commission_type, 'rent_amt': data.total_rent, 'date': data.date_start, 'end_date': data.date, 'amount': data.total_commission}\n vals = {'patner_id': data.tenant_id.id, 'tenancy': data.id, 'property_id': data.property_id.id, 'agent': data.agent.id, 'commission_line': [(0, 0, line_vals)]}\n self.env['commission.invoice'].create(vals)\n data.write({'commission_create': True})\n<|end_body_1|>\n\n<|body_start_2|>\n if self.commission is False:\n self.agent = 0\n self.commission_type = ''\n self.fix_qty = 0.0\n self.fix_cost = 0.0\n<|end_body_2|>\n", "class_docstring": "", "class_name": "AccountAnalyticAccount", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AccountAnalyticAccount:\n\n def calculate_commission(self):\n \"\"\"This method is used to calculate commission as per commission type ----------------------------------------------------------------- @param self: The object pointer\"\"\"\n <|body_0|>\n\n def create_commission(self):\n \"\"\"This button method is used to create a Commission Invoice for the tenancy. ----------------------------------------------------------- @param self: The object pointer\"\"\"\n <|body_1|>\n\n def onchange_property_id(self):\n \"\"\"This method is used to check if the commission field is False; then the other field values will be null, zero, or false. --------------------------------------------------------------- @param self: The object pointer\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.total_commission = 0.0\n for data in self:\n if data.commission == True:\n if data.commission_type == 'fixed':\n data.total_commission = data.total_rent * (data.fix_qty / 100.0)\n if data.commission_type == 'fixedcost':\n data.total_commission = data.fix_cost\n<|end_body_0|>\n\n<|body_start_1|>\n for data in self:\n if data.total_commission == 0.0:\n raise Warning(_('Total Commission must be greater than zero.'))\n line_vals = {'name': 'Commission', 'commission_type': data.commission_type, 'rent_amt': data.total_rent, 'date': data.date_start, 'end_date': data.date, 'amount': data.total_commission}\n vals = {'patner_id': data.tenant_id.id, 'tenancy': data.id, 'property_id': data.property_id.id, 'agent': data.agent.id, 'commission_line': [(0, 0, line_vals)]}\n self.env['commission.invoice'].create(vals)\n data.write({'commission_create': True})\n<|end_body_1|>\n\n<|body_start_2|>\n if self.commission is False:\n self.agent = 0\n self.commission_type = ''\n self.fix_qty = 0.0\n self.fix_cost = 0.0\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000286", "length_bytes": 11342, "license_type": "no_license", "methods": [{"docstring": "This method is used to calculate commission as per commission type ----------------------------------------------------------------- @param self: The object pointer", "name": "calculate_commission", "signature": "def calculate_commission(self)"}, {"docstring": "This button method is used to create a Commission Invoice for the tenancy. ----------------------------------------------------------- @param self: The object pointer", "name": "create_commission", "signature": "def create_commission(self)"}, {"docstring": "This method is used to check if the commission field is False; then the other field values will be null, zero, or false. 
--------------------------------------------------------------- @param self: The object pointer", "name": "onchange_property_id", "signature": "def onchange_property_id(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005411", "prompt": "Implement the Python class `AccountAnalyticAccount` described below.\n\nClass description:\nImplement the AccountAnalyticAccount class.\n\nMethod signatures and docstrings:\n- def calculate_commission(self): This method is used to calculate commission as per commission type ----------------------------------------------------------------- @param self: The object pointer\n- def create_commission(self): This button method is used to create a Commission Invoice for the tenancy. ----------------------------------------------------------- @param self: The object pointer\n- def onchange_property_id(self): This method is used to check if the commission field is False; then the other field values will be null, zero, or false. --------------------------------------------------------------- @param self: The object pointer", "prompted_full_text": "Implement the Python class `AccountAnalyticAccount` described below.\n\nClass description:\nImplement the AccountAnalyticAccount class.\n\nMethod signatures and docstrings:\n- def calculate_commission(self): This method is used to calculate commission as per commission type ----------------------------------------------------------------- @param self: The object pointer\n- def create_commission(self): This button method is used to create a Commission Invoice for the tenancy. ----------------------------------------------------------- @param self: The object pointer\n- def onchange_property_id(self): This method is used to check if the commission field is False; then the other field values will be null, zero, or false. --------------------------------------------------------------- @param self: The object pointer\n\n<|skeleton|>\nclass AccountAnalyticAccount:\n\n def calculate_commission(self):\n \"\"\"This method is used to calculate commission as per commission type ----------------------------------------------------------------- @param self: The object pointer\"\"\"\n <|body_0|>\n\n def create_commission(self):\n \"\"\"This button method is used to create a Commission Invoice for the tenancy. ----------------------------------------------------------- @param self: The object pointer\"\"\"\n <|body_1|>\n\n def onchange_property_id(self):\n \"\"\"This method is used to check if the commission field is False; then the other field values will be null, zero, or false. 
--------------------------------------------------------------- @param self: The object pointer\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.total_commission = 0.0\n for data in self:\n if data.commission == True:\n if data.commission_type == 'fixed':\n data.total_commission = data.total_rent * (data.fix_qty / 100.0)\n if data.commission_type == 'fixedcost':\n data.total_commission = data.fix_cost\n<|end_body_0|>\n\n<|body_start_1|>\n for data in self:\n if data.total_commission == 0.0:\n raise Warning(_('Total Commission must be greater than zero.'))\n line_vals = {'name': 'Commission', 'commission_type': data.commission_type, 'rent_amt': data.total_rent, 'date': data.date_start, 'end_date': data.date, 'amount': data.total_commission}\n vals = {'patner_id': data.tenant_id.id, 'tenancy': data.id, 'property_id': data.property_id.id, 'agent': data.agent.id, 'commission_line': [(0, 0, line_vals)]}\n self.env['commission.invoice'].create(vals)\n data.write({'commission_create': True})\n<|end_body_1|>\n\n<|body_start_2|>\n if self.commission is False:\n self.agent = 0\n self.commission_type = ''\n self.fix_qty = 0.0\n self.fix_cost = 0.0\n<|end_body_2|>\n", "revision_id": "163136f382faa8607db8fb6cda42a5ba07c4076b", "skeleton": "<|skeleton|>\nclass AccountAnalyticAccount:\n\n def calculate_commission(self):\n \"\"\"This method is used to calculate commission as per commission type ----------------------------------------------------------------- @param self: The object pointer\"\"\"\n <|body_0|>\n\n def create_commission(self):\n \"\"\"This button method is used to create a Commission Invoice for the tenancy. ----------------------------------------------------------- @param self: The object pointer\"\"\"\n <|body_1|>\n\n def onchange_property_id(self):\n \"\"\"This method is used to check if the commission field is False; then the other field values will be null, zero, or false. --------------------------------------------------------------- @param self: The object pointer\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AccountAnalyticAccount:\n def calculate_commission(self):\n \"\"\"This method is used to calculate commission as per commission type ----------------------------------------------------------------- @param self: The object pointer\"\"\"\n self.total_commission = 0.0\n for data in self:\n if data.commission == True:\n if data.commission_type == 'fixed':\n data.total_commission = data.total_rent * (data.fix_qty / 100.0)\n if data.commission_type == 'fixedcost':\n data.total_commission = data.fix_cost\n\n def create_commission(self):\n \"\"\"This button method is used to create a Commission Invoice for the tenancy. 
----------------------------------------------------------- @param self: The object pointer\"\"\"\n for data in self:\n if data.total_commission == 0.0:\n raise Warning(_('Total Commission must be greater than zero.'))\n line_vals = {'name': 'Commission', 'commission_type': data.commission_type, 'rent_amt': data.total_rent, 'date': data.date_start, 'end_date': data.date, 'amount': data.total_commission}\n vals = {'patner_id': data.tenant_id.id, 'tenancy': data.id, 'property_id': data.property_id.id, 'agent': data.agent.id, 'commission_line': [(0, 0, line_vals)]}\n self.env['commission.invoice'].create(vals)\n data.write({'commission_create': True})\n\n def onchange_property_id(self):\n \"\"\"This method is used to check if the commission field is False; then the other field values will be null, zero, or false. --------------------------------------------------------------- @param self: The object pointer\"\"\"\n if self.commission is False:\n self.agent = 0\n self.commission_type = ''\n self.fix_qty = 0.0\n self.fix_cost = 0.0\n", "source": "the_stack_v2_python_sparse", "source_path": "property_commission_ee/models/property_commission.py", "source_repo": "maarejsys/Roya", "split": "val", "star_events_count": 0} {"blob_id": "08e7c57db0a98a6429e8857c13300161322df682", "bodies": ["if not info:\n return None\nnamespace = info['namespace']\nif self.type == 'input':\n tag_name = etree.QName(namespace, self.operation.name)\nelse:\n tag_name = etree.QName(namespace, self.abstract.name.localname)\nelements = []\nfor name, msg in parts.items():\n if msg.element:\n elements.append(msg.element)\n else:\n elements.append(xsd.Element(name, msg.type))\nreturn xsd.Element(tag_name, xsd.ComplexType(xsd.Sequence(elements)))", "process_multiref(body_element)\nresponse_element = list(body_element)[0]\nif self.body:\n context = XmlParserContext(self.wsdl.settings)\n result = self.body.parse(response_element, self.wsdl.types, context=context)\n return {'body': result}\nreturn {'body': None}"], "bodies_text": "<|body_start_0|>\n if not info:\n return None\n namespace = info['namespace']\n if self.type == 'input':\n tag_name = etree.QName(namespace, self.operation.name)\n else:\n tag_name = etree.QName(namespace, self.abstract.name.localname)\n elements = []\n for name, msg in parts.items():\n if msg.element:\n elements.append(msg.element)\n else:\n elements.append(xsd.Element(name, msg.type))\n return xsd.Element(tag_name, xsd.ComplexType(xsd.Sequence(elements)))\n<|end_body_0|>\n\n<|body_start_1|>\n process_multiref(body_element)\n response_element = list(body_element)[0]\n if self.body:\n context = XmlParserContext(self.wsdl.settings)\n result = self.body.parse(response_element, self.wsdl.types, context=context)\n return {'body': result}\n return {'body': None}\n<|end_body_1|>\n", "class_docstring": "In RPC messages each part is a parameter or a return value and appears inside a wrapper element within the body. The wrapper element is named identically to the operation name and its namespace is the value of the namespace attribute. Each message part (parameter) appears under the wrapper, represented by an accessor named identically to the corresponding parameter of the call. Parts are arranged in the same order as the parameters of the call. ..
inheritance-diagram:: zeep.wsdl.messages.soap.DocumentMessage :parts: 1 :param wsdl: The main wsdl document :type wsdl: zeep.wsdl.Document :param name: :param operation: The operation to which this message belongs :type operation: zeep.wsdl.binding", "class_name": "RpcMessage", "detected_licenses": ["MIT", "BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RpcMessage:\n \"\"\"In RPC messages each part is a parameter or a return value and appears inside a wrapper element within the body. The wrapper element is named identically to the operation name and its namespace is the value of the namespace attribute. Each message part (parameter) appears under the wrapper, represented by an accessor named identically to the corresponding parameter of the call. Parts are arranged in the same order as the parameters of the call. .. inheritance-diagram:: zeep.wsdl.messages.soap.DocumentMessage :parts: 1 :param wsdl: The main wsdl document :type wsdl: zeep.wsdl.Document :param name: :param operation: The operation to which this message belongs :type operation: zeep.wsdl.binding\"\"\"\n\n def _resolve_body(self, info, definitions, parts):\n \"\"\"Return an XSD element for the SOAP:Body. Each part is a parameter or a return value and appears inside a wrapper element within the body named identically to the operation name and its namespace is the value of the namespace attribute.\"\"\"\n <|body_0|>\n\n def _deserialize_body(self, body_element):\n \"\"\"The name of the wrapper element is not defined. The WS-I defines that it should be the operation name with the 'Response' string as suffix. But lets just do it really stupid for now and use the first element.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not info:\n return None\n namespace = info['namespace']\n if self.type == 'input':\n tag_name = etree.QName(namespace, self.operation.name)\n else:\n tag_name = etree.QName(namespace, self.abstract.name.localname)\n elements = []\n for name, msg in parts.items():\n if msg.element:\n elements.append(msg.element)\n else:\n elements.append(xsd.Element(name, msg.type))\n return xsd.Element(tag_name, xsd.ComplexType(xsd.Sequence(elements)))\n<|end_body_0|>\n\n<|body_start_1|>\n process_multiref(body_element)\n response_element = list(body_element)[0]\n if self.body:\n context = XmlParserContext(self.wsdl.settings)\n result = self.body.parse(response_element, self.wsdl.types, context=context)\n return {'body': result}\n return {'body': None}\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000287", "length_bytes": 19450, "license_type": "permissive", "methods": [{"docstring": "Return an XSD element for the SOAP:Body. Each part is a parameter or a return value and appears inside a wrapper element within the body named identically to the operation name and its namespace is the value of the namespace attribute.", "name": "_resolve_body", "signature": "def _resolve_body(self, info, definitions, parts)"}, {"docstring": "The name of the wrapper element is not defined. The WS-I defines that it should be the operation name with the 'Response' string as suffix. 
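The wrapper-element convention this record describes is easy to see with plain lxml; a small sketch, where the namespace and operation name are hypothetical and this is not zeep's internal code:

    from lxml import etree

    ns = 'http://example.com/stockquote'  # stands in for the binding's namespace attribute
    wrapper = etree.Element(etree.QName(ns, 'GetLastTradePrice'))  # named after the operation
    part = etree.SubElement(wrapper, 'tickerSymbol')  # one accessor per parameter, in call order
    part.text = 'DIS'
    print(etree.tostring(wrapper).decode())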
But lets just do it really stupid for now and use the first element.", "name": "_deserialize_body", "signature": "def _deserialize_body(self, body_element)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006771", "prompt": "Implement the Python class `RpcMessage` described below.\n\nClass description:\nIn RPC messages each part is a parameter or a return value and appears inside a wrapper element within the body. The wrapper element is named identically to the operation name and its namespace is the value of the namespace attribute. Each message part (parameter) appears under the wrapper, represented by an accessor named identically to the corresponding parameter of the call. Parts are arranged in the same order as the parameters of the call. .. inheritance-diagram:: zeep.wsdl.messages.soap.DocumentMessage :parts: 1 :param wsdl: The main wsdl document :type wsdl: zeep.wsdl.Document :param name: :param operation: The operation to which this message belongs :type operation: zeep.wsdl.binding\n\nMethod signatures and docstrings:\n- def _resolve_body(self, info, definitions, parts): Return an XSD element for the SOAP:Body. Each part is a parameter or a return value and appears inside a wrapper element within the body named identically to the operation name and its namespace is the value of the namespace attribute.\n- def _deserialize_body(self, body_element): The name of the wrapper element is not defined. The WS-I defines that it should be the operation name with the 'Response' string as suffix. But lets just do it really stupid for now and use the first element.", "prompted_full_text": "Implement the Python class `RpcMessage` described below.\n\nClass description:\nIn RPC messages each part is a parameter or a return value and appears inside a wrapper element within the body. The wrapper element is named identically to the operation name and its namespace is the value of the namespace attribute. Each message part (parameter) appears under the wrapper, represented by an accessor named identically to the corresponding parameter of the call. Parts are arranged in the same order as the parameters of the call. .. inheritance-diagram:: zeep.wsdl.messages.soap.DocumentMessage :parts: 1 :param wsdl: The main wsdl document :type wsdl: zeep.wsdl.Document :param name: :param operation: The operation to which this message belongs :type operation: zeep.wsdl.binding\n\nMethod signatures and docstrings:\n- def _resolve_body(self, info, definitions, parts): Return an XSD element for the SOAP:Body. Each part is a parameter or a return value and appears inside a wrapper element within the body named identically to the operation name and its namespace is the value of the namespace attribute.\n- def _deserialize_body(self, body_element): The name of the wrapper element is not defined. The WS-I defines that it should be the operation name with the 'Response' string as suffix. But lets just do it really stupid for now and use the first element.\n\n<|skeleton|>\nclass RpcMessage:\n \"\"\"In RPC messages each part is a parameter or a return value and appears inside a wrapper element within the body. The wrapper element is named identically to the operation name and its namespace is the value of the namespace attribute. Each message part (parameter) appears under the wrapper, represented by an accessor named identically to the corresponding parameter of the call. Parts are arranged in the same order as the parameters of the call. .. 
inheritance-diagram:: zeep.wsdl.messages.soap.DocumentMessage :parts: 1 :param wsdl: The main wsdl document :type wsdl: zeep.wsdl.Document :param name: :param operation: The operation to which this message belongs :type operation: zeep.wsdl.binding\"\"\"\n\n def _resolve_body(self, info, definitions, parts):\n \"\"\"Return an XSD element for the SOAP:Body. Each part is a parameter or a return value and appears inside a wrapper element within the body named identically to the operation name and its namespace is the value of the namespace attribute.\"\"\"\n <|body_0|>\n\n def _deserialize_body(self, body_element):\n \"\"\"The name of the wrapper element is not defined. The WS-I defines that it should be the operation name with the 'Response' string as suffix. But lets just do it really stupid for now and use the first element.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not info:\n return None\n namespace = info['namespace']\n if self.type == 'input':\n tag_name = etree.QName(namespace, self.operation.name)\n else:\n tag_name = etree.QName(namespace, self.abstract.name.localname)\n elements = []\n for name, msg in parts.items():\n if msg.element:\n elements.append(msg.element)\n else:\n elements.append(xsd.Element(name, msg.type))\n return xsd.Element(tag_name, xsd.ComplexType(xsd.Sequence(elements)))\n<|end_body_0|>\n\n<|body_start_1|>\n process_multiref(body_element)\n response_element = list(body_element)[0]\n if self.body:\n context = XmlParserContext(self.wsdl.settings)\n result = self.body.parse(response_element, self.wsdl.types, context=context)\n return {'body': result}\n return {'body': None}\n<|end_body_1|>\n", "revision_id": "377d9313b1b4807a31a5ee42227f1dc7e7e0471e", "skeleton": "<|skeleton|>\nclass RpcMessage:\n \"\"\"In RPC messages each part is a parameter or a return value and appears inside a wrapper element within the body. The wrapper element is named identically to the operation name and its namespace is the value of the namespace attribute. Each message part (parameter) appears under the wrapper, represented by an accessor named identically to the corresponding parameter of the call. Parts are arranged in the same order as the parameters of the call. .. inheritance-diagram:: zeep.wsdl.messages.soap.DocumentMessage :parts: 1 :param wsdl: The main wsdl document :type wsdl: zeep.wsdl.Document :param name: :param operation: The operation to which this message belongs :type operation: zeep.wsdl.binding\"\"\"\n\n def _resolve_body(self, info, definitions, parts):\n \"\"\"Return an XSD element for the SOAP:Body. Each part is a parameter or a return value and appears inside a wrapper element within the body named identically to the operation name and its namespace is the value of the namespace attribute.\"\"\"\n <|body_0|>\n\n def _deserialize_body(self, body_element):\n \"\"\"The name of the wrapper element is not defined. The WS-I defines that it should be the operation name with the 'Response' string as suffix. But lets just do it really stupid for now and use the first element.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RpcMessage:\n \"\"\"In RPC messages each part is a parameter or a return value and appears inside a wrapper element within the body. The wrapper element is named identically to the operation name and its namespace is the value of the namespace attribute. 
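The 'first element' heuristic that _deserialize_body applies can be shown with plain lxml on a hypothetical payload (the element names below are made up for illustration):

    from lxml import etree

    body = etree.fromstring('<Body><GetLastTradePriceResponse>'
                            '<price>34.5</price></GetLastTradePriceResponse></Body>')
    response_element = list(body)[0]  # WS-I suggests this is named operation + 'Response'
    assert response_element.tag == 'GetLastTradePriceResponse'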
Each message part (parameter) appears under the wrapper, represented by an accessor named identically to the corresponding parameter of the call. Parts are arranged in the same order as the parameters of the call. .. inheritance-diagram:: zeep.wsdl.messages.soap.DocumentMessage :parts: 1 :param wsdl: The main wsdl document :type wsdl: zeep.wsdl.Document :param name: :param operation: The operation to which this message belongs :type operation: zeep.wsdl.binding\"\"\"\n\n def _resolve_body(self, info, definitions, parts):\n \"\"\"Return an XSD element for the SOAP:Body. Each part is a parameter or a return value and appears inside a wrapper element within the body named identically to the operation name and its namespace is the value of the namespace attribute.\"\"\"\n if not info:\n return None\n namespace = info['namespace']\n if self.type == 'input':\n tag_name = etree.QName(namespace, self.operation.name)\n else:\n tag_name = etree.QName(namespace, self.abstract.name.localname)\n elements = []\n for name, msg in parts.items():\n if msg.element:\n elements.append(msg.element)\n else:\n elements.append(xsd.Element(name, msg.type))\n return xsd.Element(tag_name, xsd.ComplexType(xsd.Sequence(elements)))\n\n def _deserialize_body(self, body_element):\n \"\"\"The name of the wrapper element is not defined. The WS-I defines that it should be the operation name with the 'Response' string as suffix. But lets just do it really stupid for now and use the first element.\"\"\"\n process_multiref(body_element)\n response_element = list(body_element)[0]\n if self.body:\n context = XmlParserContext(self.wsdl.settings)\n result = self.body.parse(response_element, self.wsdl.types, context=context)\n return {'body': result}\n return {'body': None}\n", "source": "the_stack_v2_python_sparse", "source_path": "src/zeep/wsdl/messages/soap.py", "source_repo": "mvantellingen/python-zeep", "split": "val", "star_events_count": 1938} {"blob_id": "2edc20ef8018c6ad89c72a6a2684da90895a7860", "bodies": ["def post_order(root):\n return post_order(root.left) + post_order(root.right) + [root.val] if root else []\nreturn ' '.join(map(str, post_order(root)))", "post_order = [int(each) for each in data.split(' ') if data]\nprint(post_order)\n\ndef helper(lower=float('-inf'), upper=float('inf')):\n if not post_order or post_order[-1] < lower or post_order[-1] > upper:\n return None\n val = post_order.pop()\n root = TreeNode(val)\n root.right = helper(val, upper)\n root.left = helper(lower, val)\n return root\nr = helper()\nreturn r"], "bodies_text": "<|body_start_0|>\n def post_order(root):\n return post_order(root.left) + post_order(root.right) + [root.val] if root else []\n return ' '.join(map(str, post_order(root)))\n<|end_body_0|>\n\n<|body_start_1|>\n post_order = [int(each) for each in data.split(' ') if data]\n print(post_order)\n\n def helper(lower=float('-inf'), upper=float('inf')):\n if not post_order or post_order[-1] < lower or post_order[-1] > upper:\n return None\n val = post_order.pop()\n root = TreeNode(val)\n root.right = helper(val, upper)\n root.left = helper(lower, val)\n return root\n r = helper()\n return r\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def post_order(root):\n return post_order(root.left) + post_order(root.right) + [root.val] if root else []\n return ' '.join(map(str, post_order(root)))\n<|end_body_0|>\n\n<|body_start_1|>\n post_order = [int(each) for each in data.split(' ') if data]\n print(post_order)\n\n def helper(lower=float('-inf'), upper=float('inf')):\n if not post_order or post_order[-1] < lower or post_order[-1] > upper:\n return None\n val = post_order.pop()\n root = TreeNode(val)\n root.right = helper(val, upper)\n root.left = helper(lower, val)\n return root\n r = helper()\n return r\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000288", "length_bytes": 2184, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: TreeNode :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001641", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def post_order(root):\n return post_order(root.left) + post_order(root.right) + [root.val] if root else []\n return ' '.join(map(str, post_order(root)))\n<|end_body_0|>\n\n<|body_start_1|>\n post_order = [int(each) for each in data.split(' ') if data]\n print(post_order)\n\n def helper(lower=float('-inf'), upper=float('inf')):\n if not post_order or post_order[-1] < lower or post_order[-1] > upper:\n return None\n val = post_order.pop()\n root = TreeNode(val)\n root.right = helper(val, upper)\n root.left = helper(lower, val)\n return root\n r = helper()\n return r\n<|end_body_1|>\n", "revision_id": "80cca595dc688ca67c1ebb45b339e724ec09c374", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. 
:type root: TreeNode :rtype: str\"\"\"\n def post_order(root):\n return post_order(root.left) + post_order(root.right) + [root.val] if root else []\n return ' '.join(map(str, post_order(root)))\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n post_order = [int(each) for each in data.split(' ') if data]\n print(post_order)\n\n def helper(lower=float('-inf'), upper=float('inf')):\n if not post_order or post_order[-1] < lower or post_order[-1] > upper:\n return None\n val = post_order.pop()\n root = TreeNode(val)\n root.right = helper(val, upper)\n root.left = helper(lower, val)\n return root\n r = helper()\n return r\n", "source": "the_stack_v2_python_sparse", "source_path": "Companies/Amazon/449.py", "source_repo": "Dinesh94Singh/PythonArchivedSolutions", "split": "val", "star_events_count": 0} {"blob_id": "b39c5e6d3f0a30cb5d5c9348c1c341344b9e76de", "bodies": ["self.assertEqual('', Solution().prefix_of_two('', 'abcd'))\nself.assertEqual('abc', Solution().prefix_of_two('abcd', 'abcefl'))\nself.assertEqual('abc', Solution().prefix_of_two('abc', 'abc'))", "self.assertEqual('', Solution().longest_common_prefix([]))\nself.assertEqual('abcd', Solution().longest_common_prefix(['abcd']))\nstrs = ['alpha', 'alpine', 'alpen', 'aligator', 'alpruce']\nself.assertEqual('al', Solution().longest_common_prefix(strs))"], "bodies_text": "<|body_start_0|>\n self.assertEqual('', Solution().prefix_of_two('', 'abcd'))\n self.assertEqual('abc', Solution().prefix_of_two('abcd', 'abcefl'))\n self.assertEqual('abc', Solution().prefix_of_two('abc', 'abc'))\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertEqual('', Solution().longest_common_prefix([]))\n self.assertEqual('abcd', Solution().longest_common_prefix(['abcd']))\n strs = ['alpha', 'alpine', 'alpen', 'aligator', 'alpruce']\n self.assertEqual('al', Solution().longest_common_prefix(strs))\n<|end_body_1|>\n", "class_docstring": "Tests for Leetcode problem 14: Longest Common Prefix.", "class_name": "ProblemTest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProblemTest:\n \"\"\"Tests for Leetcode problem 14: Longest Common Prefix.\"\"\"\n\n def test_prefix_of_two(self):\n \"\"\"Test the prefix_of_two method.\"\"\"\n <|body_0|>\n\n def test_solution(self):\n \"\"\"Test the full solution.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.assertEqual('', Solution().prefix_of_two('', 'abcd'))\n self.assertEqual('abc', Solution().prefix_of_two('abcd', 'abcefl'))\n self.assertEqual('abc', Solution().prefix_of_two('abc', 'abc'))\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertEqual('', Solution().longest_common_prefix([]))\n self.assertEqual('abcd', Solution().longest_common_prefix(['abcd']))\n strs = ['alpha', 'alpine', 'alpen', 'aligator', 'alpruce']\n self.assertEqual('al', Solution().longest_common_prefix(strs))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000289", "length_bytes": 2028, "license_type": "no_license", "methods": [{"docstring": "Test the prefix_of_two method.", "name": "test_prefix_of_two", "signature": "def test_prefix_of_two(self)"}, {"docstring": "Test the full solution.", "name": "test_solution", "signature": "def test_solution(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000860", "prompt": "Implement the Python class `ProblemTest` described below.\n\nClass description:\nTests for Leetcode problem 14: Longest Common Prefix.\n\nMethod signatures and 
docstrings:\n- def test_prefix_of_two(self): Test the prefix_of_two method.\n- def test_solution(self): Test the full solution.", "prompted_full_text": "Implement the Python class `ProblemTest` described below.\n\nClass description:\nTests for Leetcode problem 14: Longest Common Prefix.\n\nMethod signatures and docstrings:\n- def test_prefix_of_two(self): Test the prefix_of_two method.\n- def test_solution(self): Test the full solution.\n\n<|skeleton|>\nclass ProblemTest:\n \"\"\"Tests for Leetcode problem 14: Longest Common Prefix.\"\"\"\n\n def test_prefix_of_two(self):\n \"\"\"Test the prefix_of_two method.\"\"\"\n <|body_0|>\n\n def test_solution(self):\n \"\"\"Test the full solution.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.assertEqual('', Solution().prefix_of_two('', 'abcd'))\n self.assertEqual('abc', Solution().prefix_of_two('abcd', 'abcefl'))\n self.assertEqual('abc', Solution().prefix_of_two('abc', 'abc'))\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertEqual('', Solution().longest_common_prefix([]))\n self.assertEqual('abcd', Solution().longest_common_prefix(['abcd']))\n strs = ['alpha', 'alpine', 'alpen', 'aligator', 'alpruce']\n self.assertEqual('al', Solution().longest_common_prefix(strs))\n<|end_body_1|>\n", "revision_id": "e11bfc454789e716055b80873af0817ec8588aea", "skeleton": "<|skeleton|>\nclass ProblemTest:\n \"\"\"Tests for Leetcode problem 14: Longest Common Prefix.\"\"\"\n\n def test_prefix_of_two(self):\n \"\"\"Test the prefix_of_two method.\"\"\"\n <|body_0|>\n\n def test_solution(self):\n \"\"\"Test the full solution.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ProblemTest:\n \"\"\"Tests for Leetcode problem 14: Longest Common Prefix.\"\"\"\n\n def test_prefix_of_two(self):\n \"\"\"Test the prefix_of_two method.\"\"\"\n self.assertEqual('', Solution().prefix_of_two('', 'abcd'))\n self.assertEqual('abc', Solution().prefix_of_two('abcd', 'abcefl'))\n self.assertEqual('abc', Solution().prefix_of_two('abc', 'abc'))\n\n def test_solution(self):\n \"\"\"Test the full solution.\"\"\"\n self.assertEqual('', Solution().longest_common_prefix([]))\n self.assertEqual('abcd', Solution().longest_common_prefix(['abcd']))\n strs = ['alpha', 'alpine', 'alpen', 'aligator', 'alpruce']\n self.assertEqual('al', Solution().longest_common_prefix(strs))\n", "source": "the_stack_v2_python_sparse", "source_path": "p14/problem14.py", "source_repo": "stanl3y/leetcode", "split": "val", "star_events_count": 0} {"blob_id": "39c84356edbc37cdbc316ab07b9c1bfd7b556fa3", "bodies": ["self.graph = GraphFactory.load_db_into_graph()\nself.all_profiles = models.Profile.objects.all()\nself.all_profiles_count = len(self.all_profiles)", "current_profile_count = 0\nfor profile in self.all_profiles:\n start_ms = time.time() * 1000.0\n profile_id = profile.id\n current_profile_count = current_profile_count + 1\n results = self.graph.algo().recommend_listings_for_profile('p-{}'.format(profile_id))\n for current_tuple in results:\n listing_raw = current_tuple[0]\n listing_id = int(listing_raw.split('-')[1])\n score = current_tuple[1]\n self.profile_result_set.add_listing_to_user_profile(profile_id, listing_id, score)\n end_ms = time.time() * 1000.0\n logger.debug('Calculated Profile {}/{}, took {} ms'.format(current_profile_count, self.all_profiles_count, round(end_ms - start_ms, 3)))"], "bodies_text": "<|body_start_0|>\n self.graph = 
GraphFactory.load_db_into_graph()\n self.all_profiles = models.Profile.objects.all()\n self.all_profiles_count = len(self.all_profiles)\n<|end_body_0|>\n\n<|body_start_1|>\n current_profile_count = 0\n for profile in self.all_profiles:\n start_ms = time.time() * 1000.0\n profile_id = profile.id\n current_profile_count = current_profile_count + 1\n results = self.graph.algo().recommend_listings_for_profile('p-{}'.format(profile_id))\n for current_tuple in results:\n listing_raw = current_tuple[0]\n listing_id = int(listing_raw.split('-')[1])\n score = current_tuple[1]\n self.profile_result_set.add_listing_to_user_profile(profile_id, listing_id, score)\n end_ms = time.time() * 1000.0\n logger.debug('Calculated Profile {}/{}, took {} ms'.format(current_profile_count, self.all_profiles_count, round(end_ms - start_ms, 3)))\n<|end_body_1|>\n", "class_docstring": "Graph Collaborative Filtering based on Bookmarkes", "class_name": "GraphCollaborativeFilteringBaseRecommender", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GraphCollaborativeFilteringBaseRecommender:\n \"\"\"Graph Collaborative Filtering based on Bookmarkes\"\"\"\n\n def initiate(self):\n \"\"\"Initiate any variables needed for recommendation_logic function\"\"\"\n <|body_0|>\n\n def recommendation_logic(self):\n \"\"\"Recommendation logic\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.graph = GraphFactory.load_db_into_graph()\n self.all_profiles = models.Profile.objects.all()\n self.all_profiles_count = len(self.all_profiles)\n<|end_body_0|>\n\n<|body_start_1|>\n current_profile_count = 0\n for profile in self.all_profiles:\n start_ms = time.time() * 1000.0\n profile_id = profile.id\n current_profile_count = current_profile_count + 1\n results = self.graph.algo().recommend_listings_for_profile('p-{}'.format(profile_id))\n for current_tuple in results:\n listing_raw = current_tuple[0]\n listing_id = int(listing_raw.split('-')[1])\n score = current_tuple[1]\n self.profile_result_set.add_listing_to_user_profile(profile_id, listing_id, score)\n end_ms = time.time() * 1000.0\n logger.debug('Calculated Profile {}/{}, took {} ms'.format(current_profile_count, self.all_profiles_count, round(end_ms - start_ms, 3)))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000290", "length_bytes": 31433, "license_type": "permissive", "methods": [{"docstring": "Initiate any variables needed for recommendation_logic function", "name": "initiate", "signature": "def initiate(self)"}, {"docstring": "Recommendation logic", "name": "recommendation_logic", "signature": "def recommendation_logic(self)"}], "n_methods": 2, "prompt": "Implement the Python class `GraphCollaborativeFilteringBaseRecommender` described below.\n\nClass description:\nGraph Collaborative Filtering based on Bookmarkes\n\nMethod signatures and docstrings:\n- def initiate(self): Initiate any variables needed for recommendation_logic function\n- def recommendation_logic(self): Recommendation logic", "prompted_full_text": "Implement the Python class `GraphCollaborativeFilteringBaseRecommender` described below.\n\nClass description:\nGraph Collaborative Filtering based on Bookmarkes\n\nMethod signatures and docstrings:\n- def initiate(self): Initiate any variables needed for recommendation_logic function\n- def recommendation_logic(self): Recommendation logic\n\n<|skeleton|>\nclass GraphCollaborativeFilteringBaseRecommender:\n \"\"\"Graph Collaborative Filtering based on 
Bookmarkes\"\"\"\n\n def initiate(self):\n \"\"\"Initiate any variables needed for recommendation_logic function\"\"\"\n <|body_0|>\n\n def recommendation_logic(self):\n \"\"\"Recommendation logic\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.graph = GraphFactory.load_db_into_graph()\n self.all_profiles = models.Profile.objects.all()\n self.all_profiles_count = len(self.all_profiles)\n<|end_body_0|>\n\n<|body_start_1|>\n current_profile_count = 0\n for profile in self.all_profiles:\n start_ms = time.time() * 1000.0\n profile_id = profile.id\n current_profile_count = current_profile_count + 1\n results = self.graph.algo().recommend_listings_for_profile('p-{}'.format(profile_id))\n for current_tuple in results:\n listing_raw = current_tuple[0]\n listing_id = int(listing_raw.split('-')[1])\n score = current_tuple[1]\n self.profile_result_set.add_listing_to_user_profile(profile_id, listing_id, score)\n end_ms = time.time() * 1000.0\n logger.debug('Calculated Profile {}/{}, took {} ms'.format(current_profile_count, self.all_profiles_count, round(end_ms - start_ms, 3)))\n<|end_body_1|>\n", "revision_id": "d31d00bb8a28a8d0c999813f616b398f41516244", "skeleton": "<|skeleton|>\nclass GraphCollaborativeFilteringBaseRecommender:\n \"\"\"Graph Collaborative Filtering based on Bookmarkes\"\"\"\n\n def initiate(self):\n \"\"\"Initiate any variables needed for recommendation_logic function\"\"\"\n <|body_0|>\n\n def recommendation_logic(self):\n \"\"\"Recommendation logic\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GraphCollaborativeFilteringBaseRecommender:\n \"\"\"Graph Collaborative Filtering based on Bookmarkes\"\"\"\n\n def initiate(self):\n \"\"\"Initiate any variables needed for recommendation_logic function\"\"\"\n self.graph = GraphFactory.load_db_into_graph()\n self.all_profiles = models.Profile.objects.all()\n self.all_profiles_count = len(self.all_profiles)\n\n def recommendation_logic(self):\n \"\"\"Recommendation logic\"\"\"\n current_profile_count = 0\n for profile in self.all_profiles:\n start_ms = time.time() * 1000.0\n profile_id = profile.id\n current_profile_count = current_profile_count + 1\n results = self.graph.algo().recommend_listings_for_profile('p-{}'.format(profile_id))\n for current_tuple in results:\n listing_raw = current_tuple[0]\n listing_id = int(listing_raw.split('-')[1])\n score = current_tuple[1]\n self.profile_result_set.add_listing_to_user_profile(profile_id, listing_id, score)\n end_ms = time.time() * 1000.0\n logger.debug('Calculated Profile {}/{}, took {} ms'.format(current_profile_count, self.all_profiles_count, round(end_ms - start_ms, 3)))\n", "source": "the_stack_v2_python_sparse", "source_path": "ozpcenter/recommend/recommend.py", "source_repo": "ozoneplatform/ozp-backend", "split": "val", "star_events_count": 1} {"blob_id": "387472346d0461269a4b94016b8f09059d456161", "bodies": ["self.w = w\nself.total = sum(w)\nself.l = len(w)", "seed = random.randint(1, self.total)\ncur = 0\nfor i in range(self.l):\n cur += self.w[i]\n if cur >= seed:\n return i"], "bodies_text": "<|body_start_0|>\n self.w = w\n self.total = sum(w)\n self.l = len(w)\n<|end_body_0|>\n\n<|body_start_1|>\n seed = random.randint(1, self.total)\n cur = 0\n for i in range(self.l):\n cur += self.w[i]\n if cur >= seed:\n return i\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": 
"stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def __init__(self, w):\n \"\"\":type w: List[int]\"\"\"\n <|body_0|>\n\n def pickIndex(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.w = w\n self.total = sum(w)\n self.l = len(w)\n<|end_body_0|>\n\n<|body_start_1|>\n seed = random.randint(1, self.total)\n cur = 0\n for i in range(self.l):\n cur += self.w[i]\n if cur >= seed:\n return i\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000291", "length_bytes": 1297, "license_type": "no_license", "methods": [{"docstring": ":type w: List[int]", "name": "__init__", "signature": "def __init__(self, w)"}, {"docstring": ":rtype: int", "name": "pickIndex", "signature": "def pickIndex(self)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def __init__(self, w): :type w: List[int]\n- def pickIndex(self): :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def __init__(self, w): :type w: List[int]\n- def pickIndex(self): :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def __init__(self, w):\n \"\"\":type w: List[int]\"\"\"\n <|body_0|>\n\n def pickIndex(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.w = w\n self.total = sum(w)\n self.l = len(w)\n<|end_body_0|>\n\n<|body_start_1|>\n seed = random.randint(1, self.total)\n cur = 0\n for i in range(self.l):\n cur += self.w[i]\n if cur >= seed:\n return i\n<|end_body_1|>\n", "revision_id": "fd310ec0a989e003242f1840230aaac150f006f0", "skeleton": "<|skeleton|>\nclass Solution:\n\n def __init__(self, w):\n \"\"\":type w: List[int]\"\"\"\n <|body_0|>\n\n def pickIndex(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def __init__(self, w):\n \"\"\":type w: List[int]\"\"\"\n self.w = w\n self.total = sum(w)\n self.l = len(w)\n\n def pickIndex(self):\n \"\"\":rtype: int\"\"\"\n seed = random.randint(1, self.total)\n cur = 0\n for i in range(self.l):\n cur += self.w[i]\n if cur >= seed:\n return i\n", "source": "the_stack_v2_python_sparse", "source_path": "900plus/RandomPickwithWeight528.py", "source_repo": "jing1988a/python_fb", "split": "val", "star_events_count": 0} {"blob_id": "a5c8756f99ae47e9b7fc83f8cacb17d248f9dbcd", "bodies": ["self.ad_options = ad_options\nself.child_restore_task_ids = child_restore_task_ids\nself.enable_auto_sync = enable_auto_sync\nself.options = options\nself.oracle_options = oracle_options\nself.restore_task_id = restore_task_id\nself.sql_options = sql_options", "if dictionary is None:\n return None\nad_options = cohesity_management_sdk.models.ad_restore_options.AdRestoreOptions.from_dictionary(dictionary.get('adOptions')) if dictionary.get('adOptions') else None\nchild_restore_task_ids = dictionary.get('childRestoreTaskIds')\nenable_auto_sync = dictionary.get('enableAutoSync')\noptions = cohesity_management_sdk.models.update_restore_task_options.UpdateRestoreTaskOptions.from_dictionary(dictionary.get('options')) if dictionary.get('options') else None\noracle_options = 
cohesity_management_sdk.models.oracle_update_restore_task_options.OracleUpdateRestoreTaskOptions.from_dictionary(dictionary.get('oracleOptions')) if dictionary.get('oracleOptions') else None\nrestore_task_id = dictionary.get('restoreTaskId')\nsql_options = dictionary.get('sqlOptions')\nreturn cls(ad_options, child_restore_task_ids, enable_auto_sync, options, oracle_options, restore_task_id, sql_options)"], "bodies_text": "<|body_start_0|>\n self.ad_options = ad_options\n self.child_restore_task_ids = child_restore_task_ids\n self.enable_auto_sync = enable_auto_sync\n self.options = options\n self.oracle_options = oracle_options\n self.restore_task_id = restore_task_id\n self.sql_options = sql_options\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n ad_options = cohesity_management_sdk.models.ad_restore_options.AdRestoreOptions.from_dictionary(dictionary.get('adOptions')) if dictionary.get('adOptions') else None\n child_restore_task_ids = dictionary.get('childRestoreTaskIds')\n enable_auto_sync = dictionary.get('enableAutoSync')\n options = cohesity_management_sdk.models.update_restore_task_options.UpdateRestoreTaskOptions.from_dictionary(dictionary.get('options')) if dictionary.get('options') else None\n oracle_options = cohesity_management_sdk.models.oracle_update_restore_task_options.OracleUpdateRestoreTaskOptions.from_dictionary(dictionary.get('oracleOptions')) if dictionary.get('oracleOptions') else None\n restore_task_id = dictionary.get('restoreTaskId')\n sql_options = dictionary.get('sqlOptions')\n return cls(ad_options, child_restore_task_ids, enable_auto_sync, options, oracle_options, restore_task_id, sql_options)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'UpdateRestoreTaskParams' model. UpdateRestoreTaskParams holds the information to update a Restore Task in Magneto. Attributes: ad_options (AdRestoreOptions): Specifies the Active Directory options to update the Restore Task with. child_restore_task_ids (list of long|int): Specifies the ID of the child restore tasks of 'RestoreTaskId' to which the update is meant. enable_auto_sync (bool): Enables Auto Sync feature for SQL Multi-stage Restore task. options (UpdateRestoreTaskOptions): Specifies generic options to update the restore task. oracle_options (OracleUpdateRestoreTaskOptions): Specifies the oracle options to update the Restore Task with. restore_task_id (long|int", "class_name": "UpdateRestoreTaskParams", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UpdateRestoreTaskParams:\n \"\"\"Implementation of the 'UpdateRestoreTaskParams' model. UpdateRestoreTaskParams holds the information to update a Restore Task in Magneto. Attributes: ad_options (AdRestoreOptions): Specifies the Active Directory options to update the Restore Task with. child_restore_task_ids (list of long|int): Specifies the ID of the child restore tasks of 'RestoreTaskId' to which the update is meant. enable_auto_sync (bool): Enables Auto Sync feature for SQL Multi-stage Restore task. options (UpdateRestoreTaskOptions): Specifies generic options to update the restore task. oracle_options (OracleUpdateRestoreTaskOptions): Specifies the oracle options to update the Restore Task with. 
restore_task_id (long|int\"\"\"\n\n def __init__(self, ad_options=None, child_restore_task_ids=None, enable_auto_sync=None, options=None, oracle_options=None, restore_task_id=None, sql_options=None):\n \"\"\"Constructor for the UpdateRestoreTaskParams class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.ad_options = ad_options\n self.child_restore_task_ids = child_restore_task_ids\n self.enable_auto_sync = enable_auto_sync\n self.options = options\n self.oracle_options = oracle_options\n self.restore_task_id = restore_task_id\n self.sql_options = sql_options\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n ad_options = cohesity_management_sdk.models.ad_restore_options.AdRestoreOptions.from_dictionary(dictionary.get('adOptions')) if dictionary.get('adOptions') else None\n child_restore_task_ids = dictionary.get('childRestoreTaskIds')\n enable_auto_sync = dictionary.get('enableAutoSync')\n options = cohesity_management_sdk.models.update_restore_task_options.UpdateRestoreTaskOptions.from_dictionary(dictionary.get('options')) if dictionary.get('options') else None\n oracle_options = cohesity_management_sdk.models.oracle_update_restore_task_options.OracleUpdateRestoreTaskOptions.from_dictionary(dictionary.get('oracleOptions')) if dictionary.get('oracleOptions') else None\n restore_task_id = dictionary.get('restoreTaskId')\n sql_options = dictionary.get('sqlOptions')\n return cls(ad_options, child_restore_task_ids, enable_auto_sync, options, oracle_options, restore_task_id, sql_options)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000292", "length_bytes": 4342, "license_type": "permissive", "methods": [{"docstring": "Constructor for the UpdateRestoreTaskParams class", "name": "__init__", "signature": "def __init__(self, ad_options=None, child_restore_task_ids=None, enable_auto_sync=None, options=None, oracle_options=None, restore_task_id=None, sql_options=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `UpdateRestoreTaskParams` described below.\n\nClass description:\nImplementation of the 'UpdateRestoreTaskParams' model. UpdateRestoreTaskParams holds the information to update a Restore Task in Magneto. Attributes: ad_options (AdRestoreOptions): Specifies the Active Directory options to update the Restore Task with. child_restore_task_ids (list of long|int): Specifies the ID of the child restore tasks of 'RestoreTaskId' to which the update is meant. enable_auto_sync (bool): Enables Auto Sync feature for SQL Multi-stage Restore task. options (UpdateRestoreTaskOptions): Specifies generic options to update the restore task. oracle_options (OracleUpdateRestoreTaskOptions): Specifies the oracle options to update the Restore Task with. 
restore_task_id (long|int\n\nMethod signatures and docstrings:\n- def __init__(self, ad_options=None, child_restore_task_ids=None, enable_auto_sync=None, options=None, oracle_options=None, restore_task_id=None, sql_options=None): Constructor for the UpdateRestoreTaskParams class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `UpdateRestoreTaskParams` described below.\n\nClass description:\nImplementation of the 'UpdateRestoreTaskParams' model. UpdateRestoreTaskParams holds the information to update a Restore Task in Magneto. Attributes: ad_options (AdRestoreOptions): Specifies the Active Directory options to update the Restore Task with. child_restore_task_ids (list of long|int): Specifies the ID of the child restore tasks of 'RestoreTaskId' to which the update is meant. enable_auto_sync (bool): Enables Auto Sync feature for SQL Multi-stage Restore task. options (UpdateRestoreTaskOptions): Specifies generic options to update the restore task. oracle_options (OracleUpdateRestoreTaskOptions): Specifies the oracle options to update the Restore Task with. restore_task_id (long|int\n\nMethod signatures and docstrings:\n- def __init__(self, ad_options=None, child_restore_task_ids=None, enable_auto_sync=None, options=None, oracle_options=None, restore_task_id=None, sql_options=None): Constructor for the UpdateRestoreTaskParams class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass UpdateRestoreTaskParams:\n \"\"\"Implementation of the 'UpdateRestoreTaskParams' model. UpdateRestoreTaskParams holds the information to update a Restore Task in Magneto. Attributes: ad_options (AdRestoreOptions): Specifies the Active Directory options to update the Restore Task with. child_restore_task_ids (list of long|int): Specifies the ID of the child restore tasks of 'RestoreTaskId' to which the update is meant. enable_auto_sync (bool): Enables Auto Sync feature for SQL Multi-stage Restore task. options (UpdateRestoreTaskOptions): Specifies generic options to update the restore task. oracle_options (OracleUpdateRestoreTaskOptions): Specifies the oracle options to update the Restore Task with. restore_task_id (long|int\"\"\"\n\n def __init__(self, ad_options=None, child_restore_task_ids=None, enable_auto_sync=None, options=None, oracle_options=None, restore_task_id=None, sql_options=None):\n \"\"\"Constructor for the UpdateRestoreTaskParams class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.ad_options = ad_options\n self.child_restore_task_ids = child_restore_task_ids\n self.enable_auto_sync = enable_auto_sync\n self.options = options\n self.oracle_options = oracle_options\n self.restore_task_id = restore_task_id\n self.sql_options = sql_options\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n ad_options = cohesity_management_sdk.models.ad_restore_options.AdRestoreOptions.from_dictionary(dictionary.get('adOptions')) if dictionary.get('adOptions') else None\n child_restore_task_ids = dictionary.get('childRestoreTaskIds')\n enable_auto_sync = dictionary.get('enableAutoSync')\n options = cohesity_management_sdk.models.update_restore_task_options.UpdateRestoreTaskOptions.from_dictionary(dictionary.get('options')) if dictionary.get('options') else None\n oracle_options = cohesity_management_sdk.models.oracle_update_restore_task_options.OracleUpdateRestoreTaskOptions.from_dictionary(dictionary.get('oracleOptions')) if dictionary.get('oracleOptions') else None\n restore_task_id = dictionary.get('restoreTaskId')\n sql_options = dictionary.get('sqlOptions')\n return cls(ad_options, child_restore_task_ids, enable_auto_sync, options, oracle_options, restore_task_id, sql_options)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass UpdateRestoreTaskParams:\n \"\"\"Implementation of the 'UpdateRestoreTaskParams' model. UpdateRestoreTaskParams holds the information to update a Restore Task in Magneto. Attributes: ad_options (AdRestoreOptions): Specifies the Active Directory options to update the Restore Task with. child_restore_task_ids (list of long|int): Specifies the ID of the child restore tasks of 'RestoreTaskId' to which the update is meant. enable_auto_sync (bool): Enables Auto Sync feature for SQL Multi-stage Restore task. options (UpdateRestoreTaskOptions): Specifies generic options to update the restore task. oracle_options (OracleUpdateRestoreTaskOptions): Specifies the oracle options to update the Restore Task with. restore_task_id (long|int\"\"\"\n\n def __init__(self, ad_options=None, child_restore_task_ids=None, enable_auto_sync=None, options=None, oracle_options=None, restore_task_id=None, sql_options=None):\n \"\"\"Constructor for the UpdateRestoreTaskParams class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UpdateRestoreTaskParams:\n \"\"\"Implementation of the 'UpdateRestoreTaskParams' model. UpdateRestoreTaskParams holds the information to update a Restore Task in Magneto. Attributes: ad_options (AdRestoreOptions): Specifies the Active Directory options to update the Restore Task with. child_restore_task_ids (list of long|int): Specifies the ID of the child restore tasks of 'RestoreTaskId' to which the update is meant. enable_auto_sync (bool): Enables Auto Sync feature for SQL Multi-stage Restore task. 
options (UpdateRestoreTaskOptions): Specifies generic options to update the restore task. oracle_options (OracleUpdateRestoreTaskOptions): Specifies the oracle options to update the Restore Task with. restore_task_id (long|int\"\"\"\n\n def __init__(self, ad_options=None, child_restore_task_ids=None, enable_auto_sync=None, options=None, oracle_options=None, restore_task_id=None, sql_options=None):\n \"\"\"Constructor for the UpdateRestoreTaskParams class\"\"\"\n self.ad_options = ad_options\n self.child_restore_task_ids = child_restore_task_ids\n self.enable_auto_sync = enable_auto_sync\n self.options = options\n self.oracle_options = oracle_options\n self.restore_task_id = restore_task_id\n self.sql_options = sql_options\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n ad_options = cohesity_management_sdk.models.ad_restore_options.AdRestoreOptions.from_dictionary(dictionary.get('adOptions')) if dictionary.get('adOptions') else None\n child_restore_task_ids = dictionary.get('childRestoreTaskIds')\n enable_auto_sync = dictionary.get('enableAutoSync')\n options = cohesity_management_sdk.models.update_restore_task_options.UpdateRestoreTaskOptions.from_dictionary(dictionary.get('options')) if dictionary.get('options') else None\n oracle_options = cohesity_management_sdk.models.oracle_update_restore_task_options.OracleUpdateRestoreTaskOptions.from_dictionary(dictionary.get('oracleOptions')) if dictionary.get('oracleOptions') else None\n restore_task_id = dictionary.get('restoreTaskId')\n sql_options = dictionary.get('sqlOptions')\n return cls(ad_options, child_restore_task_ids, enable_auto_sync, options, oracle_options, restore_task_id, sql_options)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/update_restore_task_params.py", "source_repo": "cohesity/management-sdk-python", "split": "val", "star_events_count": 24} {"blob_id": "58fe5db29763d16c78a46c418836a3e2b0df15f5", "bodies": ["context = req.environ['cinder.context']\nvolume = objects.Volume.get_by_id(context, volume_id)\ncontext.authorize(policy.ENCRYPTION_METADATA_POLICY, target_obj=volume)\nreturn db.volume_encryption_metadata_get(context, volume_id)", "encryption_item = self.index(req, volume_id)\nif encryption_item is not None:\n return encryption_item[id]\nelse:\n return None"], "bodies_text": "<|body_start_0|>\n context = req.environ['cinder.context']\n volume = objects.Volume.get_by_id(context, volume_id)\n context.authorize(policy.ENCRYPTION_METADATA_POLICY, target_obj=volume)\n return db.volume_encryption_metadata_get(context, volume_id)\n<|end_body_0|>\n\n<|body_start_1|>\n encryption_item = self.index(req, volume_id)\n if encryption_item is not None:\n return encryption_item[id]\n else:\n return None\n<|end_body_1|>\n", "class_docstring": "The volume encryption metadata API extension.", "class_name": "VolumeEncryptionMetadataController", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VolumeEncryptionMetadataController:\n \"\"\"The volume encryption metadata API extension.\"\"\"\n\n def index(self, req, volume_id):\n \"\"\"Returns the encryption metadata for a given 
volume.\"\"\"\n <|body_0|>\n\n def show(self, req, volume_id, id):\n \"\"\"Return a single encryption item.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context = req.environ['cinder.context']\n volume = objects.Volume.get_by_id(context, volume_id)\n context.authorize(policy.ENCRYPTION_METADATA_POLICY, target_obj=volume)\n return db.volume_encryption_metadata_get(context, volume_id)\n<|end_body_0|>\n\n<|body_start_1|>\n encryption_item = self.index(req, volume_id)\n if encryption_item is not None:\n return encryption_item[id]\n else:\n return None\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000293", "length_bytes": 2194, "license_type": "permissive", "methods": [{"docstring": "Returns the encryption metadata for a given volume.", "name": "index", "signature": "def index(self, req, volume_id)"}, {"docstring": "Return a single encryption item.", "name": "show", "signature": "def show(self, req, volume_id, id)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000831", "prompt": "Implement the Python class `VolumeEncryptionMetadataController` described below.\n\nClass description:\nThe volume encryption metadata API extension.\n\nMethod signatures and docstrings:\n- def index(self, req, volume_id): Returns the encryption metadata for a given volume.\n- def show(self, req, volume_id, id): Return a single encryption item.", "prompted_full_text": "Implement the Python class `VolumeEncryptionMetadataController` described below.\n\nClass description:\nThe volume encryption metadata API extension.\n\nMethod signatures and docstrings:\n- def index(self, req, volume_id): Returns the encryption metadata for a given volume.\n- def show(self, req, volume_id, id): Return a single encryption item.\n\n<|skeleton|>\nclass VolumeEncryptionMetadataController:\n \"\"\"The volume encryption metadata API extension.\"\"\"\n\n def index(self, req, volume_id):\n \"\"\"Returns the encryption metadata for a given volume.\"\"\"\n <|body_0|>\n\n def show(self, req, volume_id, id):\n \"\"\"Return a single encryption item.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context = req.environ['cinder.context']\n volume = objects.Volume.get_by_id(context, volume_id)\n context.authorize(policy.ENCRYPTION_METADATA_POLICY, target_obj=volume)\n return db.volume_encryption_metadata_get(context, volume_id)\n<|end_body_0|>\n\n<|body_start_1|>\n encryption_item = self.index(req, volume_id)\n if encryption_item is not None:\n return encryption_item[id]\n else:\n return None\n<|end_body_1|>\n", "revision_id": "04a5d6b8c28271f6aefe2bbae6a1e16c1c235835", "skeleton": "<|skeleton|>\nclass VolumeEncryptionMetadataController:\n \"\"\"The volume encryption metadata API extension.\"\"\"\n\n def index(self, req, volume_id):\n \"\"\"Returns the encryption metadata for a given volume.\"\"\"\n <|body_0|>\n\n def show(self, req, volume_id, id):\n \"\"\"Return a single encryption item.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class VolumeEncryptionMetadataController:\n \"\"\"The volume encryption metadata API extension.\"\"\"\n\n def index(self, req, volume_id):\n \"\"\"Returns the encryption metadata for a given volume.\"\"\"\n context = req.environ['cinder.context']\n volume = objects.Volume.get_by_id(context, volume_id)\n context.authorize(policy.ENCRYPTION_METADATA_POLICY, target_obj=volume)\n return db.volume_encryption_metadata_get(context, volume_id)\n\n def 
show(self, req, volume_id, id):\n \"\"\"Return a single encryption item.\"\"\"\n encryption_item = self.index(req, volume_id)\n if encryption_item is not None:\n return encryption_item[id]\n else:\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "cinder/api/contrib/volume_encryption_metadata.py", "source_repo": "LINBIT/openstack-cinder", "split": "val", "star_events_count": 9} {"blob_id": "f70fd974c27e27efe546ebcb21fbaff8e3ff9664", "bodies": ["path = super().save(dirpath, data, image, extension, **kwargs)\nif self.model is not None:\n with open(join(path, 'model.pkl'), 'wb') as file:\n pickle.dump(self.model, file)\nreturn path", "io = IO()\nvalues_path = join(path, 'values.npy')\nif exists(values_path):\n values = io.read_npy(values_path)\nelse:\n values = None\nparameters = io.read_json(join(path, 'parameters.json'))\nwith open(join(path, 'model.pkl'), 'rb') as file:\n model = pickle.load(file)\nif values is not None:\n model.values = np.log(values)\nreturn cls(values, model=model, **parameters)"], "bodies_text": "<|body_start_0|>\n path = super().save(dirpath, data, image, extension, **kwargs)\n if self.model is not None:\n with open(join(path, 'model.pkl'), 'wb') as file:\n pickle.dump(self.model, file)\n return path\n<|end_body_0|>\n\n<|body_start_1|>\n io = IO()\n values_path = join(path, 'values.npy')\n if exists(values_path):\n values = io.read_npy(values_path)\n else:\n values = None\n parameters = io.read_json(join(path, 'parameters.json'))\n with open(join(path, 'model.pkl'), 'rb') as file:\n model = pickle.load(file)\n if values is not None:\n model.values = np.log(values)\n return cls(values, model=model, **parameters)\n<|end_body_1|>\n", "class_docstring": "Methods for saving and loading classifier objects.", "class_name": "MixtureModelIO", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MixtureModelIO:\n \"\"\"Methods for saving and loading classifier objects.\"\"\"\n\n def save(self, dirpath, data=False, image=True, extension=None, **kwargs):\n \"\"\"Save classifier to specified path. Args: dirpath (str) - directory in which classifier is to be saved data (bool) - if True, save training data image (bool) - if True, save labeled histogram image extension (str) - directory name extension kwargs: keyword arguments for image rendering\"\"\"\n <|body_0|>\n\n def load(cls, path):\n \"\"\"Load classifier from file. Args: path (str) - path to classifier directory Returns: classifier (Classifier derivative)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n path = super().save(dirpath, data, image, extension, **kwargs)\n if self.model is not None:\n with open(join(path, 'model.pkl'), 'wb') as file:\n pickle.dump(self.model, file)\n return path\n<|end_body_0|>\n\n<|body_start_1|>\n io = IO()\n values_path = join(path, 'values.npy')\n if exists(values_path):\n values = io.read_npy(values_path)\n else:\n values = None\n parameters = io.read_json(join(path, 'parameters.json'))\n with open(join(path, 'model.pkl'), 'rb') as file:\n model = pickle.load(file)\n if values is not None:\n model.values = np.log(values)\n return cls(values, model=model, **parameters)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000294", "length_bytes": 9152, "license_type": "permissive", "methods": [{"docstring": "Save classifier to specified path. 
Args: dirpath (str) - directory in which classifier is to be saved data (bool) - if True, save training data image (bool) - if True, save labeled histogram image extension (str) - directory name extension kwargs: keyword arguments for image rendering", "name": "save", "signature": "def save(self, dirpath, data=False, image=True, extension=None, **kwargs)"}, {"docstring": "Load classifier from file. Args: path (str) - path to classifier directory Returns: classifier (Classifier derivative)", "name": "load", "signature": "def load(cls, path)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003972", "prompt": "Implement the Python class `MixtureModelIO` described below.\n\nClass description:\nMethods for saving and loading classifier objects.\n\nMethod signatures and docstrings:\n- def save(self, dirpath, data=False, image=True, extension=None, **kwargs): Save classifier to specified path. Args: dirpath (str) - directory in which classifier is to be saved data (bool) - if True, save training data image (bool) - if True, save labeled histogram image extension (str) - directory name extension kwargs: keyword arguments for image rendering\n- def load(cls, path): Load classifier from file. Args: path (str) - path to classifier directory Returns: classifier (Classifier derivative)", "prompted_full_text": "Implement the Python class `MixtureModelIO` described below.\n\nClass description:\nMethods for saving and loading classifier objects.\n\nMethod signatures and docstrings:\n- def save(self, dirpath, data=False, image=True, extension=None, **kwargs): Save classifier to specified path. Args: dirpath (str) - directory in which classifier is to be saved data (bool) - if True, save training data image (bool) - if True, save labeled histogram image extension (str) - directory name extension kwargs: keyword arguments for image rendering\n- def load(cls, path): Load classifier from file. Args: path (str) - path to classifier directory Returns: classifier (Classifier derivative)\n\n<|skeleton|>\nclass MixtureModelIO:\n \"\"\"Methods for saving and loading classifier objects.\"\"\"\n\n def save(self, dirpath, data=False, image=True, extension=None, **kwargs):\n \"\"\"Save classifier to specified path. Args: dirpath (str) - directory in which classifier is to be saved data (bool) - if True, save training data image (bool) - if True, save labeled histogram image extension (str) - directory name extension kwargs: keyword arguments for image rendering\"\"\"\n <|body_0|>\n\n def load(cls, path):\n \"\"\"Load classifier from file. 
Args: path (str) - path to classifier directory Returns: classifier (Classifier derivative)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n path = super().save(dirpath, data, image, extension, **kwargs)\n if self.model is not None:\n with open(join(path, 'model.pkl'), 'wb') as file:\n pickle.dump(self.model, file)\n return path\n<|end_body_0|>\n\n<|body_start_1|>\n io = IO()\n values_path = join(path, 'values.npy')\n if exists(values_path):\n values = io.read_npy(values_path)\n else:\n values = None\n parameters = io.read_json(join(path, 'parameters.json'))\n with open(join(path, 'model.pkl'), 'rb') as file:\n model = pickle.load(file)\n if values is not None:\n model.values = np.log(values)\n return cls(values, model=model, **parameters)\n<|end_body_1|>\n", "revision_id": "4a622c3f5fed4456c3b9240f5a96428789fde9bd", "skeleton": "<|skeleton|>\nclass MixtureModelIO:\n \"\"\"Methods for saving and loading classifier objects.\"\"\"\n\n def save(self, dirpath, data=False, image=True, extension=None, **kwargs):\n \"\"\"Save classifier to specified path. Args: dirpath (str) - directory in which classifier is to be saved data (bool) - if True, save training data image (bool) - if True, save labeled histogram image extension (str) - directory name extension kwargs: keyword arguments for image rendering\"\"\"\n <|body_0|>\n\n def load(cls, path):\n \"\"\"Load classifier from file. Args: path (str) - path to classifier directory Returns: classifier (Classifier derivative)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MixtureModelIO:\n \"\"\"Methods for saving and loading classifier objects.\"\"\"\n\n def save(self, dirpath, data=False, image=True, extension=None, **kwargs):\n \"\"\"Save classifier to specified path. Args: dirpath (str) - directory in which classifier is to be saved data (bool) - if True, save training data image (bool) - if True, save labeled histogram image extension (str) - directory name extension kwargs: keyword arguments for image rendering\"\"\"\n path = super().save(dirpath, data, image, extension, **kwargs)\n if self.model is not None:\n with open(join(path, 'model.pkl'), 'wb') as file:\n pickle.dump(self.model, file)\n return path\n\n def load(cls, path):\n \"\"\"Load classifier from file. 
Args: path (str) - path to classifier directory Returns: classifier (Classifier derivative)\"\"\"\n io = IO()\n values_path = join(path, 'values.npy')\n if exists(values_path):\n values = io.read_npy(values_path)\n else:\n values = None\n parameters = io.read_json(join(path, 'parameters.json'))\n with open(join(path, 'model.pkl'), 'rb') as file:\n model = pickle.load(file)\n if values is not None:\n model.values = np.log(values)\n return cls(values, model=model, **parameters)\n", "source": "the_stack_v2_python_sparse", "source_path": "flyqma/annotation/classification/mixtures.py", "source_repo": "sbernasek/flyqma", "split": "val", "star_events_count": 1} {"blob_id": "b71a1b22ae30567ed7a662c8aa59f06caf1651f3", "bodies": ["app.config['TESTING'] = True\napp.testing = True\nself.client = app.test_client()", "ret = self.client.post('/login', data={})\nresp = ret.data\nresp = json.loads(resp)\nself.assertIn('code', resp)\nself.assertEqual(resp['code'], 1)\nret = self.client.post('/login', data={'user_name': 'admin'})\nresp = ret.data\nresp = json.loads(resp)\nself.assertIn('code', resp)\nself.assertEqual(resp['code'], 1)\nret = self.client.post('/login', data={'password': 'python'})\nresp = ret.data\nresp = json.loads(resp)\nself.assertIn('code', resp)\nself.assertEqual(resp['code'], 1)", "ret = self.client.post('/login', data={'user_name': 'itcast', 'password': 'itcast'})\nresp = ret.data\nresp = json.loads(resp)\nself.assertIn('code', resp)\nself.assertEqual(resp['code'], 2)"], "bodies_text": "<|body_start_0|>\n app.config['TESTING'] = True\n app.testing = True\n self.client = app.test_client()\n<|end_body_0|>\n\n<|body_start_1|>\n ret = self.client.post('/login', data={})\n resp = ret.data\n resp = json.loads(resp)\n self.assertIn('code', resp)\n self.assertEqual(resp['code'], 1)\n ret = self.client.post('/login', data={'user_name': 'admin'})\n resp = ret.data\n resp = json.loads(resp)\n self.assertIn('code', resp)\n self.assertEqual(resp['code'], 1)\n ret = self.client.post('/login', data={'password': 'python'})\n resp = ret.data\n resp = json.loads(resp)\n self.assertIn('code', resp)\n self.assertEqual(resp['code'], 1)\n<|end_body_1|>\n\n<|body_start_2|>\n ret = self.client.post('/login', data={'user_name': 'itcast', 'password': 'itcast'})\n resp = ret.data\n resp = json.loads(resp)\n self.assertIn('code', resp)\n self.assertEqual(resp['code'], 2)\n<|end_body_2|>\n", "class_docstring": "构造单元测试案例", "class_name": "LoginTest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LoginTest:\n \"\"\"构造单元测试案例\"\"\"\n\n def setUp(self):\n \"\"\"在测试之前,先被执行\"\"\"\n <|body_0|>\n\n def test_empty_name_password(self):\n \"\"\"测试用户名密码不完整的情况\"\"\"\n <|body_1|>\n\n def test_wrong_user_name_password(self):\n \"\"\"测试用户名家和密码错误\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n app.config['TESTING'] = True\n app.testing = True\n self.client = app.test_client()\n<|end_body_0|>\n\n<|body_start_1|>\n ret = self.client.post('/login', data={})\n resp = ret.data\n resp = json.loads(resp)\n self.assertIn('code', resp)\n self.assertEqual(resp['code'], 1)\n ret = self.client.post('/login', data={'user_name': 'admin'})\n resp = ret.data\n resp = json.loads(resp)\n self.assertIn('code', resp)\n self.assertEqual(resp['code'], 1)\n ret = self.client.post('/login', data={'password': 'python'})\n resp = ret.data\n resp = json.loads(resp)\n self.assertIn('code', resp)\n self.assertEqual(resp['code'], 1)\n<|end_body_1|>\n\n<|body_start_2|>\n ret = 
self.client.post('/login', data={'user_name': 'itcast', 'password': 'itcast'})\n resp = ret.data\n resp = json.loads(resp)\n self.assertIn('code', resp)\n self.assertEqual(resp['code'], 2)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000295", "length_bytes": 2624, "license_type": "no_license", "methods": [{"docstring": "在测试之前,先被执行", "name": "setUp", "signature": "def setUp(self)"}, {"docstring": "测试用户名密码不完整的情况", "name": "test_empty_name_password", "signature": "def test_empty_name_password(self)"}, {"docstring": "测试用户名家和密码错误", "name": "test_wrong_user_name_password", "signature": "def test_wrong_user_name_password(self)"}], "n_methods": 3, "prompt": "Implement the Python class `LoginTest` described below.\n\nClass description:\n构造单元测试案例\n\nMethod signatures and docstrings:\n- def setUp(self): 在测试之前,先被执行\n- def test_empty_name_password(self): 测试用户名密码不完整的情况\n- def test_wrong_user_name_password(self): 测试用户名家和密码错误", "prompted_full_text": "Implement the Python class `LoginTest` described below.\n\nClass description:\n构造单元测试案例\n\nMethod signatures and docstrings:\n- def setUp(self): 在测试之前,先被执行\n- def test_empty_name_password(self): 测试用户名密码不完整的情况\n- def test_wrong_user_name_password(self): 测试用户名家和密码错误\n\n<|skeleton|>\nclass LoginTest:\n \"\"\"构造单元测试案例\"\"\"\n\n def setUp(self):\n \"\"\"在测试之前,先被执行\"\"\"\n <|body_0|>\n\n def test_empty_name_password(self):\n \"\"\"测试用户名密码不完整的情况\"\"\"\n <|body_1|>\n\n def test_wrong_user_name_password(self):\n \"\"\"测试用户名家和密码错误\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n app.config['TESTING'] = True\n app.testing = True\n self.client = app.test_client()\n<|end_body_0|>\n\n<|body_start_1|>\n ret = self.client.post('/login', data={})\n resp = ret.data\n resp = json.loads(resp)\n self.assertIn('code', resp)\n self.assertEqual(resp['code'], 1)\n ret = self.client.post('/login', data={'user_name': 'admin'})\n resp = ret.data\n resp = json.loads(resp)\n self.assertIn('code', resp)\n self.assertEqual(resp['code'], 1)\n ret = self.client.post('/login', data={'password': 'python'})\n resp = ret.data\n resp = json.loads(resp)\n self.assertIn('code', resp)\n self.assertEqual(resp['code'], 1)\n<|end_body_1|>\n\n<|body_start_2|>\n ret = self.client.post('/login', data={'user_name': 'itcast', 'password': 'itcast'})\n resp = ret.data\n resp = json.loads(resp)\n self.assertIn('code', resp)\n self.assertEqual(resp['code'], 2)\n<|end_body_2|>\n", "revision_id": "217d594a3c0cba1e52550f74d100cc5023fb415b", "skeleton": "<|skeleton|>\nclass LoginTest:\n \"\"\"构造单元测试案例\"\"\"\n\n def setUp(self):\n \"\"\"在测试之前,先被执行\"\"\"\n <|body_0|>\n\n def test_empty_name_password(self):\n \"\"\"测试用户名密码不完整的情况\"\"\"\n <|body_1|>\n\n def test_wrong_user_name_password(self):\n \"\"\"测试用户名家和密码错误\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LoginTest:\n \"\"\"构造单元测试案例\"\"\"\n\n def setUp(self):\n \"\"\"在测试之前,先被执行\"\"\"\n app.config['TESTING'] = True\n app.testing = True\n self.client = app.test_client()\n\n def test_empty_name_password(self):\n \"\"\"测试用户名密码不完整的情况\"\"\"\n ret = self.client.post('/login', data={})\n resp = ret.data\n resp = json.loads(resp)\n self.assertIn('code', resp)\n self.assertEqual(resp['code'], 1)\n ret = self.client.post('/login', data={'user_name': 'admin'})\n resp = ret.data\n resp = json.loads(resp)\n self.assertIn('code', resp)\n self.assertEqual(resp['code'], 1)\n ret = self.client.post('/login', data={'password': 'python'})\n 
resp = ret.data\n resp = json.loads(resp)\n self.assertIn('code', resp)\n self.assertEqual(resp['code'], 1)\n\n def test_wrong_user_name_password(self):\n \"\"\"测试用户名家和密码错误\"\"\"\n ret = self.client.post('/login', data={'user_name': 'itcast', 'password': 'itcast'})\n resp = ret.data\n resp = json.loads(resp)\n self.assertIn('code', resp)\n self.assertEqual(resp['code'], 2)\n", "source": "the_stack_v2_python_sparse", "source_path": "Flask_Heima/Day04/test.py", "source_repo": "biabulinxi/Python-ML-DL", "split": "val", "star_events_count": 0} {"blob_id": "b706eeaa36af3986e9e2e68ec06176b593cb293a", "bodies": ["GC.read()\nif os.path.exists(CONFIG_OVERWRITE):\n cls.overwrite(CONFIG_OVERWRITE)", "conf_overwrite: dict = GC.read_conf(config_file_overwrite)\nfor sec, attr in conf_overwrite.items():\n for key, val in attr.items():\n try:\n _ = GC.conf[sec][key]\n GC.conf[sec][key] = val\n except KeyError:\n print(\"Overwrite config file has section/key that don't exist in base config!!! Abort!!!\")\n sys.exit(1)"], "bodies_text": "<|body_start_0|>\n GC.read()\n if os.path.exists(CONFIG_OVERWRITE):\n cls.overwrite(CONFIG_OVERWRITE)\n<|end_body_0|>\n\n<|body_start_1|>\n conf_overwrite: dict = GC.read_conf(config_file_overwrite)\n for sec, attr in conf_overwrite.items():\n for key, val in attr.items():\n try:\n _ = GC.conf[sec][key]\n GC.conf[sec][key] = val\n except KeyError:\n print(\"Overwrite config file has section/key that don't exist in base config!!! Abort!!!\")\n sys.exit(1)\n<|end_body_1|>\n", "class_docstring": "Handle overwrite config file. Take the module as helper instead of the derived class of GlobalConfig to be back compatible and avoid confusing. It only updates the global in-memory conf dict. It either cannot be merged with GlobalConfig for design reason. GlobalConfig should not depend on LOG_TYPE, but we have to depend on LOG_TYPE here to load the overwrite config file.", "class_name": "GCO", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GCO:\n \"\"\"Handle overwrite config file. Take the module as helper instead of the derived class of GlobalConfig to be back compatible and avoid confusing. It only updates the global in-memory conf dict. It either cannot be merged with GlobalConfig for design reason. GlobalConfig should not depend on LOG_TYPE, but we have to depend on LOG_TYPE here to load the overwrite config file.\"\"\"\n\n def read(cls):\n \"\"\"Overwrite version of read config file\"\"\"\n <|body_0|>\n\n def overwrite(cls, config_file_overwrite: str):\n \"\"\"Update im-momory conf with the overwrite config file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n GC.read()\n if os.path.exists(CONFIG_OVERWRITE):\n cls.overwrite(CONFIG_OVERWRITE)\n<|end_body_0|>\n\n<|body_start_1|>\n conf_overwrite: dict = GC.read_conf(config_file_overwrite)\n for sec, attr in conf_overwrite.items():\n for key, val in attr.items():\n try:\n _ = GC.conf[sec][key]\n GC.conf[sec][key] = val\n except KeyError:\n print(\"Overwrite config file has section/key that don't exist in base config!!! 
Abort!!!\")\n sys.exit(1)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000296", "length_bytes": 6236, "license_type": "permissive", "methods": [{"docstring": "Overwrite version of read config file", "name": "read", "signature": "def read(cls)"}, {"docstring": "Update im-momory conf with the overwrite config file.", "name": "overwrite", "signature": "def overwrite(cls, config_file_overwrite: str)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001789", "prompt": "Implement the Python class `GCO` described below.\n\nClass description:\nHandle overwrite config file. Take the module as helper instead of the derived class of GlobalConfig to be back compatible and avoid confusing. It only updates the global in-memory conf dict. It either cannot be merged with GlobalConfig for design reason. GlobalConfig should not depend on LOG_TYPE, but we have to depend on LOG_TYPE here to load the overwrite config file.\n\nMethod signatures and docstrings:\n- def read(cls): Overwrite version of read config file\n- def overwrite(cls, config_file_overwrite: str): Update im-momory conf with the overwrite config file.", "prompted_full_text": "Implement the Python class `GCO` described below.\n\nClass description:\nHandle overwrite config file. Take the module as helper instead of the derived class of GlobalConfig to be back compatible and avoid confusing. It only updates the global in-memory conf dict. It either cannot be merged with GlobalConfig for design reason. GlobalConfig should not depend on LOG_TYPE, but we have to depend on LOG_TYPE here to load the overwrite config file.\n\nMethod signatures and docstrings:\n- def read(cls): Overwrite version of read config file\n- def overwrite(cls, config_file_overwrite: str): Update im-momory conf with the overwrite config file.\n\n<|skeleton|>\nclass GCO:\n \"\"\"Handle overwrite config file. Take the module as helper instead of the derived class of GlobalConfig to be back compatible and avoid confusing. It only updates the global in-memory conf dict. It either cannot be merged with GlobalConfig for design reason. GlobalConfig should not depend on LOG_TYPE, but we have to depend on LOG_TYPE here to load the overwrite config file.\"\"\"\n\n def read(cls):\n \"\"\"Overwrite version of read config file\"\"\"\n <|body_0|>\n\n def overwrite(cls, config_file_overwrite: str):\n \"\"\"Update im-momory conf with the overwrite config file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n GC.read()\n if os.path.exists(CONFIG_OVERWRITE):\n cls.overwrite(CONFIG_OVERWRITE)\n<|end_body_0|>\n\n<|body_start_1|>\n conf_overwrite: dict = GC.read_conf(config_file_overwrite)\n for sec, attr in conf_overwrite.items():\n for key, val in attr.items():\n try:\n _ = GC.conf[sec][key]\n GC.conf[sec][key] = val\n except KeyError:\n print(\"Overwrite config file has section/key that don't exist in base config!!! Abort!!!\")\n sys.exit(1)\n<|end_body_1|>\n", "revision_id": "4dcb8a0044683372abb4bfad69fc4ba71162ecd5", "skeleton": "<|skeleton|>\nclass GCO:\n \"\"\"Handle overwrite config file. Take the module as helper instead of the derived class of GlobalConfig to be back compatible and avoid confusing. It only updates the global in-memory conf dict. It either cannot be merged with GlobalConfig for design reason. 
GlobalConfig should not depend on LOG_TYPE, but we have to depend on LOG_TYPE here to load the overwrite config file.\"\"\"\n\n def read(cls):\n \"\"\"Overwrite version of read config file\"\"\"\n <|body_0|>\n\n def overwrite(cls, config_file_overwrite: str):\n \"\"\"Update im-momory conf with the overwrite config file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GCO:\n \"\"\"Handle overwrite config file. Take the module as helper instead of the derived class of GlobalConfig to be back compatible and avoid confusing. It only updates the global in-memory conf dict. It either cannot be merged with GlobalConfig for design reason. GlobalConfig should not depend on LOG_TYPE, but we have to depend on LOG_TYPE here to load the overwrite config file.\"\"\"\n\n def read(cls):\n \"\"\"Overwrite version of read config file\"\"\"\n GC.read()\n if os.path.exists(CONFIG_OVERWRITE):\n cls.overwrite(CONFIG_OVERWRITE)\n\n def overwrite(cls, config_file_overwrite: str):\n \"\"\"Update im-momory conf with the overwrite config file.\"\"\"\n conf_overwrite: dict = GC.read_conf(config_file_overwrite)\n for sec, attr in conf_overwrite.items():\n for key, val in attr.items():\n try:\n _ = GC.conf[sec][key]\n GC.conf[sec][key] = val\n except KeyError:\n print(\"Overwrite config file has section/key that don't exist in base config!!! Abort!!!\")\n sys.exit(1)\n", "source": "the_stack_v2_python_sparse", "source_path": "analyzer/utils/data_helper.py", "source_repo": "hayhan/loganalyzer", "split": "val", "star_events_count": 2} {"blob_id": "579516258348d647606b131e9eb94318f9a306cf", "bodies": ["if not nums:\n return 0\nnums_set = set(nums)\nlongestConsecutiveLen = 0\ncount = 0\nfor num in nums_set:\n if num - 1 not in nums_set:\n current_num = num\n count = 1\n while current_num + 1 in nums_set:\n count += 1\n current_num += 1\n longestConsecutiveLen = max(longestConsecutiveLen, count)\nreturn longestConsecutiveLen", "if not nums:\n return 0\nnums.sort()\nlongestConsecutiveLen = 1\ncount = 1\nfor i in range(1, len(nums)):\n if nums[i] == nums[i - 1]:\n continue\n elif nums[i] - nums[i - 1] == 1:\n count += 1\n else:\n longestConsecutiveLen = max(longestConsecutiveLen, count)\n count = 1\nlongestConsecutiveLen = max(longestConsecutiveLen, count)\nreturn longestConsecutiveLen"], "bodies_text": "<|body_start_0|>\n if not nums:\n return 0\n nums_set = set(nums)\n longestConsecutiveLen = 0\n count = 0\n for num in nums_set:\n if num - 1 not in nums_set:\n current_num = num\n count = 1\n while current_num + 1 in nums_set:\n count += 1\n current_num += 1\n longestConsecutiveLen = max(longestConsecutiveLen, count)\n return longestConsecutiveLen\n<|end_body_0|>\n\n<|body_start_1|>\n if not nums:\n return 0\n nums.sort()\n longestConsecutiveLen = 1\n count = 1\n for i in range(1, len(nums)):\n if nums[i] == nums[i - 1]:\n continue\n elif nums[i] - nums[i - 1] == 1:\n count += 1\n else:\n longestConsecutiveLen = max(longestConsecutiveLen, count)\n count = 1\n longestConsecutiveLen = max(longestConsecutiveLen, count)\n return longestConsecutiveLen\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def longestConsecutive(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def longestConsecutiveV0(self, nums):\n \"\"\":type nums: List[int] 
:rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not nums:\n return 0\n nums_set = set(nums)\n longestConsecutiveLen = 0\n count = 0\n for num in nums_set:\n if num - 1 not in nums_set:\n current_num = num\n count = 1\n while current_num + 1 in nums_set:\n count += 1\n current_num += 1\n longestConsecutiveLen = max(longestConsecutiveLen, count)\n return longestConsecutiveLen\n<|end_body_0|>\n\n<|body_start_1|>\n if not nums:\n return 0\n nums.sort()\n longestConsecutiveLen = 1\n count = 1\n for i in range(1, len(nums)):\n if nums[i] == nums[i - 1]:\n continue\n elif nums[i] - nums[i - 1] == 1:\n count += 1\n else:\n longestConsecutiveLen = max(longestConsecutiveLen, count)\n count = 1\n longestConsecutiveLen = max(longestConsecutiveLen, count)\n return longestConsecutiveLen\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000297", "length_bytes": 1208, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :rtype: int", "name": "longestConsecutive", "signature": "def longestConsecutive(self, nums)"}, {"docstring": ":type nums: List[int] :rtype: int", "name": "longestConsecutiveV0", "signature": "def longestConsecutiveV0(self, nums)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002249", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def longestConsecutive(self, nums): :type nums: List[int] :rtype: int\n- def longestConsecutiveV0(self, nums): :type nums: List[int] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def longestConsecutive(self, nums): :type nums: List[int] :rtype: int\n- def longestConsecutiveV0(self, nums): :type nums: List[int] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def longestConsecutive(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def longestConsecutiveV0(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not nums:\n return 0\n nums_set = set(nums)\n longestConsecutiveLen = 0\n count = 0\n for num in nums_set:\n if num - 1 not in nums_set:\n current_num = num\n count = 1\n while current_num + 1 in nums_set:\n count += 1\n current_num += 1\n longestConsecutiveLen = max(longestConsecutiveLen, count)\n return longestConsecutiveLen\n<|end_body_0|>\n\n<|body_start_1|>\n if not nums:\n return 0\n nums.sort()\n longestConsecutiveLen = 1\n count = 1\n for i in range(1, len(nums)):\n if nums[i] == nums[i - 1]:\n continue\n elif nums[i] - nums[i - 1] == 1:\n count += 1\n else:\n longestConsecutiveLen = max(longestConsecutiveLen, count)\n count = 1\n longestConsecutiveLen = max(longestConsecutiveLen, count)\n return longestConsecutiveLen\n<|end_body_1|>\n", "revision_id": "76fdcec59b48c69120ebcf13a5374e6fc480c257", "skeleton": "<|skeleton|>\nclass Solution:\n\n def longestConsecutive(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def longestConsecutiveV0(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def longestConsecutive(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n if not nums:\n return 0\n nums_set = set(nums)\n 
longestConsecutiveLen = 0\n count = 0\n for num in nums_set:\n if num - 1 not in nums_set:\n current_num = num\n count = 1\n while current_num + 1 in nums_set:\n count += 1\n current_num += 1\n longestConsecutiveLen = max(longestConsecutiveLen, count)\n return longestConsecutiveLen\n\n def longestConsecutiveV0(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n if not nums:\n return 0\n nums.sort()\n longestConsecutiveLen = 1\n count = 1\n for i in range(1, len(nums)):\n if nums[i] == nums[i - 1]:\n continue\n elif nums[i] - nums[i - 1] == 1:\n count += 1\n else:\n longestConsecutiveLen = max(longestConsecutiveLen, count)\n count = 1\n longestConsecutiveLen = max(longestConsecutiveLen, count)\n return longestConsecutiveLen\n", "source": "the_stack_v2_python_sparse", "source_path": "lulu/longestConsecutiveSequence.py", "source_repo": "luluxing3/LeetCode", "split": "val", "star_events_count": 1} {"blob_id": "efa97d371bfd44da5e8a603f5e56499f7c0fd67d", "bodies": ["filtered = [x for x in self if addr in x.ucqm]\nblocks = sorted(filtered, key=lambda x: x.ucqm[addr]['mov_rssi'], reverse=True)\nreturn ResourcePool(blocks)", "blocks = []\nfor block in self.__iter__():\n if block.channel == channel:\n blocks.append(block)\nreturn ResourcePool(blocks)", "blocks = []\nfor block in self.__iter__():\n if block.band == band:\n blocks.append(block)\nreturn ResourcePool(blocks)", "if self:\n block = list.__getitem__(self, 0)\n return ResourcePool([block])\nreturn ResourcePool()", "if self:\n block = list.__getitem__(self, -1)\n return ResourcePool([block])\nreturn ResourcePool()"], "bodies_text": "<|body_start_0|>\n filtered = [x for x in self if addr in x.ucqm]\n blocks = sorted(filtered, key=lambda x: x.ucqm[addr]['mov_rssi'], reverse=True)\n return ResourcePool(blocks)\n<|end_body_0|>\n\n<|body_start_1|>\n blocks = []\n for block in self.__iter__():\n if block.channel == channel:\n blocks.append(block)\n return ResourcePool(blocks)\n<|end_body_1|>\n\n<|body_start_2|>\n blocks = []\n for block in self.__iter__():\n if block.band == band:\n blocks.append(block)\n return ResourcePool(blocks)\n<|end_body_2|>\n\n<|body_start_3|>\n if self:\n block = list.__getitem__(self, 0)\n return ResourcePool([block])\n return ResourcePool()\n<|end_body_3|>\n\n<|body_start_4|>\n if self:\n block = list.__getitem__(self, -1)\n return ResourcePool([block])\n return ResourcePool()\n<|end_body_4|>\n", "class_docstring": "Resource pool. This extends the list in order to add a few filtering and sorting methods", "class_name": "ResourcePool", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ResourcePool:\n \"\"\"Resource pool. 
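A runnable sketch of why the set-based longestConsecutive above is O(n): counting only starts at a number with no left neighbor in the set, so each run of consecutive values is walked exactly once. The input values are illustrative.

nums = [100, 4, 200, 1, 3, 2]            # illustrative input
nums_set = set(nums)
best = 0
for num in nums_set:
    if num - 1 not in nums_set:          # start only at the left end of a run
        length = 1
        while num + length in nums_set:  # walk the run to the right, once
            length += 1
        best = max(best, length)
print(best)                              # 4, from the run 1, 2, 3, 4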
This extends the list in order to add a few filtering and sorting methods\"\"\"\n\n def sort_by_rssi(self, addr):\n \"\"\"Return list sorted by rssi for the specific address.\"\"\"\n <|body_0|>\n\n def filter_by_channel(self, channel):\n \"\"\"Return list sorted filtered by channel.\"\"\"\n <|body_1|>\n\n def filter_by_band(self, band):\n \"\"\"Return list sorted filtered by band.\"\"\"\n <|body_2|>\n\n def first(self):\n \"\"\"Return first entry in the list.\"\"\"\n <|body_3|>\n\n def last(self):\n \"\"\"Return last entry in the list.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n filtered = [x for x in self if addr in x.ucqm]\n blocks = sorted(filtered, key=lambda x: x.ucqm[addr]['mov_rssi'], reverse=True)\n return ResourcePool(blocks)\n<|end_body_0|>\n\n<|body_start_1|>\n blocks = []\n for block in self.__iter__():\n if block.channel == channel:\n blocks.append(block)\n return ResourcePool(blocks)\n<|end_body_1|>\n\n<|body_start_2|>\n blocks = []\n for block in self.__iter__():\n if block.band == band:\n blocks.append(block)\n return ResourcePool(blocks)\n<|end_body_2|>\n\n<|body_start_3|>\n if self:\n block = list.__getitem__(self, 0)\n return ResourcePool([block])\n return ResourcePool()\n<|end_body_3|>\n\n<|body_start_4|>\n if self:\n block = list.__getitem__(self, -1)\n return ResourcePool([block])\n return ResourcePool()\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000298", "length_bytes": 7116, "license_type": "permissive", "methods": [{"docstring": "Return list sorted by rssi for the specific address.", "name": "sort_by_rssi", "signature": "def sort_by_rssi(self, addr)"}, {"docstring": "Return list sorted filtered by channel.", "name": "filter_by_channel", "signature": "def filter_by_channel(self, channel)"}, {"docstring": "Return list sorted filtered by band.", "name": "filter_by_band", "signature": "def filter_by_band(self, band)"}, {"docstring": "Return first entry in the list.", "name": "first", "signature": "def first(self)"}, {"docstring": "Return last entry in the list.", "name": "last", "signature": "def last(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_004450", "prompt": "Implement the Python class `ResourcePool` described below.\n\nClass description:\nResource pool. This extends the list in order to add a few filtering and sorting methods\n\nMethod signatures and docstrings:\n- def sort_by_rssi(self, addr): Return list sorted by rssi for the specific address.\n- def filter_by_channel(self, channel): Return list sorted filtered by channel.\n- def filter_by_band(self, band): Return list sorted filtered by band.\n- def first(self): Return first entry in the list.\n- def last(self): Return last entry in the list.", "prompted_full_text": "Implement the Python class `ResourcePool` described below.\n\nClass description:\nResource pool. This extends the list in order to add a few filtering and sorting methods\n\nMethod signatures and docstrings:\n- def sort_by_rssi(self, addr): Return list sorted by rssi for the specific address.\n- def filter_by_channel(self, channel): Return list sorted filtered by channel.\n- def filter_by_band(self, band): Return list sorted filtered by band.\n- def first(self): Return first entry in the list.\n- def last(self): Return last entry in the list.\n\n<|skeleton|>\nclass ResourcePool:\n \"\"\"Resource pool. 
This extends the list in order to add a few filtering and sorting methods\"\"\"\n\n def sort_by_rssi(self, addr):\n \"\"\"Return list sorted by rssi for the specific address.\"\"\"\n <|body_0|>\n\n def filter_by_channel(self, channel):\n \"\"\"Return list sorted filtered by channel.\"\"\"\n <|body_1|>\n\n def filter_by_band(self, band):\n \"\"\"Return list sorted filtered by band.\"\"\"\n <|body_2|>\n\n def first(self):\n \"\"\"Return first entry in the list.\"\"\"\n <|body_3|>\n\n def last(self):\n \"\"\"Return last entry in the list.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n filtered = [x for x in self if addr in x.ucqm]\n blocks = sorted(filtered, key=lambda x: x.ucqm[addr]['mov_rssi'], reverse=True)\n return ResourcePool(blocks)\n<|end_body_0|>\n\n<|body_start_1|>\n blocks = []\n for block in self.__iter__():\n if block.channel == channel:\n blocks.append(block)\n return ResourcePool(blocks)\n<|end_body_1|>\n\n<|body_start_2|>\n blocks = []\n for block in self.__iter__():\n if block.band == band:\n blocks.append(block)\n return ResourcePool(blocks)\n<|end_body_2|>\n\n<|body_start_3|>\n if self:\n block = list.__getitem__(self, 0)\n return ResourcePool([block])\n return ResourcePool()\n<|end_body_3|>\n\n<|body_start_4|>\n if self:\n block = list.__getitem__(self, -1)\n return ResourcePool([block])\n return ResourcePool()\n<|end_body_4|>\n", "revision_id": "ad81b04937ff1db82ea2a4e8218422ca3437401c", "skeleton": "<|skeleton|>\nclass ResourcePool:\n \"\"\"Resource pool. This extends the list in order to add a few filtering and sorting methods\"\"\"\n\n def sort_by_rssi(self, addr):\n \"\"\"Return list sorted by rssi for the specific address.\"\"\"\n <|body_0|>\n\n def filter_by_channel(self, channel):\n \"\"\"Return list sorted filtered by channel.\"\"\"\n <|body_1|>\n\n def filter_by_band(self, band):\n \"\"\"Return list sorted filtered by band.\"\"\"\n <|body_2|>\n\n def first(self):\n \"\"\"Return first entry in the list.\"\"\"\n <|body_3|>\n\n def last(self):\n \"\"\"Return last entry in the list.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ResourcePool:\n \"\"\"Resource pool. 
This extends the list in order to add a few filtering and sorting methods\"\"\"\n\n def sort_by_rssi(self, addr):\n \"\"\"Return list sorted by rssi for the specific address.\"\"\"\n filtered = [x for x in self if addr in x.ucqm]\n blocks = sorted(filtered, key=lambda x: x.ucqm[addr]['mov_rssi'], reverse=True)\n return ResourcePool(blocks)\n\n def filter_by_channel(self, channel):\n \"\"\"Return list sorted filtered by channel.\"\"\"\n blocks = []\n for block in self.__iter__():\n if block.channel == channel:\n blocks.append(block)\n return ResourcePool(blocks)\n\n def filter_by_band(self, band):\n \"\"\"Return list sorted filtered by band.\"\"\"\n blocks = []\n for block in self.__iter__():\n if block.band == band:\n blocks.append(block)\n return ResourcePool(blocks)\n\n def first(self):\n \"\"\"Return first entry in the list.\"\"\"\n if self:\n block = list.__getitem__(self, 0)\n return ResourcePool([block])\n return ResourcePool()\n\n def last(self):\n \"\"\"Return last entry in the list.\"\"\"\n if self:\n block = list.__getitem__(self, -1)\n return ResourcePool([block])\n return ResourcePool()\n", "source": "the_stack_v2_python_sparse", "source_path": "empower/managers/ranmanager/lvapp/resourcepool.py", "source_repo": "5g-empower/empower-runtime", "split": "val", "star_events_count": 55} {"blob_id": "cb6f3da2f7cb83f8ab52ee54c9479923a60a8bb7", "bodies": ["dic = {*wordDict}\nn = len(s)\ndp = [0] * (n + 1)\ndp[0] = 1\nfor i in range(1, n + 1):\n for j in wordDict:\n if s[i - len(j):i] in dic and dp[i - len(j)] == 1:\n dp[i] = 1\n break\nreturn True if dp[-1] == 1 else False", "@lru_cache()\ndef dfs(l):\n if l == len(s):\n self.flag = 1\n return\n for word in wordDict:\n if len(s[l:]) >= len(word) and s[l:].startswith(word):\n dfs(l + len(word))\ndfs(0)\nreturn True if self.flag == 1 else False"], "bodies_text": "<|body_start_0|>\n dic = {*wordDict}\n n = len(s)\n dp = [0] * (n + 1)\n dp[0] = 1\n for i in range(1, n + 1):\n for j in wordDict:\n if s[i - len(j):i] in dic and dp[i - len(j)] == 1:\n dp[i] = 1\n break\n return True if dp[-1] == 1 else False\n<|end_body_0|>\n\n<|body_start_1|>\n @lru_cache()\n def dfs(l):\n if l == len(s):\n self.flag = 1\n return\n for word in wordDict:\n if len(s[l:]) >= len(word) and s[l:].startswith(word):\n dfs(l + len(word))\n dfs(0)\n return True if self.flag == 1 else False\n<|end_body_1|>\n", "class_docstring": "Problem: determine whether the given string s can be formed by joining words from wordDict; each word may be used multiple times", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n \"\"\"Problem: determine whether the given string s can be formed by joining words from wordDict; each word may be used multiple times\"\"\"\n\n def wordBreak1(self, s: str, wordDict: List[str]) -> bool:\n \"\"\"Approach: dynamic programming. 1. Check whether each position in s is preceded by\"\"\"\n <|body_0|>\n\n def wordBreak2(self, s: str, wordDict: List[str]) -> bool:\n \"\"\"Approach: DFS. 1. 
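The ResourcePool record that ends above documents the class as a list extension, and its methods (list.__getitem__, iteration over self) only work if the class is declared as a list subclass, which the skeleton omits. A hypothetical stand-in with namedtuple blocks shows the intended chained-filtering usage; the Block shape and channel/band values are assumptions for illustration.

from collections import namedtuple

Block = namedtuple('Block', ['channel', 'band', 'ucqm'])  # hypothetical block shape

class Pool(list):  # stand-in for ResourcePool(list)
    def filter_by_channel(self, channel):
        return Pool(b for b in self if b.channel == channel)

    def first(self):
        return Pool([self[0]]) if self else Pool()

pool = Pool([Block(36, 'HT20', {}), Block(6, 'HT20', {}), Block(36, 'HT40', {})])
print(len(pool.filter_by_channel(36)))             # 2
print(pool.filter_by_channel(36).first()[0].band)  # HT20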
Match s against the words in wordDict; on a match, keep matching wordDict against the rest of s until s is matched exactly. 2.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dic = {*wordDict}\n n = len(s)\n dp = [0] * (n + 1)\n dp[0] = 1\n for i in range(1, n + 1):\n for j in wordDict:\n if s[i - len(j):i] in dic and dp[i - len(j)] == 1:\n dp[i] = 1\n break\n return True if dp[-1] == 1 else False\n<|end_body_0|>\n\n<|body_start_1|>\n @lru_cache()\n def dfs(l):\n if l == len(s):\n self.flag = 1\n return\n for word in wordDict:\n if len(s[l:]) >= len(word) and s[l:].startswith(word):\n dfs(l + len(word))\n dfs(0)\n return True if self.flag == 1 else False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000299", "length_bytes": 2350, "license_type": "no_license", "methods": [{"docstring": "Approach: dynamic programming. 1. Check whether each position in s is preceded by", "name": "wordBreak1", "signature": "def wordBreak1(self, s: str, wordDict: List[str]) -> bool"}, {"docstring": "Approach: DFS. 1. Match s against the words in wordDict; on a match, keep matching wordDict against the rest of s until s is matched exactly. 2.", "name": "wordBreak2", "signature": "def wordBreak2(self, s: str, wordDict: List[str]) -> bool"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nProblem: determine whether the given string s can be formed by joining words from wordDict; each word may be used multiple times\n\nMethod signatures and docstrings:\n- def wordBreak1(self, s: str, wordDict: List[str]) -> bool: Approach: dynamic programming. 1. Check whether each position in s is preceded by\n- def wordBreak2(self, s: str, wordDict: List[str]) -> bool: Approach: DFS. 1. Match s against the words in wordDict; on a match, keep matching wordDict against the rest of s until s is matched exactly. 2.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nProblem: determine whether the given string s can be formed by joining words from wordDict; each word may be used multiple times\n\nMethod signatures and docstrings:\n- def wordBreak1(self, s: str, wordDict: List[str]) -> bool: Approach: dynamic programming. 1. Check whether each position in s is preceded by\n- def wordBreak2(self, s: str, wordDict: List[str]) -> bool: Approach: DFS. 1. Match s against the words in wordDict; on a match, keep matching wordDict against the rest of s until s is matched exactly. 2.\n\n<|skeleton|>\nclass Solution:\n \"\"\"Problem: determine whether the given string s can be formed by joining words from wordDict; each word may be used multiple times\"\"\"\n\n def wordBreak1(self, s: str, wordDict: List[str]) -> bool:\n \"\"\"Approach: dynamic programming. 1. Check whether each position in s is preceded by\"\"\"\n <|body_0|>\n\n def wordBreak2(self, s: str, wordDict: List[str]) -> bool:\n \"\"\"Approach: DFS. 1. Match s against the words in wordDict; on a match, keep matching wordDict against the rest of s until s is matched exactly. 2.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dic = {*wordDict}\n n = len(s)\n dp = [0] * (n + 1)\n dp[0] = 1\n for i in range(1, n + 1):\n for j in wordDict:\n if s[i - len(j):i] in dic and dp[i - len(j)] == 1:\n dp[i] = 1\n break\n return True if dp[-1] == 1 else False\n<|end_body_0|>\n\n<|body_start_1|>\n @lru_cache()\n def dfs(l):\n if l == len(s):\n self.flag = 1\n return\n for word in wordDict:\n if len(s[l:]) >= len(word) and s[l:].startswith(word):\n dfs(l + len(word))\n dfs(0)\n return True if self.flag == 1 else False\n<|end_body_1|>\n", "revision_id": "e43ee86c5a8cdb808da09b4b6138e10275abadb5", "skeleton": "<|skeleton|>\nclass Solution:\n \"\"\"Problem: determine whether the given string s can be formed by joining words from wordDict; each word may be used multiple times\"\"\"\n\n def wordBreak1(self, s: str, wordDict: List[str]) -> bool:\n \"\"\"Approach: dynamic programming. 1. Check whether each position in s is preceded by\"\"\"\n <|body_0|>\n\n def wordBreak2(self, s: str, wordDict: List[str]) -> bool:\n \"\"\"Approach: DFS. 1. Match s against the words in wordDict; on a match, keep matching wordDict against the rest of s until s is matched exactly. 2.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n \"\"\"Problem: determine whether the given string s can be formed by joining words from wordDict; each word may be used multiple times\"\"\"\n\n def wordBreak1(self, s: str, wordDict: List[str]) -> bool:\n \"\"\"Approach: dynamic programming. 1. 
Check whether each position in s is preceded by\"\"\"\n dic = {*wordDict}\n n = len(s)\n dp = [0] * (n + 1)\n dp[0] = 1\n for i in range(1, n + 1):\n for j in wordDict:\n if s[i - len(j):i] in dic and dp[i - len(j)] == 1:\n dp[i] = 1\n break\n return True if dp[-1] == 1 else False\n\n def wordBreak2(self, s: str, wordDict: List[str]) -> bool:\n \"\"\"Approach: DFS. 1. Match s against the words in wordDict; on a match, keep matching wordDict against the rest of s until s is matched exactly. 2.\"\"\"\n @lru_cache()\n def dfs(l):\n if l == len(s):\n self.flag = 1\n return\n for word in wordDict:\n if len(s[l:]) >= len(word) and s[l:].startswith(word):\n dfs(l + len(word))\n dfs(0)\n return True if self.flag == 1 else False\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode/动态规划法(dp)/139. 单词拆分.py", "source_repo": "yiming1012/MyLeetCode", "split": "val", "star_events_count": 2} {"blob_id": "89eda3da74e5dbc3b50d3c619fe0c0e9db991f61", "bodies": ["self.month = month\nself.net = net\nself.additional_properties = additional_properties", "if dictionary is None:\n return None\nmonth = dictionary.get('month')\nnet = dictionary.get('net')\nfor key in cls._names.values():\n if key in dictionary:\n del dictionary[key]\nreturn cls(month, net, dictionary)"], "bodies_text": "<|body_start_0|>\n self.month = month\n self.net = net\n self.additional_properties = additional_properties\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n month = dictionary.get('month')\n net = dictionary.get('net')\n for key in cls._names.values():\n if key in dictionary:\n del dictionary[key]\n return cls(month, net, dictionary)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'NetMonthly' model. TODO: type model description here. Attributes: month (long|int): Timestamp for the first day of this month net (float): Total income during the given month, across all income streams", "class_name": "NetMonthly", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NetMonthly:\n \"\"\"Implementation of the 'NetMonthly' model. TODO: type model description here. Attributes: month (long|int): Timestamp for the first day of this month net (float): Total income during the given month, across all income streams\"\"\"\n\n def __init__(self, month=None, net=None, additional_properties={}):\n \"\"\"Constructor for the NetMonthly class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
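The dp recurrence from wordBreak1 above, traced on a small input: dp[i] == 1 iff the prefix s[:i] is segmentable, and position i becomes reachable when some word ends exactly at i with a reachable position in front of it. The input strings are illustrative.

s, wordDict = 'leetcode', ['leet', 'code']  # illustrative input
dic = {*wordDict}
dp = [0] * (len(s) + 1)
dp[0] = 1                                   # the empty prefix is always segmentable
for i in range(1, len(s) + 1):
    for w in wordDict:
        if s[i - len(w):i] in dic and dp[i - len(w)] == 1:
            dp[i] = 1                       # word w ends at i, prefix before it reachable
            break
print(bool(dp[-1]))                         # True: 'leet' + 'code'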
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.month = month\n self.net = net\n self.additional_properties = additional_properties\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n month = dictionary.get('month')\n net = dictionary.get('net')\n for key in cls._names.values():\n if key in dictionary:\n del dictionary[key]\n return cls(month, net, dictionary)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000300", "length_bytes": 1874, "license_type": "permissive", "methods": [{"docstring": "Constructor for the NetMonthly class", "name": "__init__", "signature": "def __init__(self, month=None, net=None, additional_properties={})"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004442", "prompt": "Implement the Python class `NetMonthly` described below.\n\nClass description:\nImplementation of the 'NetMonthly' model. TODO: type model description here. Attributes: month (long|int): Timestamp for the first day of this month net (float): Total income during the given month, across all income streams\n\nMethod signatures and docstrings:\n- def __init__(self, month=None, net=None, additional_properties={}): Constructor for the NetMonthly class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `NetMonthly` described below.\n\nClass description:\nImplementation of the 'NetMonthly' model. TODO: type model description here. Attributes: month (long|int): Timestamp for the first day of this month net (float): Total income during the given month, across all income streams\n\nMethod signatures and docstrings:\n- def __init__(self, month=None, net=None, additional_properties={}): Constructor for the NetMonthly class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass NetMonthly:\n \"\"\"Implementation of the 'NetMonthly' model. TODO: type model description here. Attributes: month (long|int): Timestamp for the first day of this month net (float): Total income during the given month, across all income streams\"\"\"\n\n def __init__(self, month=None, net=None, additional_properties={}):\n \"\"\"Constructor for the NetMonthly class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.month = month\n self.net = net\n self.additional_properties = additional_properties\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n month = dictionary.get('month')\n net = dictionary.get('net')\n for key in cls._names.values():\n if key in dictionary:\n del dictionary[key]\n return cls(month, net, dictionary)\n<|end_body_1|>\n", "revision_id": "b2ab1ded435db75c78d42261f5e4acd2a3061487", "skeleton": "<|skeleton|>\nclass NetMonthly:\n \"\"\"Implementation of the 'NetMonthly' model. TODO: type model description here. Attributes: month (long|int): Timestamp for the first day of this month net (float): Total income during the given month, across all income streams\"\"\"\n\n def __init__(self, month=None, net=None, additional_properties={}):\n \"\"\"Constructor for the NetMonthly class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NetMonthly:\n \"\"\"Implementation of the 'NetMonthly' model. TODO: type model description here. Attributes: month (long|int): Timestamp for the first day of this month net (float): Total income during the given month, across all income streams\"\"\"\n\n def __init__(self, month=None, net=None, additional_properties={}):\n \"\"\"Constructor for the NetMonthly class\"\"\"\n self.month = month\n self.net = net\n self.additional_properties = additional_properties\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n month = dictionary.get('month')\n net = dictionary.get('net')\n for key in cls._names.values():\n if key in dictionary:\n del dictionary[key]\n return cls(month, net, dictionary)\n", "source": "the_stack_v2_python_sparse", "source_path": "finicityapi/models/net_monthly.py", "source_repo": "monarchmoney/finicity-python", "split": "val", "star_events_count": 0} {"blob_id": "40cc93c73c7358a03e501acc0f7007eb0f1a487d", "bodies": ["family, face, font = self.match(fontStyle, mode)\nbitmapFont = wxBitmapFont(fontStyle, font=font)\nself.addFont(fontStyle, bitmapFont)\nreturn bitmapFont", "family, face = self.FAMILYMAPPING.get('SERIF')\nif fontStyle and fontStyle.family:\n for specifier in fontStyle.family:\n specifier = specifier.lower()\n current = self.FAMILYMAPPING.get(specifier.upper())\n if current:\n family, face = current\n break\n for name in self.enumerate():\n if name.find(specifier) > -1:\n self.FAMILYMAPPING[specifier] = (wx.DEFAULT, name)\n family, face = (wx.DEFAULT, name)\nreturn self.calculatePointSize(fontStyle, family, face, mode)", "height = 0\ndc = wx.MemoryDC()\nbm = wx.EmptyBitmap(1, 1)\ndc.SelectObject(bm)\nif fontStyle and fontStyle.size:\n targetSize = fontStyle.size * self.scale\nelse:\n targetSize = self.scale\nfont = None\nfor testSize in range(1, int(targetSize * 3)):\n font = wx.Font(testSize, family, wx.NORMAL, wx.NORMAL, 0, face)\n dc.SetFont(font)\n width, height = dc.GetTextExtent('F')\n if height >= targetSize:\n if __debug__:\n if height != targetSize:\n log.warn('wxBitmapFont Using point size %s for pixel size %s, actual pixel size %s', testSize, targetSize, height)\n break\nif font is None:\n raise ValueError('Invalid font-size specified (%s), no font found to match that size (target = %s pixels)' % (fontStyle.size, targetSize))\nreturn (family, face, font)", "if not self.systemNames:\n enumerator = wx.FontEnumerator()\n enumerator.EnumerateFacenames()\n systemNames = enumerator.GetFacenames()\n self.systemNames = systemNames = [item.lower() for item in systemNames]\nreturn self.systemNames"], "bodies_text": "<|body_start_0|>\n family, face, font = self.match(fontStyle, mode)\n bitmapFont = wxBitmapFont(fontStyle, font=font)\n self.addFont(fontStyle, bitmapFont)\n return bitmapFont\n<|end_body_0|>\n\n<|body_start_1|>\n family, face = self.FAMILYMAPPING.get('SERIF')\n if fontStyle and fontStyle.family:\n for specifier in fontStyle.family:\n specifier = specifier.lower()\n current = self.FAMILYMAPPING.get(specifier.upper())\n if current:\n family, face = current\n break\n for name in self.enumerate():\n if name.find(specifier) > -1:\n self.FAMILYMAPPING[specifier] = (wx.DEFAULT, name)\n family, face = (wx.DEFAULT, name)\n return self.calculatePointSize(fontStyle, family, face, mode)\n<|end_body_1|>\n\n<|body_start_2|>\n height = 0\n dc = wx.MemoryDC()\n bm = wx.EmptyBitmap(1, 1)\n dc.SelectObject(bm)\n if fontStyle and fontStyle.size:\n targetSize = fontStyle.size * self.scale\n else:\n targetSize = self.scale\n font = None\n for testSize in range(1, int(targetSize * 3)):\n font = wx.Font(testSize, family, wx.NORMAL, wx.NORMAL, 0, face)\n dc.SetFont(font)\n width, height = dc.GetTextExtent('F')\n if height >= targetSize:\n if __debug__:\n if height != targetSize:\n log.warn('wxBitmapFont Using point size %s for pixel size %s, actual pixel size %s', testSize, targetSize, height)\n break\n if font is None:\n raise ValueError('Invalid font-size specified (%s), no 
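A hedged round-trip sketch for the NetMonthly record above. Two caveats the record leaves implicit: from_dictionary reads cls._names, a mapping the skeleton never defines (a real model class would declare it), and the method is written as a classmethod without the decorator. The _names mapping and sample values below are assumptions.

class NetMonthly:
    _names = {'month': 'month', 'net': 'net'}  # assumed mapping, absent from the record

    def __init__(self, month=None, net=None, additional_properties=None):
        self.month = month
        self.net = net
        # a None default avoids the shared mutable-default pitfall of the original signature
        self.additional_properties = additional_properties if additional_properties is not None else {}

    @classmethod
    def from_dictionary(cls, dictionary):
        if dictionary is None:
            return None
        month = dictionary.get('month')
        net = dictionary.get('net')
        for key in cls._names.values():   # strip known keys, keep the rest as extras
            dictionary.pop(key, None)
        return cls(month, net, dictionary)

m = NetMonthly.from_dictionary({'month': 1546300800, 'net': 1250.0, 'extra': 'x'})
print(m.month, m.net, m.additional_properties)  # 1546300800 1250.0 {'extra': 'x'}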
font found to match that size (target = %s pixels)' % (fontStyle.size, targetSize))\n return (family, face, font)\n<|end_body_2|>\n\n<|body_start_3|>\n if not self.systemNames:\n enumerator = wx.FontEnumerator()\n enumerator.EnumerateFacenames()\n systemNames = enumerator.GetFacenames()\n self.systemNames = systemNames = [item.lower() for item in systemNames]\n return self.systemNames\n<|end_body_3|>\n", "class_docstring": "Singleton for creating new wxBitmapFonts Note: This provider MUST NOT be used under a non-wxPython context under Linux/GTK, as it WILL cause segmentation faults when the wxPython system tries to access the font list from the wxPython application.", "class_name": "_wxFontProvider", "detected_licenses": ["MIT", "GPL-1.0-or-later", "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-other-copyleft", "LGPL-2.1-or-later", "GPL-3.0-only", "LGPL-2.0-or-later", "GPL-3.0-or-later"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass _wxFontProvider:\n \"\"\"Singleton for creating new wxBitmapFonts Note: This provider MUST NOT be used under a non-wxPython context under Linux/GTK, as it WILL cause segmentation faults when the wxPython system tries to access the font list from the wxPython application.\"\"\"\n\n def create(self, fontStyle, mode=None):\n \"\"\"Create a new font for the given fontStyle and mode\"\"\"\n <|body_0|>\n\n def match(self, fontStyle, mode=None):\n \"\"\"Attempt to find matching wxFont for our fontstyle This is a really stupid implementation, it just takes the first font that includes the name specified in the fontstyle.\"\"\"\n <|body_1|>\n\n def calculatePointSize(self, fontStyle, family, face, mode=None):\n \"\"\"Approximate point size for fontStyle with font\"\"\"\n <|body_2|>\n\n def enumerate(self, mode=None):\n \"\"\"Iterate through all available font-keys (whether instantiated or not) This uses the wxFontEnumerator class to provide a list of font names from the wxPython system, (with all names lowercased).\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n family, face, font = self.match(fontStyle, mode)\n bitmapFont = wxBitmapFont(fontStyle, font=font)\n self.addFont(fontStyle, bitmapFont)\n return bitmapFont\n<|end_body_0|>\n\n<|body_start_1|>\n family, face = self.FAMILYMAPPING.get('SERIF')\n if fontStyle and fontStyle.family:\n for specifier in fontStyle.family:\n specifier = specifier.lower()\n current = self.FAMILYMAPPING.get(specifier.upper())\n if current:\n family, face = current\n break\n for name in self.enumerate():\n if name.find(specifier) > -1:\n self.FAMILYMAPPING[specifier] = (wx.DEFAULT, name)\n family, face = (wx.DEFAULT, name)\n return self.calculatePointSize(fontStyle, family, face, mode)\n<|end_body_1|>\n\n<|body_start_2|>\n height = 0\n dc = wx.MemoryDC()\n bm = wx.EmptyBitmap(1, 1)\n dc.SelectObject(bm)\n if fontStyle and fontStyle.size:\n targetSize = fontStyle.size * self.scale\n else:\n targetSize = self.scale\n font = None\n for testSize in range(1, int(targetSize * 3)):\n font = wx.Font(testSize, family, wx.NORMAL, wx.NORMAL, 0, face)\n dc.SetFont(font)\n width, height = dc.GetTextExtent('F')\n if height >= targetSize:\n if __debug__:\n if height != targetSize:\n log.warn('wxBitmapFont Using point size %s for pixel size %s, actual pixel size %s', testSize, targetSize, height)\n break\n if font is None:\n raise ValueError('Invalid font-size specified (%s), no font found to match that size (target = %s pixels)' % (fontStyle.size, targetSize))\n return (family, 
face, font)\n<|end_body_2|>\n\n<|body_start_3|>\n if not self.systemNames:\n enumerator = wx.FontEnumerator()\n enumerator.EnumerateFacenames()\n systemNames = enumerator.GetFacenames()\n self.systemNames = systemNames = [item.lower() for item in systemNames]\n return self.systemNames\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000301", "length_bytes": 11262, "license_type": "permissive", "methods": [{"docstring": "Create a new font for the given fontStyle and mode", "name": "create", "signature": "def create(self, fontStyle, mode=None)"}, {"docstring": "Attempt to find matching wxFont for our fontstyle This is a really stupid implementation, it just takes the first font that includes the name specified in the fontstyle.", "name": "match", "signature": "def match(self, fontStyle, mode=None)"}, {"docstring": "Approximate point size for fontStyle with font", "name": "calculatePointSize", "signature": "def calculatePointSize(self, fontStyle, family, face, mode=None)"}, {"docstring": "Iterate through all available font-keys (whether instantiated or not) This uses the wxFontEnumerator class to provide a list of font names from the wxPython system, (with all names lowercased).", "name": "enumerate", "signature": "def enumerate(self, mode=None)"}], "n_methods": 4, "prompt": "Implement the Python class `_wxFontProvider` described below.\n\nClass description:\nSingleton for creating new wxBitmapFonts Note: This provider MUST NOT be used under a non-wxPython context under Linux/GTK, as it WILL cause segmentation faults when the wxPython system tries to access the font list from the wxPython application.\n\nMethod signatures and docstrings:\n- def create(self, fontStyle, mode=None): Create a new font for the given fontStyle and mode\n- def match(self, fontStyle, mode=None): Attempt to find matching wxFont for our fontstyle This is a really stupid implementation, it just takes the first font that includes the name specified in the fontstyle.\n- def calculatePointSize(self, fontStyle, family, face, mode=None): Approximate point size for fontStyle with font\n- def enumerate(self, mode=None): Iterate through all available font-keys (whether instantiated or not) This uses the wxFontEnumerator class to provide a list of font names from the wxPython system, (with all names lowercased).", "prompted_full_text": "Implement the Python class `_wxFontProvider` described below.\n\nClass description:\nSingleton for creating new wxBitmapFonts Note: This provider MUST NOT be used under a non-wxPython context under Linux/GTK, as it WILL cause segmentation faults when the wxPython system tries to access the font list from the wxPython application.\n\nMethod signatures and docstrings:\n- def create(self, fontStyle, mode=None): Create a new font for the given fontStyle and mode\n- def match(self, fontStyle, mode=None): Attempt to find matching wxFont for our fontstyle This is a really stupid implementation, it just takes the first font that includes the name specified in the fontstyle.\n- def calculatePointSize(self, fontStyle, family, face, mode=None): Approximate point size for fontStyle with font\n- def enumerate(self, mode=None): Iterate through all available font-keys (whether instantiated or not) This uses the wxFontEnumerator class to provide a list of font names from the wxPython system, (with all names lowercased).\n\n<|skeleton|>\nclass _wxFontProvider:\n \"\"\"Singleton for creating new wxBitmapFonts Note: This provider MUST NOT be used under a non-wxPython context under Linux/GTK, as it WILL 
cause segmentation faults when the wxPython system tries to access the font list from the wxPython application.\"\"\"\n\n def create(self, fontStyle, mode=None):\n \"\"\"Create a new font for the given fontStyle and mode\"\"\"\n <|body_0|>\n\n def match(self, fontStyle, mode=None):\n \"\"\"Attempt to find matching wxFont for our fontstyle This is a really stupid implementation, it just takes the first font that includes the name specified in the fontstyle.\"\"\"\n <|body_1|>\n\n def calculatePointSize(self, fontStyle, family, face, mode=None):\n \"\"\"Approximate point size for fontStyle with font\"\"\"\n <|body_2|>\n\n def enumerate(self, mode=None):\n \"\"\"Iterate through all available font-keys (whether instantiated or not) This uses the wxFontEnumerator class to provide a list of font names from the wxPython system, (with all names lowercased).\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n family, face, font = self.match(fontStyle, mode)\n bitmapFont = wxBitmapFont(fontStyle, font=font)\n self.addFont(fontStyle, bitmapFont)\n return bitmapFont\n<|end_body_0|>\n\n<|body_start_1|>\n family, face = self.FAMILYMAPPING.get('SERIF')\n if fontStyle and fontStyle.family:\n for specifier in fontStyle.family:\n specifier = specifier.lower()\n current = self.FAMILYMAPPING.get(specifier.upper())\n if current:\n family, face = current\n break\n for name in self.enumerate():\n if name.find(specifier) > -1:\n self.FAMILYMAPPING[specifier] = (wx.DEFAULT, name)\n family, face = (wx.DEFAULT, name)\n return self.calculatePointSize(fontStyle, family, face, mode)\n<|end_body_1|>\n\n<|body_start_2|>\n height = 0\n dc = wx.MemoryDC()\n bm = wx.EmptyBitmap(1, 1)\n dc.SelectObject(bm)\n if fontStyle and fontStyle.size:\n targetSize = fontStyle.size * self.scale\n else:\n targetSize = self.scale\n font = None\n for testSize in range(1, int(targetSize * 3)):\n font = wx.Font(testSize, family, wx.NORMAL, wx.NORMAL, 0, face)\n dc.SetFont(font)\n width, height = dc.GetTextExtent('F')\n if height >= targetSize:\n if __debug__:\n if height != targetSize:\n log.warn('wxBitmapFont Using point size %s for pixel size %s, actual pixel size %s', testSize, targetSize, height)\n break\n if font is None:\n raise ValueError('Invalid font-size specified (%s), no font found to match that size (target = %s pixels)' % (fontStyle.size, targetSize))\n return (family, face, font)\n<|end_body_2|>\n\n<|body_start_3|>\n if not self.systemNames:\n enumerator = wx.FontEnumerator()\n enumerator.EnumerateFacenames()\n systemNames = enumerator.GetFacenames()\n self.systemNames = systemNames = [item.lower() for item in systemNames]\n return self.systemNames\n<|end_body_3|>\n", "revision_id": "7f600ad153270feff12aa7aa86d7ed0a49ebc71c", "skeleton": "<|skeleton|>\nclass _wxFontProvider:\n \"\"\"Singleton for creating new wxBitmapFonts Note: This provider MUST NOT be used under a non-wxPython context under Linux/GTK, as it WILL cause segmentation faults when the wxPython system tries to access the font list from the wxPython application.\"\"\"\n\n def create(self, fontStyle, mode=None):\n \"\"\"Create a new font for the given fontStyle and mode\"\"\"\n <|body_0|>\n\n def match(self, fontStyle, mode=None):\n \"\"\"Attempt to find matching wxFont for our fontstyle This is a really stupid implementation, it just takes the first font that includes the name specified in the fontstyle.\"\"\"\n <|body_1|>\n\n def calculatePointSize(self, fontStyle, family, face, mode=None):\n \"\"\"Approximate point size for fontStyle with font\"\"\"\n 
<|body_2|>\n\n def enumerate(self, mode=None):\n \"\"\"Iterate through all available font-keys (whether instantiated or not) This uses the wxFontEnumerator class to provide a list of font names from the wxPython system, (with all names lowercased).\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class _wxFontProvider:\n \"\"\"Singleton for creating new wxBitmapFonts Note: This provider MUST NOT be used under a non-wxPython context under Linux/GTK, as it WILL cause segmentation faults when the wxPython system tries to access the font list from the wxPython application.\"\"\"\n\n def create(self, fontStyle, mode=None):\n \"\"\"Create a new font for the given fontStyle and mode\"\"\"\n family, face, font = self.match(fontStyle, mode)\n bitmapFont = wxBitmapFont(fontStyle, font=font)\n self.addFont(fontStyle, bitmapFont)\n return bitmapFont\n\n def match(self, fontStyle, mode=None):\n \"\"\"Attempt to find matching wxFont for our fontstyle This is a really stupid implementation, it just takes the first font that includes the name specified in the fontstyle.\"\"\"\n family, face = self.FAMILYMAPPING.get('SERIF')\n if fontStyle and fontStyle.family:\n for specifier in fontStyle.family:\n specifier = specifier.lower()\n current = self.FAMILYMAPPING.get(specifier.upper())\n if current:\n family, face = current\n break\n for name in self.enumerate():\n if name.find(specifier) > -1:\n self.FAMILYMAPPING[specifier] = (wx.DEFAULT, name)\n family, face = (wx.DEFAULT, name)\n return self.calculatePointSize(fontStyle, family, face, mode)\n\n def calculatePointSize(self, fontStyle, family, face, mode=None):\n \"\"\"Approximate point size for fontStyle with font\"\"\"\n height = 0\n dc = wx.MemoryDC()\n bm = wx.EmptyBitmap(1, 1)\n dc.SelectObject(bm)\n if fontStyle and fontStyle.size:\n targetSize = fontStyle.size * self.scale\n else:\n targetSize = self.scale\n font = None\n for testSize in range(1, int(targetSize * 3)):\n font = wx.Font(testSize, family, wx.NORMAL, wx.NORMAL, 0, face)\n dc.SetFont(font)\n width, height = dc.GetTextExtent('F')\n if height >= targetSize:\n if __debug__:\n if height != targetSize:\n log.warn('wxBitmapFont Using point size %s for pixel size %s, actual pixel size %s', testSize, targetSize, height)\n break\n if font is None:\n raise ValueError('Invalid font-size specified (%s), no font found to match that size (target = %s pixels)' % (fontStyle.size, targetSize))\n return (family, face, font)\n\n def enumerate(self, mode=None):\n \"\"\"Iterate through all available font-keys (whether instantiated or not) This uses the wxFontEnumerator class to provide a list of font names from the wxPython system, (with all names lowercased).\"\"\"\n if not self.systemNames:\n enumerator = wx.FontEnumerator()\n enumerator.EnumerateFacenames()\n systemNames = enumerator.GetFacenames()\n self.systemNames = systemNames = [item.lower() for item in systemNames]\n return self.systemNames\n", "source": "the_stack_v2_python_sparse", "source_path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGLContext/scenegraph/text/wxfont.py", "source_repo": "alexus37/AugmentedRealityChess", "split": "val", "star_events_count": 1} {"blob_id": "60392b959b52d3e8e49f8e465d1a3644d085de2c", "bodies": ["with open(path, 'r') as stream:\n reader = FileReader(stream)\n for line in reader:\n if 'INFO' in line and '127.0.0.1' in line:\n yield self._parseStatusLine(line)\n 
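The probing loop in calculatePointSize in the _wxFontProvider record above grows the candidate point size until the rendered pixel height of 'F' reaches the target. A headless sketch of that search, with wx replaced by a fake measurement function; the point-to-pixel ratio is an assumption, purely illustrative.

def measure_height(point_size):        # stand-in for wx's dc.GetTextExtent('F')[1]
    return round(point_size * 1.33)    # assumed point-to-pixel ratio, illustrative only

target_pixels = 16
chosen = None
for test_size in range(1, int(target_pixels * 3)):  # same search bound as the record
    if measure_height(test_size) >= target_pixels:
        chosen = test_size
        break
print(chosen)  # smallest point size whose measured height reaches 16 px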
elif 'ERROR' in line:\n yield self._parseErrorLine(line, reader)", "time = '%s %s' % (parts[0], parts[1])\ntime, microseconds = time.split(',')\ntime = datetime.strptime(time, '%Y-%m-%d %H:%M:%S')\nreturn datetime(time.year, time.month, time.day, time.hour, time.minute, time.second, int(microseconds))", "parts = line.split()\ntime = self._parseTime(parts)\ncode = int(parts[11])\nmethod = parts[8][1:]\nendpoint = parts[9]\nagent = parts[14][1:-1]\ncontentLength = parts[12]\ncontentLength = 0 if contentLength == '-' else int(contentLength)\nreturn StatusLine(time, code, method, endpoint, contentLength, agent)", "parts = line.split(' ', 2)\ntime = self._parseTime(parts)\nmessage = parts[2]\nmessage = message.replace('ERROR', '').strip()\nexceptionClass = None\ntraceback = None\nfor line in reader:\n if line.startswith('Traceback'):\n traceback = [line]\n for errorLine in reader:\n traceback.append(errorLine)\n if not errorLine.startswith(' ') and (not errorLine.startswith('---')):\n exceptionClass = errorLine.split(':')[0]\n if exceptionClass == 'None':\n exceptionClass = None\n break\n else:\n reader.putBack(line)\n break\nif traceback:\n traceback = ''.join(traceback)\nreturn ErrorLine(time, message, exceptionClass=exceptionClass, traceback=traceback)"], "bodies_text": "<|body_start_0|>\n with open(path, 'r') as stream:\n reader = FileReader(stream)\n for line in reader:\n if 'INFO' in line and '127.0.0.1' in line:\n yield self._parseStatusLine(line)\n elif 'ERROR' in line:\n yield self._parseErrorLine(line, reader)\n<|end_body_0|>\n\n<|body_start_1|>\n time = '%s %s' % (parts[0], parts[1])\n time, microseconds = time.split(',')\n time = datetime.strptime(time, '%Y-%m-%d %H:%M:%S')\n return datetime(time.year, time.month, time.day, time.hour, time.minute, time.second, int(microseconds))\n<|end_body_1|>\n\n<|body_start_2|>\n parts = line.split()\n time = self._parseTime(parts)\n code = int(parts[11])\n method = parts[8][1:]\n endpoint = parts[9]\n agent = parts[14][1:-1]\n contentLength = parts[12]\n contentLength = 0 if contentLength == '-' else int(contentLength)\n return StatusLine(time, code, method, endpoint, contentLength, agent)\n<|end_body_2|>\n\n<|body_start_3|>\n parts = line.split(' ', 2)\n time = self._parseTime(parts)\n message = parts[2]\n message = message.replace('ERROR', '').strip()\n exceptionClass = None\n traceback = None\n for line in reader:\n if line.startswith('Traceback'):\n traceback = [line]\n for errorLine in reader:\n traceback.append(errorLine)\n if not errorLine.startswith(' ') and (not errorLine.startswith('---')):\n exceptionClass = errorLine.split(':')[0]\n if exceptionClass == 'None':\n exceptionClass = None\n break\n else:\n reader.putBack(line)\n break\n if traceback:\n traceback = ''.join(traceback)\n return ErrorLine(time, message, exceptionClass=exceptionClass, traceback=traceback)\n<|end_body_3|>\n", "class_docstring": "Parser reads log files generated by the Fluidinfo API service.", "class_name": "LogParser", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LogParser:\n \"\"\"Parser reads log files generated by the Fluidinfo API service.\"\"\"\n\n def parse(self, path):\n \"\"\"Generator parses a log file and yields L{StatusLine} and L{ErrorLine} instances built from data in the log. @param path: The path of the log file to parse.\"\"\"\n <|body_0|>\n\n def _parseTime(self, parts):\n \"\"\"Parse the time from the log line. 
@param parts: The log line, split into parts, to parse. @return: A C{datetime} with the time from the log line.\"\"\"\n <|body_1|>\n\n def _parseStatusLine(self, line):\n \"\"\"Parse a status line from the log and convert it into a L{StatusLine}. @param line: The log line to parse. @return: A L{StatusLine} instance representing the log line.\"\"\"\n <|body_2|>\n\n def _parseErrorLine(self, line, reader):\n \"\"\"Parse an error line from the log and convert it into a L{ErrorLine}. @param line: The log line to parse. @param stream: The file stream being read from. @return: An L{ErrorLine} instance representing the log line.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with open(path, 'r') as stream:\n reader = FileReader(stream)\n for line in reader:\n if 'INFO' in line and '127.0.0.1' in line:\n yield self._parseStatusLine(line)\n elif 'ERROR' in line:\n yield self._parseErrorLine(line, reader)\n<|end_body_0|>\n\n<|body_start_1|>\n time = '%s %s' % (parts[0], parts[1])\n time, microseconds = time.split(',')\n time = datetime.strptime(time, '%Y-%m-%d %H:%M:%S')\n return datetime(time.year, time.month, time.day, time.hour, time.minute, time.second, int(microseconds))\n<|end_body_1|>\n\n<|body_start_2|>\n parts = line.split()\n time = self._parseTime(parts)\n code = int(parts[11])\n method = parts[8][1:]\n endpoint = parts[9]\n agent = parts[14][1:-1]\n contentLength = parts[12]\n contentLength = 0 if contentLength == '-' else int(contentLength)\n return StatusLine(time, code, method, endpoint, contentLength, agent)\n<|end_body_2|>\n\n<|body_start_3|>\n parts = line.split(' ', 2)\n time = self._parseTime(parts)\n message = parts[2]\n message = message.replace('ERROR', '').strip()\n exceptionClass = None\n traceback = None\n for line in reader:\n if line.startswith('Traceback'):\n traceback = [line]\n for errorLine in reader:\n traceback.append(errorLine)\n if not errorLine.startswith(' ') and (not errorLine.startswith('---')):\n exceptionClass = errorLine.split(':')[0]\n if exceptionClass == 'None':\n exceptionClass = None\n break\n else:\n reader.putBack(line)\n break\n if traceback:\n traceback = ''.join(traceback)\n return ErrorLine(time, message, exceptionClass=exceptionClass, traceback=traceback)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000302", "length_bytes": 10436, "license_type": "permissive", "methods": [{"docstring": "Generator parses a log file and yields L{StatusLine} and L{ErrorLine} instances built from data in the log. @param path: The path of the log file to parse.", "name": "parse", "signature": "def parse(self, path)"}, {"docstring": "Parse the time from the log line. @param parts: The log line, split into parts, to parse. @return: A C{datetime} with the time from the log line.", "name": "_parseTime", "signature": "def _parseTime(self, parts)"}, {"docstring": "Parse a status line from the log and convert it into a L{StatusLine}. @param line: The log line to parse. @return: A L{StatusLine} instance representing the log line.", "name": "_parseStatusLine", "signature": "def _parseStatusLine(self, line)"}, {"docstring": "Parse an error line from the log and convert it into a L{ErrorLine}. @param line: The log line to parse. @param stream: The file stream being read from. 
@return: An L{ErrorLine} instance representing the log line.", "name": "_parseErrorLine", "signature": "def _parseErrorLine(self, line, reader)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_003310", "prompt": "Implement the Python class `LogParser` described below.\n\nClass description:\nParser reads log files generated by the Fluidinfo API service.\n\nMethod signatures and docstrings:\n- def parse(self, path): Generator parses a log file and yields L{StatusLine} and L{ErrorLine} instances built from data in the log. @param path: The path of the log file to parse.\n- def _parseTime(self, parts): Parse the time from the log line. @param parts: The log line, split into parts, to parse. @return: A C{datetime} with the time from the log line.\n- def _parseStatusLine(self, line): Parse a status line from the log and convert it into a L{StatusLine}. @param line: The log line to parse. @return: A L{StatusLine} instance representing the log line.\n- def _parseErrorLine(self, line, reader): Parse an error line from the log and convert it into a L{ErrorLine}. @param line: The log line to parse. @param stream: The file stream being read from. @return: An L{ErrorLine} instance representing the log line.", "prompted_full_text": "Implement the Python class `LogParser` described below.\n\nClass description:\nParser reads log files generated by the Fluidinfo API service.\n\nMethod signatures and docstrings:\n- def parse(self, path): Generator parses a log file and yields L{StatusLine} and L{ErrorLine} instances built from data in the log. @param path: The path of the log file to parse.\n- def _parseTime(self, parts): Parse the time from the log line. @param parts: The log line, split into parts, to parse. @return: A C{datetime} with the time from the log line.\n- def _parseStatusLine(self, line): Parse a status line from the log and convert it into a L{StatusLine}. @param line: The log line to parse. @return: A L{StatusLine} instance representing the log line.\n- def _parseErrorLine(self, line, reader): Parse an error line from the log and convert it into a L{ErrorLine}. @param line: The log line to parse. @param stream: The file stream being read from. @return: An L{ErrorLine} instance representing the log line.\n\n<|skeleton|>\nclass LogParser:\n \"\"\"Parser reads log files generated by the Fluidinfo API service.\"\"\"\n\n def parse(self, path):\n \"\"\"Generator parses a log file and yields L{StatusLine} and L{ErrorLine} instances built from data in the log. @param path: The path of the log file to parse.\"\"\"\n <|body_0|>\n\n def _parseTime(self, parts):\n \"\"\"Parse the time from the log line. @param parts: The log line, split into parts, to parse. @return: A C{datetime} with the time from the log line.\"\"\"\n <|body_1|>\n\n def _parseStatusLine(self, line):\n \"\"\"Parse a status line from the log and convert it into a L{StatusLine}. @param line: The log line to parse. @return: A L{StatusLine} instance representing the log line.\"\"\"\n <|body_2|>\n\n def _parseErrorLine(self, line, reader):\n \"\"\"Parse an error line from the log and convert it into a L{ErrorLine}. @param line: The log line to parse. @param stream: The file stream being read from. 
@return: An L{ErrorLine} instance representing the log line.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with open(path, 'r') as stream:\n reader = FileReader(stream)\n for line in reader:\n if 'INFO' in line and '127.0.0.1' in line:\n yield self._parseStatusLine(line)\n elif 'ERROR' in line:\n yield self._parseErrorLine(line, reader)\n<|end_body_0|>\n\n<|body_start_1|>\n time = '%s %s' % (parts[0], parts[1])\n time, microseconds = time.split(',')\n time = datetime.strptime(time, '%Y-%m-%d %H:%M:%S')\n return datetime(time.year, time.month, time.day, time.hour, time.minute, time.second, int(microseconds))\n<|end_body_1|>\n\n<|body_start_2|>\n parts = line.split()\n time = self._parseTime(parts)\n code = int(parts[11])\n method = parts[8][1:]\n endpoint = parts[9]\n agent = parts[14][1:-1]\n contentLength = parts[12]\n contentLength = 0 if contentLength == '-' else int(contentLength)\n return StatusLine(time, code, method, endpoint, contentLength, agent)\n<|end_body_2|>\n\n<|body_start_3|>\n parts = line.split(' ', 2)\n time = self._parseTime(parts)\n message = parts[2]\n message = message.replace('ERROR', '').strip()\n exceptionClass = None\n traceback = None\n for line in reader:\n if line.startswith('Traceback'):\n traceback = [line]\n for errorLine in reader:\n traceback.append(errorLine)\n if not errorLine.startswith(' ') and (not errorLine.startswith('---')):\n exceptionClass = errorLine.split(':')[0]\n if exceptionClass == 'None':\n exceptionClass = None\n break\n else:\n reader.putBack(line)\n break\n if traceback:\n traceback = ''.join(traceback)\n return ErrorLine(time, message, exceptionClass=exceptionClass, traceback=traceback)\n<|end_body_3|>\n", "revision_id": "b5a8c8349f3eaf3364cc4efba4736c3e33b30d96", "skeleton": "<|skeleton|>\nclass LogParser:\n \"\"\"Parser reads log files generated by the Fluidinfo API service.\"\"\"\n\n def parse(self, path):\n \"\"\"Generator parses a log file and yields L{StatusLine} and L{ErrorLine} instances built from data in the log. @param path: The path of the log file to parse.\"\"\"\n <|body_0|>\n\n def _parseTime(self, parts):\n \"\"\"Parse the time from the log line. @param parts: The log line, split into parts, to parse. @return: A C{datetime} with the time from the log line.\"\"\"\n <|body_1|>\n\n def _parseStatusLine(self, line):\n \"\"\"Parse a status line from the log and convert it into a L{StatusLine}. @param line: The log line to parse. @return: A L{StatusLine} instance representing the log line.\"\"\"\n <|body_2|>\n\n def _parseErrorLine(self, line, reader):\n \"\"\"Parse an error line from the log and convert it into a L{ErrorLine}. @param line: The log line to parse. @param stream: The file stream being read from. @return: An L{ErrorLine} instance representing the log line.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LogParser:\n \"\"\"Parser reads log files generated by the Fluidinfo API service.\"\"\"\n\n def parse(self, path):\n \"\"\"Generator parses a log file and yields L{StatusLine} and L{ErrorLine} instances built from data in the log. 
@param path: The path of the log file to parse.\"\"\"\n with open(path, 'r') as stream:\n reader = FileReader(stream)\n for line in reader:\n if 'INFO' in line and '127.0.0.1' in line:\n yield self._parseStatusLine(line)\n elif 'ERROR' in line:\n yield self._parseErrorLine(line, reader)\n\n def _parseTime(self, parts):\n \"\"\"Parse the time from the log line. @param parts: The log line, split into parts, to parse. @return: A C{datetime} with the time from the log line.\"\"\"\n time = '%s %s' % (parts[0], parts[1])\n time, microseconds = time.split(',')\n time = datetime.strptime(time, '%Y-%m-%d %H:%M:%S')\n return datetime(time.year, time.month, time.day, time.hour, time.minute, time.second, int(microseconds))\n\n def _parseStatusLine(self, line):\n \"\"\"Parse a status line from the log and convert it into a L{StatusLine}. @param line: The log line to parse. @return: A L{StatusLine} instance representing the log line.\"\"\"\n parts = line.split()\n time = self._parseTime(parts)\n code = int(parts[11])\n method = parts[8][1:]\n endpoint = parts[9]\n agent = parts[14][1:-1]\n contentLength = parts[12]\n contentLength = 0 if contentLength == '-' else int(contentLength)\n return StatusLine(time, code, method, endpoint, contentLength, agent)\n\n def _parseErrorLine(self, line, reader):\n \"\"\"Parse an error line from the log and convert it into a L{ErrorLine}. @param line: The log line to parse. @param stream: The file stream being read from. @return: An L{ErrorLine} instance representing the log line.\"\"\"\n parts = line.split(' ', 2)\n time = self._parseTime(parts)\n message = parts[2]\n message = message.replace('ERROR', '').strip()\n exceptionClass = None\n traceback = None\n for line in reader:\n if line.startswith('Traceback'):\n traceback = [line]\n for errorLine in reader:\n traceback.append(errorLine)\n if not errorLine.startswith(' ') and (not errorLine.startswith('---')):\n exceptionClass = errorLine.split(':')[0]\n if exceptionClass == 'None':\n exceptionClass = None\n break\n else:\n reader.putBack(line)\n break\n if traceback:\n traceback = ''.join(traceback)\n return ErrorLine(time, message, exceptionClass=exceptionClass, traceback=traceback)\n", "source": "the_stack_v2_python_sparse", "source_path": "fluiddb/scripts/logs.py", "source_repo": "fluidinfo/fluiddb", "split": "val", "star_events_count": 3} {"blob_id": "41977d0cff197b1a4ac91309d23062cd517937cb", "bodies": ["if not nums or k == 0:\n return []\ndeque = collections.deque()\nfor i in range(k):\n while deque and deque[-1] < nums[i]:\n deque.pop()\n deque.append(nums[i])\nres = [deque[0]]\nfor i in range(k, len(nums)):\n if deque[0] == nums[i - k]:\n deque.popleft()\n while deque and deque[-1] < nums[i]:\n deque.pop()\n deque.append(nums[i])\n res.append(deque[0])\nreturn res", "deque = collections.deque()\nres, n = ([], len(nums))\nfor i, j in zip(range(1 - k, n + 1 - k), range(n)):\n if i > 0 and deque[0] == nums[i - 1]:\n deque.popleft()\n while deque and deque[-1] < nums[j]:\n deque.pop()\n deque.append(nums[j])\n if i >= 0:\n res.append(deque[0])\nreturn res", "window = MonotonicQueue()\nres = []\nfor i in range(len(nums)):\n if i < k - 1:\n window.push(nums[i])\n else:\n window.push(nums[i])\n res.append(window.max())\n window.pop(nums[i - k + 1])\nreturn res"], "bodies_text": "<|body_start_0|>\n if not nums or k == 0:\n return []\n deque = collections.deque()\n for i in range(k):\n while deque and deque[-1] < nums[i]:\n deque.pop()\n deque.append(nums[i])\n res = [deque[0]]\n for i in range(k, len(nums)):\n if 
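Editor's note: the LogParser record above rebuilds a datetime from log prefixes such as "2011-06-01 12:00:00,123456", where the digits after the comma are treated as microseconds. A minimal standalone sketch of that parsing step, using only the standard library (the sample line is an assumption, not taken from the record):

    from datetime import datetime

    def parse_log_time(line):
        # Prefix format assumed: "YYYY-MM-DD HH:MM:SS,ffffff ..."
        parts = line.split()
        stamp, micros = ('%s %s' % (parts[0], parts[1])).split(',')
        base = datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S')
        return base.replace(microsecond=int(micros))

    print(parse_log_time('2011-06-01 12:00:00,123456 INFO ...'))
    # 2011-06-01 12:00:00.123456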
deque[0] == nums[i - k]:\n deque.popleft()\n while deque and deque[-1] < nums[i]:\n deque.pop()\n deque.append(nums[i])\n res.append(deque[0])\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n deque = collections.deque()\n res, n = ([], len(nums))\n for i, j in zip(range(1 - k, n + 1 - k), range(n)):\n if i > 0 and deque[0] == nums[i - 1]:\n deque.popleft()\n while deque and deque[-1] < nums[j]:\n deque.pop()\n deque.append(nums[j])\n if i >= 0:\n res.append(deque[0])\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n window = MonotonicQueue()\n res = []\n for i in range(len(nums)):\n if i < k - 1:\n window.push(nums[i])\n else:\n window.push(nums[i])\n res.append(window.max())\n window.pop(nums[i - k + 1])\n return res\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n \"\"\"维护单调递减队列:窗口滑动添加了元素 nums[j + 1] ,需将 deque 内所有 < nums[j + 1] 的元素删除\"\"\"\n <|body_0|>\n\n def maxSlidingWindow1(self, nums: List[int], k: int) -> List[int]:\n \"\"\"单调队列经典题目:https://leetcode-cn.com/problems/hua-dong-chuang-kou-de-zui-da-zhi-lcof/solution/mian-shi-ti-59-i-hua-dong-chuang-kou-de-zui-da-1-6/\"\"\"\n <|body_1|>\n\n def maxSlidingWindow2(self, nums: List[int], k: int) -> List[int]:\n \"\"\"滑动窗口之单调队列:https://leetcode-cn.com/problems/sliding-window-maximum/solution/dan-diao-dui-lie-by-labuladong/\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not nums or k == 0:\n return []\n deque = collections.deque()\n for i in range(k):\n while deque and deque[-1] < nums[i]:\n deque.pop()\n deque.append(nums[i])\n res = [deque[0]]\n for i in range(k, len(nums)):\n if deque[0] == nums[i - k]:\n deque.popleft()\n while deque and deque[-1] < nums[i]:\n deque.pop()\n deque.append(nums[i])\n res.append(deque[0])\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n deque = collections.deque()\n res, n = ([], len(nums))\n for i, j in zip(range(1 - k, n + 1 - k), range(n)):\n if i > 0 and deque[0] == nums[i - 1]:\n deque.popleft()\n while deque and deque[-1] < nums[j]:\n deque.pop()\n deque.append(nums[j])\n if i >= 0:\n res.append(deque[0])\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n window = MonotonicQueue()\n res = []\n for i in range(len(nums)):\n if i < k - 1:\n window.push(nums[i])\n else:\n window.push(nums[i])\n res.append(window.max())\n window.pop(nums[i - k + 1])\n return res\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000303", "length_bytes": 4339, "license_type": "permissive", "methods": [{"docstring": "维护单调递减队列:窗口滑动添加了元素 nums[j + 1] ,需将 deque 内所有 < nums[j + 1] 的元素删除", "name": "maxSlidingWindow", "signature": "def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]"}, {"docstring": "单调队列经典题目:https://leetcode-cn.com/problems/hua-dong-chuang-kou-de-zui-da-zhi-lcof/solution/mian-shi-ti-59-i-hua-dong-chuang-kou-de-zui-da-1-6/", "name": "maxSlidingWindow1", "signature": "def maxSlidingWindow1(self, nums: List[int], k: int) -> List[int]"}, {"docstring": "滑动窗口之单调队列:https://leetcode-cn.com/problems/sliding-window-maximum/solution/dan-diao-dui-lie-by-labuladong/", "name": "maxSlidingWindow2", "signature": "def maxSlidingWindow2(self, nums: List[int], k: int) -> List[int]"}], "n_methods": 3, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def 
maxSlidingWindow(self, nums: List[int], k: int) -> List[int]: 维护单调递减队列:窗口滑动添加了元素 nums[j + 1] ,需将 deque 内所有 < nums[j + 1] 的元素删除\n- def maxSlidingWindow1(self, nums: List[int], k: int) -> List[int]: 单调队列经典题目:https://leetcode-cn.com/problems/hua-dong-chuang-kou-de-zui-da-zhi-lcof/solution/mian-shi-ti-59-i-hua-dong-chuang-kou-de-zui-da-1-6/\n- def maxSlidingWindow2(self, nums: List[int], k: int) -> List[int]: 滑动窗口之单调队列:https://leetcode-cn.com/problems/sliding-window-maximum/solution/dan-diao-dui-lie-by-labuladong/", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]: 维护单调递减队列:窗口滑动添加了元素 nums[j + 1] ,需将 deque 内所有 < nums[j + 1] 的元素删除\n- def maxSlidingWindow1(self, nums: List[int], k: int) -> List[int]: 单调队列经典题目:https://leetcode-cn.com/problems/hua-dong-chuang-kou-de-zui-da-zhi-lcof/solution/mian-shi-ti-59-i-hua-dong-chuang-kou-de-zui-da-1-6/\n- def maxSlidingWindow2(self, nums: List[int], k: int) -> List[int]: 滑动窗口之单调队列:https://leetcode-cn.com/problems/sliding-window-maximum/solution/dan-diao-dui-lie-by-labuladong/\n\n<|skeleton|>\nclass Solution:\n\n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n \"\"\"维护单调递减队列:窗口滑动添加了元素 nums[j + 1] ,需将 deque 内所有 < nums[j + 1] 的元素删除\"\"\"\n <|body_0|>\n\n def maxSlidingWindow1(self, nums: List[int], k: int) -> List[int]:\n \"\"\"单调队列经典题目:https://leetcode-cn.com/problems/hua-dong-chuang-kou-de-zui-da-zhi-lcof/solution/mian-shi-ti-59-i-hua-dong-chuang-kou-de-zui-da-1-6/\"\"\"\n <|body_1|>\n\n def maxSlidingWindow2(self, nums: List[int], k: int) -> List[int]:\n \"\"\"滑动窗口之单调队列:https://leetcode-cn.com/problems/sliding-window-maximum/solution/dan-diao-dui-lie-by-labuladong/\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not nums or k == 0:\n return []\n deque = collections.deque()\n for i in range(k):\n while deque and deque[-1] < nums[i]:\n deque.pop()\n deque.append(nums[i])\n res = [deque[0]]\n for i in range(k, len(nums)):\n if deque[0] == nums[i - k]:\n deque.popleft()\n while deque and deque[-1] < nums[i]:\n deque.pop()\n deque.append(nums[i])\n res.append(deque[0])\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n deque = collections.deque()\n res, n = ([], len(nums))\n for i, j in zip(range(1 - k, n + 1 - k), range(n)):\n if i > 0 and deque[0] == nums[i - 1]:\n deque.popleft()\n while deque and deque[-1] < nums[j]:\n deque.pop()\n deque.append(nums[j])\n if i >= 0:\n res.append(deque[0])\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n window = MonotonicQueue()\n res = []\n for i in range(len(nums)):\n if i < k - 1:\n window.push(nums[i])\n else:\n window.push(nums[i])\n res.append(window.max())\n window.pop(nums[i - k + 1])\n return res\n<|end_body_2|>\n", "revision_id": "e8a1c6cae6547cbcb6e8494be6df685f3e7c837c", "skeleton": "<|skeleton|>\nclass Solution:\n\n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n \"\"\"维护单调递减队列:窗口滑动添加了元素 nums[j + 1] ,需将 deque 内所有 < nums[j + 1] 的元素删除\"\"\"\n <|body_0|>\n\n def maxSlidingWindow1(self, nums: List[int], k: int) -> List[int]:\n \"\"\"单调队列经典题目:https://leetcode-cn.com/problems/hua-dong-chuang-kou-de-zui-da-zhi-lcof/solution/mian-shi-ti-59-i-hua-dong-chuang-kou-de-zui-da-1-6/\"\"\"\n <|body_1|>\n\n def maxSlidingWindow2(self, nums: List[int], k: int) -> List[int]:\n 
\"\"\"滑动窗口之单调队列:https://leetcode-cn.com/problems/sliding-window-maximum/solution/dan-diao-dui-lie-by-labuladong/\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n \"\"\"维护单调递减队列:窗口滑动添加了元素 nums[j + 1] ,需将 deque 内所有 < nums[j + 1] 的元素删除\"\"\"\n if not nums or k == 0:\n return []\n deque = collections.deque()\n for i in range(k):\n while deque and deque[-1] < nums[i]:\n deque.pop()\n deque.append(nums[i])\n res = [deque[0]]\n for i in range(k, len(nums)):\n if deque[0] == nums[i - k]:\n deque.popleft()\n while deque and deque[-1] < nums[i]:\n deque.pop()\n deque.append(nums[i])\n res.append(deque[0])\n return res\n\n def maxSlidingWindow1(self, nums: List[int], k: int) -> List[int]:\n \"\"\"单调队列经典题目:https://leetcode-cn.com/problems/hua-dong-chuang-kou-de-zui-da-zhi-lcof/solution/mian-shi-ti-59-i-hua-dong-chuang-kou-de-zui-da-1-6/\"\"\"\n deque = collections.deque()\n res, n = ([], len(nums))\n for i, j in zip(range(1 - k, n + 1 - k), range(n)):\n if i > 0 and deque[0] == nums[i - 1]:\n deque.popleft()\n while deque and deque[-1] < nums[j]:\n deque.pop()\n deque.append(nums[j])\n if i >= 0:\n res.append(deque[0])\n return res\n\n def maxSlidingWindow2(self, nums: List[int], k: int) -> List[int]:\n \"\"\"滑动窗口之单调队列:https://leetcode-cn.com/problems/sliding-window-maximum/solution/dan-diao-dui-lie-by-labuladong/\"\"\"\n window = MonotonicQueue()\n res = []\n for i in range(len(nums)):\n if i < k - 1:\n window.push(nums[i])\n else:\n window.push(nums[i])\n res.append(window.max())\n window.pop(nums[i - k + 1])\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "lcof/59-hua-dong-chuang-kou-de-zui-da-zhi-lcof.py", "source_repo": "yuenliou/leetcode", "split": "val", "star_events_count": 0} {"blob_id": "64a9dddd8ed4ca582312c8245726f95378e9db5b", "bodies": ["a = Ligne(self.coords[:4]).perimetre()\nb = Ligne(self.coords[2:]).perimetre()\nc = Ligne(self.coords[:2] + self.coords[-2:]).perimetre()\nreturn a + b + c", "a = Ligne(self.coords[:4]).perimetre()\nb = Ligne(self.coords[2:]).perimetre()\nc = Ligne(self.coords[:2] + self.coords[-2:]).perimetre()\nds = (a + b + c) / 2\nreturn math.sqrt(ds * abs(ds - a) * abs(ds - b) * abs(ds - c))"], "bodies_text": "<|body_start_0|>\n a = Ligne(self.coords[:4]).perimetre()\n b = Ligne(self.coords[2:]).perimetre()\n c = Ligne(self.coords[:2] + self.coords[-2:]).perimetre()\n return a + b + c\n<|end_body_0|>\n\n<|body_start_1|>\n a = Ligne(self.coords[:4]).perimetre()\n b = Ligne(self.coords[2:]).perimetre()\n c = Ligne(self.coords[:2] + self.coords[-2:]).perimetre()\n ds = (a + b + c) / 2\n return math.sqrt(ds * abs(ds - a) * abs(ds - b) * abs(ds - c))\n<|end_body_1|>\n", "class_docstring": "Definition d'une Forme à 3 points coords : Liste des coordonnées des points de la forme (x, y) def aire() -> float def perimetre() -> float", "class_name": "Triangle", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Triangle:\n \"\"\"Definition d'une Forme à 3 points coords : Liste des coordonnées des points de la forme (x, y) def aire() -> float def perimetre() -> float\"\"\"\n\n def perimetre(self):\n \"\"\"Calcul du périmètre d'un rectangle (a + b + c)\"\"\"\n <|body_0|>\n\n def aire(self):\n \"\"\"Calcul de l'aire d'un rectangle s = (a + b + c) / 2 A = sqrt(s.(s-a).(s-b).(s-c))\"\"\"\n 
<|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n a = Ligne(self.coords[:4]).perimetre()\n b = Ligne(self.coords[2:]).perimetre()\n c = Ligne(self.coords[:2] + self.coords[-2:]).perimetre()\n return a + b + c\n<|end_body_0|>\n\n<|body_start_1|>\n a = Ligne(self.coords[:4]).perimetre()\n b = Ligne(self.coords[2:]).perimetre()\n c = Ligne(self.coords[:2] + self.coords[-2:]).perimetre()\n ds = (a + b + c) / 2\n return math.sqrt(ds * abs(ds - a) * abs(ds - b) * abs(ds - c))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000304", "length_bytes": 4984, "license_type": "no_license", "methods": [{"docstring": "Calcul du périmètre d'un rectangle (a + b + c)", "name": "perimetre", "signature": "def perimetre(self)"}, {"docstring": "Calcul de l'aire d'un rectangle s = (a + b + c) / 2 A = sqrt(s.(s-a).(s-b).(s-c))", "name": "aire", "signature": "def aire(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000065", "prompt": "Implement the Python class `Triangle` described below.\n\nClass description:\nDefinition d'une Forme à 3 points coords : Liste des coordonnées des points de la forme (x, y) def aire() -> float def perimetre() -> float\n\nMethod signatures and docstrings:\n- def perimetre(self): Calcul du périmètre d'un rectangle (a + b + c)\n- def aire(self): Calcul de l'aire d'un rectangle s = (a + b + c) / 2 A = sqrt(s.(s-a).(s-b).(s-c))", "prompted_full_text": "Implement the Python class `Triangle` described below.\n\nClass description:\nDefinition d'une Forme à 3 points coords : Liste des coordonnées des points de la forme (x, y) def aire() -> float def perimetre() -> float\n\nMethod signatures and docstrings:\n- def perimetre(self): Calcul du périmètre d'un rectangle (a + b + c)\n- def aire(self): Calcul de l'aire d'un rectangle s = (a + b + c) / 2 A = sqrt(s.(s-a).(s-b).(s-c))\n\n<|skeleton|>\nclass Triangle:\n \"\"\"Definition d'une Forme à 3 points coords : Liste des coordonnées des points de la forme (x, y) def aire() -> float def perimetre() -> float\"\"\"\n\n def perimetre(self):\n \"\"\"Calcul du périmètre d'un rectangle (a + b + c)\"\"\"\n <|body_0|>\n\n def aire(self):\n \"\"\"Calcul de l'aire d'un rectangle s = (a + b + c) / 2 A = sqrt(s.(s-a).(s-b).(s-c))\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n a = Ligne(self.coords[:4]).perimetre()\n b = Ligne(self.coords[2:]).perimetre()\n c = Ligne(self.coords[:2] + self.coords[-2:]).perimetre()\n return a + b + c\n<|end_body_0|>\n\n<|body_start_1|>\n a = Ligne(self.coords[:4]).perimetre()\n b = Ligne(self.coords[2:]).perimetre()\n c = Ligne(self.coords[:2] + self.coords[-2:]).perimetre()\n ds = (a + b + c) / 2\n return math.sqrt(ds * abs(ds - a) * abs(ds - b) * abs(ds - c))\n<|end_body_1|>\n", "revision_id": "80ad880298bf9f6a2f32f87ec90e43068380c7a5", "skeleton": "<|skeleton|>\nclass Triangle:\n \"\"\"Definition d'une Forme à 3 points coords : Liste des coordonnées des points de la forme (x, y) def aire() -> float def perimetre() -> float\"\"\"\n\n def perimetre(self):\n \"\"\"Calcul du périmètre d'un rectangle (a + b + c)\"\"\"\n <|body_0|>\n\n def aire(self):\n \"\"\"Calcul de l'aire d'un rectangle s = (a + b + c) / 2 A = sqrt(s.(s-a).(s-b).(s-c))\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Triangle:\n \"\"\"Definition d'une Forme à 3 points coords : Liste des coordonnées des points de la forme (x, y) def aire() -> float def perimetre() -> float\"\"\"\n\n def 
perimetre(self):\n \"\"\"Calcul du périmètre d'un rectangle (a + b + c)\"\"\"\n a = Ligne(self.coords[:4]).perimetre()\n b = Ligne(self.coords[2:]).perimetre()\n c = Ligne(self.coords[:2] + self.coords[-2:]).perimetre()\n return a + b + c\n\n def aire(self):\n \"\"\"Calcul de l'aire d'un rectangle s = (a + b + c) / 2 A = sqrt(s.(s-a).(s-b).(s-c))\"\"\"\n a = Ligne(self.coords[:4]).perimetre()\n b = Ligne(self.coords[2:]).perimetre()\n c = Ligne(self.coords[:2] + self.coords[-2:]).perimetre()\n ds = (a + b + c) / 2\n return math.sqrt(ds * abs(ds - a) * abs(ds - b) * abs(ds - c))\n", "source": "the_stack_v2_python_sparse", "source_path": "formes.py", "source_repo": "christophejacques/examples", "split": "val", "star_events_count": 0} {"blob_id": "e125e655a8febcb816ca069eaaa3bbd2076ae4e7", "bodies": ["super(Decoder, self).__init__()\nself.filter_widths = [N // 2 ** (l + 1) for l in range(layers)]\ntotal_input_width = np.array(self.filter_widths).sum()\nself.bottleneck = nn.Sequential(nn.ConvTranspose1d(N, total_input_width, kernel_size=1, stride=1, bias=False), nn.ReLU())\nself.filters = nn.ModuleList([])\nfor l in range(layers):\n n = N // 2 ** (l + 1)\n k = kernel_size * 2 ** l\n self.filters.append(nn.ConvTranspose1d(n, 1, kernel_size=k, stride=stride, bias=False, padding=(k - stride) // 2))", "x = self.bottleneck(x)\noutput = 0.0\nx = x.split(self.filter_widths, dim=1)\nfor i in range(len(x)):\n output += self.filters[i](x[i])\nreturn output"], "bodies_text": "<|body_start_0|>\n super(Decoder, self).__init__()\n self.filter_widths = [N // 2 ** (l + 1) for l in range(layers)]\n total_input_width = np.array(self.filter_widths).sum()\n self.bottleneck = nn.Sequential(nn.ConvTranspose1d(N, total_input_width, kernel_size=1, stride=1, bias=False), nn.ReLU())\n self.filters = nn.ModuleList([])\n for l in range(layers):\n n = N // 2 ** (l + 1)\n k = kernel_size * 2 ** l\n self.filters.append(nn.ConvTranspose1d(n, 1, kernel_size=k, stride=stride, bias=False, padding=(k - stride) // 2))\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.bottleneck(x)\n output = 0.0\n x = x.split(self.filter_widths, dim=1)\n for i in range(len(x)):\n output += self.filters[i](x[i])\n return output\n<|end_body_1|>\n", "class_docstring": "Decodes the latent representation back to waveforms", "class_name": "Decoder", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Decoder:\n \"\"\"Decodes the latent representation back to waveforms\"\"\"\n\n def __init__(self, N, kernel_size, stride, layers):\n \"\"\"Arguments: N {int} -- Dimension of the input latent representation kernel_size {int} -- Base convolutional kernel size stride {int} -- Stride of the transposed covolutions layers {int} -- Number of parallel convolutions with different kernel sizes\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Arguments: x {torch.tensor} -- Latent representation of the four instrument with shape (B*4, N, T') Returns: torch.tensor -- Signal of the four instruments with shape (B*4, 1, T)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Decoder, self).__init__()\n self.filter_widths = [N // 2 ** (l + 1) for l in range(layers)]\n total_input_width = np.array(self.filter_widths).sum()\n self.bottleneck = nn.Sequential(nn.ConvTranspose1d(N, total_input_width, kernel_size=1, stride=1, bias=False), nn.ReLU())\n self.filters = nn.ModuleList([])\n for l in range(layers):\n n = N // 2 ** (l + 1)\n k = kernel_size * 2 ** l\n self.filters.append(nn.ConvTranspose1d(n, 
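Editor's note: the Triangle record above computes the area with Heron's formula, s = (a + b + c) / 2 and A = sqrt(s(s-a)(s-b)(s-c)); its French docstrings say "rectangle" but they describe the triangle's perimeter and area. A quick standalone check with an assumed 3-4-5 right triangle:

    import math

    a, b, c = 3.0, 4.0, 5.0
    s = (a + b + c) / 2                                # semi-perimeter: 6.0
    area = math.sqrt(s * (s - a) * (s - b) * (s - c))
    print(area)                                        # 6.0 for a 3-4-5 triangle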
1, kernel_size=k, stride=stride, bias=False, padding=(k - stride) // 2))\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.bottleneck(x)\n output = 0.0\n x = x.split(self.filter_widths, dim=1)\n for i in range(len(x)):\n output += self.filters[i](x[i])\n return output\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000305", "length_bytes": 37269, "license_type": "no_license", "methods": [{"docstring": "Arguments: N {int} -- Dimension of the input latent representation kernel_size {int} -- Base convolutional kernel size stride {int} -- Stride of the transposed covolutions layers {int} -- Number of parallel convolutions with different kernel sizes", "name": "__init__", "signature": "def __init__(self, N, kernel_size, stride, layers)"}, {"docstring": "Arguments: x {torch.tensor} -- Latent representation of the four instrument with shape (B*4, N, T') Returns: torch.tensor -- Signal of the four instruments with shape (B*4, 1, T)", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004201", "prompt": "Implement the Python class `Decoder` described below.\n\nClass description:\nDecodes the latent representation back to waveforms\n\nMethod signatures and docstrings:\n- def __init__(self, N, kernel_size, stride, layers): Arguments: N {int} -- Dimension of the input latent representation kernel_size {int} -- Base convolutional kernel size stride {int} -- Stride of the transposed covolutions layers {int} -- Number of parallel convolutions with different kernel sizes\n- def forward(self, x): Arguments: x {torch.tensor} -- Latent representation of the four instrument with shape (B*4, N, T') Returns: torch.tensor -- Signal of the four instruments with shape (B*4, 1, T)", "prompted_full_text": "Implement the Python class `Decoder` described below.\n\nClass description:\nDecodes the latent representation back to waveforms\n\nMethod signatures and docstrings:\n- def __init__(self, N, kernel_size, stride, layers): Arguments: N {int} -- Dimension of the input latent representation kernel_size {int} -- Base convolutional kernel size stride {int} -- Stride of the transposed covolutions layers {int} -- Number of parallel convolutions with different kernel sizes\n- def forward(self, x): Arguments: x {torch.tensor} -- Latent representation of the four instrument with shape (B*4, N, T') Returns: torch.tensor -- Signal of the four instruments with shape (B*4, 1, T)\n\n<|skeleton|>\nclass Decoder:\n \"\"\"Decodes the latent representation back to waveforms\"\"\"\n\n def __init__(self, N, kernel_size, stride, layers):\n \"\"\"Arguments: N {int} -- Dimension of the input latent representation kernel_size {int} -- Base convolutional kernel size stride {int} -- Stride of the transposed covolutions layers {int} -- Number of parallel convolutions with different kernel sizes\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Arguments: x {torch.tensor} -- Latent representation of the four instrument with shape (B*4, N, T') Returns: torch.tensor -- Signal of the four instruments with shape (B*4, 1, T)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Decoder, self).__init__()\n self.filter_widths = [N // 2 ** (l + 1) for l in range(layers)]\n total_input_width = np.array(self.filter_widths).sum()\n self.bottleneck = nn.Sequential(nn.ConvTranspose1d(N, total_input_width, kernel_size=1, stride=1, bias=False), nn.ReLU())\n self.filters = nn.ModuleList([])\n for l in range(layers):\n n = N // 2 ** (l + 1)\n k = kernel_size * 2 ** 
l\n self.filters.append(nn.ConvTranspose1d(n, 1, kernel_size=k, stride=stride, bias=False, padding=(k - stride) // 2))\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.bottleneck(x)\n output = 0.0\n x = x.split(self.filter_widths, dim=1)\n for i in range(len(x)):\n output += self.filters[i](x[i])\n return output\n<|end_body_1|>\n", "revision_id": "7e55a422588c1d1e00f35a3d3a3ff896cce59e18", "skeleton": "<|skeleton|>\nclass Decoder:\n \"\"\"Decodes the latent representation back to waveforms\"\"\"\n\n def __init__(self, N, kernel_size, stride, layers):\n \"\"\"Arguments: N {int} -- Dimension of the input latent representation kernel_size {int} -- Base convolutional kernel size stride {int} -- Stride of the transposed covolutions layers {int} -- Number of parallel convolutions with different kernel sizes\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Arguments: x {torch.tensor} -- Latent representation of the four instrument with shape (B*4, N, T') Returns: torch.tensor -- Signal of the four instruments with shape (B*4, 1, T)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Decoder:\n \"\"\"Decodes the latent representation back to waveforms\"\"\"\n\n def __init__(self, N, kernel_size, stride, layers):\n \"\"\"Arguments: N {int} -- Dimension of the input latent representation kernel_size {int} -- Base convolutional kernel size stride {int} -- Stride of the transposed covolutions layers {int} -- Number of parallel convolutions with different kernel sizes\"\"\"\n super(Decoder, self).__init__()\n self.filter_widths = [N // 2 ** (l + 1) for l in range(layers)]\n total_input_width = np.array(self.filter_widths).sum()\n self.bottleneck = nn.Sequential(nn.ConvTranspose1d(N, total_input_width, kernel_size=1, stride=1, bias=False), nn.ReLU())\n self.filters = nn.ModuleList([])\n for l in range(layers):\n n = N // 2 ** (l + 1)\n k = kernel_size * 2 ** l\n self.filters.append(nn.ConvTranspose1d(n, 1, kernel_size=k, stride=stride, bias=False, padding=(k - stride) // 2))\n\n def forward(self, x):\n \"\"\"Arguments: x {torch.tensor} -- Latent representation of the four instrument with shape (B*4, N, T') Returns: torch.tensor -- Signal of the four instruments with shape (B*4, 1, T)\"\"\"\n x = self.bottleneck(x)\n output = 0.0\n x = x.split(self.filter_widths, dim=1)\n for i in range(len(x)):\n output += self.filters[i](x[i])\n return output\n", "source": "the_stack_v2_python_sparse", "source_path": "generated/test_pfnet_research_meta_tasnet.py", "source_repo": "jansel/pytorch-jit-paritybench", "split": "val", "star_events_count": 35} {"blob_id": "cd1cf625e58385e673ac0ad20f2346e6d7610a25", "bodies": ["configs = None\nconfigsDao = ConfigsDao()\ntry:\n configs = configsDao.add(args)\nexcept Exception as e:\n abort(500, e)\nreturn configs", "record = None\nconfigsDao = ConfigsDao()\ntry:\n record = configsDao.edit(args)\nexcept Exception as e:\n abort(500, e)\nreturn record", "result = False\nids = args.get('ids')\nconfigsDao = ConfigsDao()\ntry:\n result = configsDao.delete(ids)\nexcept Exception as e:\n abort(500, e)\nreturn {'status': result}", "record = None\nconfigsDao = ConfigsDao()\nid = request.uid\nrecord = configsDao.getById(id)\nreturn record"], "bodies_text": "<|body_start_0|>\n configs = None\n configsDao = ConfigsDao()\n try:\n configs = configsDao.add(args)\n except Exception as e:\n abort(500, e)\n return configs\n<|end_body_0|>\n\n<|body_start_1|>\n record = None\n 
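Editor's note: in the Decoder record above, each ConvTranspose1d gets padding=(k - stride) // 2. With the transposed-convolution length formula L_out = (L_in - 1) * stride - 2 * padding + k, that choice (for even k - stride) reduces to L_out = L_in * stride, so every parallel filter upsamples by the same factor and the outputs can be summed elementwise. A small PyTorch shape check under assumed sizes:

    import torch
    import torch.nn as nn

    stride, length = 8, 100
    for k in (16, 32, 64):  # kernel sizes doubling per layer, as in the record
        conv = nn.ConvTranspose1d(4, 1, kernel_size=k, stride=stride,
                                  bias=False, padding=(k - stride) // 2)
        out = conv(torch.zeros(1, 4, length))
        print(k, tuple(out.shape))  # (1, 1, 800) for every kernel size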
configsDao = ConfigsDao()\n try:\n record = configsDao.edit(args)\n except Exception as e:\n abort(500, e)\n return record\n<|end_body_1|>\n\n<|body_start_2|>\n result = False\n ids = args.get('ids')\n configsDao = ConfigsDao()\n try:\n result = configsDao.delete(ids)\n except Exception as e:\n abort(500, e)\n return {'status': result}\n<|end_body_2|>\n\n<|body_start_3|>\n record = None\n configsDao = ConfigsDao()\n id = request.uid\n record = configsDao.getById(id)\n return record\n<|end_body_3|>\n", "class_docstring": "configs module resource main service: add/delete/edit/view", "class_name": "ConfigsAPI", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ConfigsAPI:\n \"\"\"configs module resource main service: add/delete/edit/view\"\"\"\n\n def post(self, args):\n \"\"\"add\"\"\"\n <|body_0|>\n\n def put(self, args):\n \"\"\"edit\"\"\"\n <|body_1|>\n\n def delete(self, args):\n \"\"\"delete\"\"\"\n <|body_2|>\n\n def get(self, args):\n \"\"\"view\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n configs = None\n configsDao = ConfigsDao()\n try:\n configs = configsDao.add(args)\n except Exception as e:\n abort(500, e)\n return configs\n<|end_body_0|>\n\n<|body_start_1|>\n record = None\n configsDao = ConfigsDao()\n try:\n record = configsDao.edit(args)\n except Exception as e:\n abort(500, e)\n return record\n<|end_body_1|>\n\n<|body_start_2|>\n result = False\n ids = args.get('ids')\n configsDao = ConfigsDao()\n try:\n result = configsDao.delete(ids)\n except Exception as e:\n abort(500, e)\n return {'status': result}\n<|end_body_2|>\n\n<|body_start_3|>\n record = None\n configsDao = ConfigsDao()\n id = request.uid\n record = configsDao.getById(id)\n return record\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000306", "length_bytes": 5875, "license_type": "permissive", "methods": [{"docstring": "add", "name": "post", "signature": "def post(self, args)"}, {"docstring": "edit", "name": "put", "signature": "def put(self, args)"}, {"docstring": "delete", "name": "delete", "signature": "def delete(self, args)"}, {"docstring": "view", "name": "get", "signature": "def get(self, args)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_007247", "prompt": "Implement the Python class `ConfigsAPI` described below.\n\nClass description:\nconfigs module resource main service: add/delete/edit/view\n\nMethod signatures and docstrings:\n- def post(self, args): add\n- def put(self, args): edit\n- def delete(self, args): delete\n- def get(self, args): view", "prompted_full_text": "Implement the Python class `ConfigsAPI` described below.\n\nClass description:\nconfigs module resource main service: add/delete/edit/view\n\nMethod signatures and docstrings:\n- def post(self, args): add\n- def put(self, args): edit\n- def delete(self, args): delete\n- def get(self, args): view\n\n<|skeleton|>\nclass ConfigsAPI:\n \"\"\"configs module resource main service: add/delete/edit/view\"\"\"\n\n def post(self, args):\n \"\"\"add\"\"\"\n <|body_0|>\n\n def put(self, args):\n \"\"\"edit\"\"\"\n <|body_1|>\n\n def delete(self, args):\n \"\"\"delete\"\"\"\n <|body_2|>\n\n def get(self, args):\n \"\"\"view\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n configs = None\n configsDao = ConfigsDao()\n try:\n configs = configsDao.add(args)\n except Exception as e:\n abort(500, e)\n return configs\n<|end_body_0|>\n\n<|body_start_1|>\n record = None\n configsDao = ConfigsDao()\n try:\n record = 
configsDao.edit(args)\n except Exception as e:\n abort(500, e)\n return record\n<|end_body_1|>\n\n<|body_start_2|>\n result = False\n ids = args.get('ids')\n configsDao = ConfigsDao()\n try:\n result = configsDao.delete(ids)\n except Exception as e:\n abort(500, e)\n return {'status': result}\n<|end_body_2|>\n\n<|body_start_3|>\n record = None\n configsDao = ConfigsDao()\n id = request.uid\n record = configsDao.getById(id)\n return record\n<|end_body_3|>\n", "revision_id": "0fb1b604185a8bd8b72c1d2d527fb94bbaf46a86", "skeleton": "<|skeleton|>\nclass ConfigsAPI:\n \"\"\"configs module resource main service: add/delete/edit/view\"\"\"\n\n def post(self, args):\n \"\"\"add\"\"\"\n <|body_0|>\n\n def put(self, args):\n \"\"\"edit\"\"\"\n <|body_1|>\n\n def delete(self, args):\n \"\"\"delete\"\"\"\n <|body_2|>\n\n def get(self, args):\n \"\"\"view\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ConfigsAPI:\n \"\"\"configs module resource main service: add/delete/edit/view\"\"\"\n\n def post(self, args):\n \"\"\"add\"\"\"\n configs = None\n configsDao = ConfigsDao()\n try:\n configs = configsDao.add(args)\n except Exception as e:\n abort(500, e)\n return configs\n\n def put(self, args):\n \"\"\"edit\"\"\"\n record = None\n configsDao = ConfigsDao()\n try:\n record = configsDao.edit(args)\n except Exception as e:\n abort(500, e)\n return record\n\n def delete(self, args):\n \"\"\"delete\"\"\"\n result = False\n ids = args.get('ids')\n configsDao = ConfigsDao()\n try:\n result = configsDao.delete(ids)\n except Exception as e:\n abort(500, e)\n return {'status': result}\n\n def get(self, args):\n \"\"\"view\"\"\"\n record = None\n configsDao = ConfigsDao()\n id = request.uid\n record = configsDao.getById(id)\n return record\n", "source": "the_stack_v2_python_sparse", "source_path": "app/modules/configs/resource.py", "source_repo": "daitouli/baoaiback", "split": "val", "star_events_count": 0} {"blob_id": "e3f1e91a022165a526299378047d7249c65a6eaa", "bodies": ["squad_id = request.GET.get('id', None)\nif squad_id is not None:\n squad = get_object_or_404(Squad, id=squad_id)\n serializer = SquadSerializer(squad)\n return JsonResponse({'squads': [serializer.data]}, safe=False)\ntutor_username = request.GET.get('tutor_username', None)\nif tutor_username is not None:\n tutor = get_object_or_404(Tutor, user__username=tutor_username)\n squads = tutor.squads.all()\n serializer = SquadSerializer(squads, many=True)\n return JsonResponse({'squads': serializer.data}, safe=False)\nsquads = Squad.objects.all()\nserializer = SquadSerializer(squads, many=True)\nreturn JsonResponse({'squads': serializer.data}, safe=False)", "res = {'squads': []}\nwith transaction.atomic():\n for data in JSONParser().parse(request)['squads']:\n squad = get_object_or_404(Squad, id=data['id'])\n serializer = SquadSerializer(squad, data=data, partial=True)\n if not serializer.is_valid():\n raise ParseError(serializer.errors)\n serializer.save()\n res['squads'].append(serializer.data)\nreturn JsonResponse(res, status=200, safe=False)", "res = {'squads': []}\nwith transaction.atomic():\n for data in JSONParser().parse(request)['squads']:\n serializer = SquadSerializer(data=data)\n if not serializer.is_valid():\n raise ParseError(serializer.errors)\n serializer.save()\n res['squads'].append(serializer.data)\nreturn JsonResponse(res, status=200, safe=False)", "with transaction.atomic():\n for data in 
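Editor's note: the ConfigsAPI record above delegates all persistence to a ConfigsDao with add/edit/delete/getById and reports failures through Flask's abort(500, e). The DAO itself is not part of the record; a hypothetical in-memory stub sketching the contract the resource calls into (every detail below is an assumption):

    class ConfigsDao:
        _records = {}  # stand-in for the real storage backend

        def add(self, args):
            record = dict(args)
            self._records[record['id']] = record
            return record

        def edit(self, args):
            record = self._records[args['id']]
            record.update(args)
            return record

        def delete(self, ids):
            for record_id in ids:
                self._records.pop(record_id, None)
            return True

        def getById(self, record_id):
            return self._records.get(record_id)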
JSONParser().parse(request)['squads']:\n Squad.objects.get(id=data['id']).delete()\nreturn Response()"], "bodies_text": "<|body_start_0|>\n squad_id = request.GET.get('id', None)\n if squad_id is not None:\n squad = get_object_or_404(Squad, id=squad_id)\n serializer = SquadSerializer(squad)\n return JsonResponse({'squads': [serializer.data]}, safe=False)\n tutor_username = request.GET.get('tutor_username', None)\n if tutor_username is not None:\n tutor = get_object_or_404(Tutor, user__username=tutor_username)\n squads = tutor.squads.all()\n serializer = SquadSerializer(squads, many=True)\n return JsonResponse({'squads': serializer.data}, safe=False)\n squads = Squad.objects.all()\n serializer = SquadSerializer(squads, many=True)\n return JsonResponse({'squads': serializer.data}, safe=False)\n<|end_body_0|>\n\n<|body_start_1|>\n res = {'squads': []}\n with transaction.atomic():\n for data in JSONParser().parse(request)['squads']:\n squad = get_object_or_404(Squad, id=data['id'])\n serializer = SquadSerializer(squad, data=data, partial=True)\n if not serializer.is_valid():\n raise ParseError(serializer.errors)\n serializer.save()\n res['squads'].append(serializer.data)\n return JsonResponse(res, status=200, safe=False)\n<|end_body_1|>\n\n<|body_start_2|>\n res = {'squads': []}\n with transaction.atomic():\n for data in JSONParser().parse(request)['squads']:\n serializer = SquadSerializer(data=data)\n if not serializer.is_valid():\n raise ParseError(serializer.errors)\n serializer.save()\n res['squads'].append(serializer.data)\n return JsonResponse(res, status=200, safe=False)\n<|end_body_2|>\n\n<|body_start_3|>\n with transaction.atomic():\n for data in JSONParser().parse(request)['squads']:\n Squad.objects.get(id=data['id']).delete()\n return Response()\n<|end_body_3|>\n", "class_docstring": "班级view", "class_name": "Squads", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Squads:\n \"\"\"班级view\"\"\"\n\n def get(self, request):\n \"\"\"查询班级\"\"\"\n <|body_0|>\n\n def put(self, request):\n \"\"\"修改班级\"\"\"\n <|body_1|>\n\n def post(self, request):\n \"\"\"增加班级\"\"\"\n <|body_2|>\n\n def delete(self, request):\n \"\"\"删除班级\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n squad_id = request.GET.get('id', None)\n if squad_id is not None:\n squad = get_object_or_404(Squad, id=squad_id)\n serializer = SquadSerializer(squad)\n return JsonResponse({'squads': [serializer.data]}, safe=False)\n tutor_username = request.GET.get('tutor_username', None)\n if tutor_username is not None:\n tutor = get_object_or_404(Tutor, user__username=tutor_username)\n squads = tutor.squads.all()\n serializer = SquadSerializer(squads, many=True)\n return JsonResponse({'squads': serializer.data}, safe=False)\n squads = Squad.objects.all()\n serializer = SquadSerializer(squads, many=True)\n return JsonResponse({'squads': serializer.data}, safe=False)\n<|end_body_0|>\n\n<|body_start_1|>\n res = {'squads': []}\n with transaction.atomic():\n for data in JSONParser().parse(request)['squads']:\n squad = get_object_or_404(Squad, id=data['id'])\n serializer = SquadSerializer(squad, data=data, partial=True)\n if not serializer.is_valid():\n raise ParseError(serializer.errors)\n serializer.save()\n res['squads'].append(serializer.data)\n return JsonResponse(res, status=200, safe=False)\n<|end_body_1|>\n\n<|body_start_2|>\n res = {'squads': []}\n with transaction.atomic():\n for data in JSONParser().parse(request)['squads']:\n serializer = 
SquadSerializer(data=data)\n if not serializer.is_valid():\n raise ParseError(serializer.errors)\n serializer.save()\n res['squads'].append(serializer.data)\n return JsonResponse(res, status=200, safe=False)\n<|end_body_2|>\n\n<|body_start_3|>\n with transaction.atomic():\n for data in JSONParser().parse(request)['squads']:\n Squad.objects.get(id=data['id']).delete()\n return Response()\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000307", "length_bytes": 16053, "license_type": "permissive", "methods": [{"docstring": "查询班级", "name": "get", "signature": "def get(self, request)"}, {"docstring": "修改班级", "name": "put", "signature": "def put(self, request)"}, {"docstring": "增加班级", "name": "post", "signature": "def post(self, request)"}, {"docstring": "删除班级", "name": "delete", "signature": "def delete(self, request)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_005674", "prompt": "Implement the Python class `Squads` described below.\n\nClass description:\n班级view\n\nMethod signatures and docstrings:\n- def get(self, request): 查询班级\n- def put(self, request): 修改班级\n- def post(self, request): 增加班级\n- def delete(self, request): 删除班级", "prompted_full_text": "Implement the Python class `Squads` described below.\n\nClass description:\n班级view\n\nMethod signatures and docstrings:\n- def get(self, request): 查询班级\n- def put(self, request): 修改班级\n- def post(self, request): 增加班级\n- def delete(self, request): 删除班级\n\n<|skeleton|>\nclass Squads:\n \"\"\"班级view\"\"\"\n\n def get(self, request):\n \"\"\"查询班级\"\"\"\n <|body_0|>\n\n def put(self, request):\n \"\"\"修改班级\"\"\"\n <|body_1|>\n\n def post(self, request):\n \"\"\"增加班级\"\"\"\n <|body_2|>\n\n def delete(self, request):\n \"\"\"删除班级\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n squad_id = request.GET.get('id', None)\n if squad_id is not None:\n squad = get_object_or_404(Squad, id=squad_id)\n serializer = SquadSerializer(squad)\n return JsonResponse({'squads': [serializer.data]}, safe=False)\n tutor_username = request.GET.get('tutor_username', None)\n if tutor_username is not None:\n tutor = get_object_or_404(Tutor, user__username=tutor_username)\n squads = tutor.squads.all()\n serializer = SquadSerializer(squads, many=True)\n return JsonResponse({'squads': serializer.data}, safe=False)\n squads = Squad.objects.all()\n serializer = SquadSerializer(squads, many=True)\n return JsonResponse({'squads': serializer.data}, safe=False)\n<|end_body_0|>\n\n<|body_start_1|>\n res = {'squads': []}\n with transaction.atomic():\n for data in JSONParser().parse(request)['squads']:\n squad = get_object_or_404(Squad, id=data['id'])\n serializer = SquadSerializer(squad, data=data, partial=True)\n if not serializer.is_valid():\n raise ParseError(serializer.errors)\n serializer.save()\n res['squads'].append(serializer.data)\n return JsonResponse(res, status=200, safe=False)\n<|end_body_1|>\n\n<|body_start_2|>\n res = {'squads': []}\n with transaction.atomic():\n for data in JSONParser().parse(request)['squads']:\n serializer = SquadSerializer(data=data)\n if not serializer.is_valid():\n raise ParseError(serializer.errors)\n serializer.save()\n res['squads'].append(serializer.data)\n return JsonResponse(res, status=200, safe=False)\n<|end_body_2|>\n\n<|body_start_3|>\n with transaction.atomic():\n for data in JSONParser().parse(request)['squads']:\n Squad.objects.get(id=data['id']).delete()\n return Response()\n<|end_body_3|>\n", "revision_id": "7aaa1be773718de1beb3ce0080edca7c4114b7ad", "skeleton": "<|skeleton|>\nclass 
Squads:\n \"\"\"班级view\"\"\"\n\n def get(self, request):\n \"\"\"查询班级\"\"\"\n <|body_0|>\n\n def put(self, request):\n \"\"\"修改班级\"\"\"\n <|body_1|>\n\n def post(self, request):\n \"\"\"增加班级\"\"\"\n <|body_2|>\n\n def delete(self, request):\n \"\"\"删除班级\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Squads:\n \"\"\"班级view\"\"\"\n\n def get(self, request):\n \"\"\"查询班级\"\"\"\n squad_id = request.GET.get('id', None)\n if squad_id is not None:\n squad = get_object_or_404(Squad, id=squad_id)\n serializer = SquadSerializer(squad)\n return JsonResponse({'squads': [serializer.data]}, safe=False)\n tutor_username = request.GET.get('tutor_username', None)\n if tutor_username is not None:\n tutor = get_object_or_404(Tutor, user__username=tutor_username)\n squads = tutor.squads.all()\n serializer = SquadSerializer(squads, many=True)\n return JsonResponse({'squads': serializer.data}, safe=False)\n squads = Squad.objects.all()\n serializer = SquadSerializer(squads, many=True)\n return JsonResponse({'squads': serializer.data}, safe=False)\n\n def put(self, request):\n \"\"\"修改班级\"\"\"\n res = {'squads': []}\n with transaction.atomic():\n for data in JSONParser().parse(request)['squads']:\n squad = get_object_or_404(Squad, id=data['id'])\n serializer = SquadSerializer(squad, data=data, partial=True)\n if not serializer.is_valid():\n raise ParseError(serializer.errors)\n serializer.save()\n res['squads'].append(serializer.data)\n return JsonResponse(res, status=200, safe=False)\n\n def post(self, request):\n \"\"\"增加班级\"\"\"\n res = {'squads': []}\n with transaction.atomic():\n for data in JSONParser().parse(request)['squads']:\n serializer = SquadSerializer(data=data)\n if not serializer.is_valid():\n raise ParseError(serializer.errors)\n serializer.save()\n res['squads'].append(serializer.data)\n return JsonResponse(res, status=200, safe=False)\n\n def delete(self, request):\n \"\"\"删除班级\"\"\"\n with transaction.atomic():\n for data in JSONParser().parse(request)['squads']:\n Squad.objects.get(id=data['id']).delete()\n return Response()\n", "source": "the_stack_v2_python_sparse", "source_path": "user/views.py", "source_repo": "MIXISAMA/MIS-backend", "split": "val", "star_events_count": 0} {"blob_id": "42d1706789a6fd65c652127cba0347966b3d120d", "bodies": ["def _generate(open, close, str, l=[]):\n if open == 0 and close == 0:\n l.append(str)\n if open > 0:\n _generate(open - 1, close + 1, str + '(', l)\n if close > 0:\n _generate(open, close - 1, str + ')', l)\n return l\nreturn _generate(n, 0, '')", "res = []\n\ndef is_valid(s):\n b = 0\n for c in s:\n if c == '(':\n b += 1\n else:\n b -= 1\n if b < 0:\n return False\n return b == 0\n\ndef generate_all(curr=[]):\n if 2 * n == len(curr):\n if is_valid(curr):\n res.append(''.join(curr))\n else:\n curr.append('(')\n generate_all(curr)\n curr.pop()\n curr.append(')')\n generate_all(curr)\n curr.pop()\ngenerate_all()\nreturn res", "if N == 0:\n return ['']\nans = []\nfor c in range(N):\n for left in self.generateParenthesis(c):\n for right in self.generateParenthesis(N - 1 - c):\n ans.append('({}){}'.format(left, right))\nreturn ans"], "bodies_text": "<|body_start_0|>\n def _generate(open, close, str, l=[]):\n if open == 0 and close == 0:\n l.append(str)\n if open > 0:\n _generate(open - 1, close + 1, str + '(', l)\n if close > 0:\n _generate(open, close - 1, str + ')', l)\n return l\n return _generate(n, 0, 
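Editor's note: the Squads record above wraps each bulk operation in transaction.atomic(), so one invalid item (ParseError) rolls back the whole batch rather than leaving it half-applied. A hypothetical PUT request body matching the {'squads': [...]} shape the view parses; the 'name' field is an assumption, since the record only shows that 'id' is required:

    payload = {
        'squads': [
            {'id': 1, 'name': 'Class A'},
            {'id': 2, 'name': 'Class B'},
        ]
    }
    # Each item is applied with SquadSerializer(squad, data=data, partial=True),
    # so only the fields present in the item are updated.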
'')\n<|end_body_0|>\n\n<|body_start_1|>\n res = []\n\n def is_valid(s):\n b = 0\n for c in s:\n if c == '(':\n b += 1\n else:\n b -= 1\n if b < 0:\n return False\n return b == 0\n\n def generate_all(curr=[]):\n if 2 * n == len(curr):\n if is_valid(curr):\n res.append(''.join(curr))\n else:\n curr.append('(')\n generate_all(curr)\n curr.pop()\n curr.append(')')\n generate_all(curr)\n curr.pop()\n generate_all()\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n if N == 0:\n return ['']\n ans = []\n for c in range(N):\n for left in self.generateParenthesis(c):\n for right in self.generateParenthesis(N - 1 - c):\n ans.append('({}){}'.format(left, right))\n return ans\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def generateParenthesis(self, n: int) -> List[str]:\n \"\"\"Recursively generate all possible parenthesis. For each open paranthesis, there must be a close paranthesis. At first, we have n opens at hand and we try to use them all. When all open&closed pars are left, we add the string to the result.\"\"\"\n <|body_0|>\n\n def generateParenthesis2(self, n: int) -> List[str]:\n \"\"\"Brute force solution, try all possible combinations and check if they are valid.\"\"\"\n <|body_1|>\n\n def generateParenthesis3(self, N):\n \"\"\"Considers a closure number c for each possible n, which are guaranteed to have ( at indice 0 and ) at 2*c+1. Then it adds the other valid sequences to indices between them.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def _generate(open, close, str, l=[]):\n if open == 0 and close == 0:\n l.append(str)\n if open > 0:\n _generate(open - 1, close + 1, str + '(', l)\n if close > 0:\n _generate(open, close - 1, str + ')', l)\n return l\n return _generate(n, 0, '')\n<|end_body_0|>\n\n<|body_start_1|>\n res = []\n\n def is_valid(s):\n b = 0\n for c in s:\n if c == '(':\n b += 1\n else:\n b -= 1\n if b < 0:\n return False\n return b == 0\n\n def generate_all(curr=[]):\n if 2 * n == len(curr):\n if is_valid(curr):\n res.append(''.join(curr))\n else:\n curr.append('(')\n generate_all(curr)\n curr.pop()\n curr.append(')')\n generate_all(curr)\n curr.pop()\n generate_all()\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n if N == 0:\n return ['']\n ans = []\n for c in range(N):\n for left in self.generateParenthesis(c):\n for right in self.generateParenthesis(N - 1 - c):\n ans.append('({}){}'.format(left, right))\n return ans\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000308", "length_bytes": 1685, "license_type": "no_license", "methods": [{"docstring": "Recursively generate all possible parenthesis. For each open paranthesis, there must be a close paranthesis. At first, we have n opens at hand and we try to use them all. When all open&closed pars are left, we add the string to the result.", "name": "generateParenthesis", "signature": "def generateParenthesis(self, n: int) -> List[str]"}, {"docstring": "Brute force solution, try all possible combinations and check if they are valid.", "name": "generateParenthesis2", "signature": "def generateParenthesis2(self, n: int) -> List[str]"}, {"docstring": "Considers a closure number c for each possible n, which are guaranteed to have ( at indice 0 and ) at 2*c+1. 
Then it adds the other valid sequences to indices between them.", "name": "generateParenthesis3", "signature": "def generateParenthesis3(self, N)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001294", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def generateParenthesis(self, n: int) -> List[str]: Recursively generate all possible parenthesis. For each open paranthesis, there must be a close paranthesis. At first, we have n opens at hand and we try to use them all. When all open&closed pars are left, we add the string to the result.\n- def generateParenthesis2(self, n: int) -> List[str]: Brute force solution, try all possible combinations and check if they are valid.\n- def generateParenthesis3(self, N): Considers a closure number c for each possible n, which are guaranteed to have ( at indice 0 and ) at 2*c+1. Then it adds the other valid sequences to indices between them.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def generateParenthesis(self, n: int) -> List[str]: Recursively generate all possible parenthesis. For each open paranthesis, there must be a close paranthesis. At first, we have n opens at hand and we try to use them all. When all open&closed pars are left, we add the string to the result.\n- def generateParenthesis2(self, n: int) -> List[str]: Brute force solution, try all possible combinations and check if they are valid.\n- def generateParenthesis3(self, N): Considers a closure number c for each possible n, which are guaranteed to have ( at indice 0 and ) at 2*c+1. Then it adds the other valid sequences to indices between them.\n\n<|skeleton|>\nclass Solution:\n\n def generateParenthesis(self, n: int) -> List[str]:\n \"\"\"Recursively generate all possible parenthesis. For each open paranthesis, there must be a close paranthesis. At first, we have n opens at hand and we try to use them all. When all open&closed pars are left, we add the string to the result.\"\"\"\n <|body_0|>\n\n def generateParenthesis2(self, n: int) -> List[str]:\n \"\"\"Brute force solution, try all possible combinations and check if they are valid.\"\"\"\n <|body_1|>\n\n def generateParenthesis3(self, N):\n \"\"\"Considers a closure number c for each possible n, which are guaranteed to have ( at indice 0 and ) at 2*c+1. 
Then it adds the other valid sequences to indices between them.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def _generate(open, close, str, l=[]):\n if open == 0 and close == 0:\n l.append(str)\n if open > 0:\n _generate(open - 1, close + 1, str + '(', l)\n if close > 0:\n _generate(open, close - 1, str + ')', l)\n return l\n return _generate(n, 0, '')\n<|end_body_0|>\n\n<|body_start_1|>\n res = []\n\n def is_valid(s):\n b = 0\n for c in s:\n if c == '(':\n b += 1\n else:\n b -= 1\n if b < 0:\n return False\n return b == 0\n\n def generate_all(curr=[]):\n if 2 * n == len(curr):\n if is_valid(curr):\n res.append(''.join(curr))\n else:\n curr.append('(')\n generate_all(curr)\n curr.pop()\n curr.append(')')\n generate_all(curr)\n curr.pop()\n generate_all()\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n if N == 0:\n return ['']\n ans = []\n for c in range(N):\n for left in self.generateParenthesis(c):\n for right in self.generateParenthesis(N - 1 - c):\n ans.append('({}){}'.format(left, right))\n return ans\n<|end_body_2|>\n", "revision_id": "9a0e41d2d718803eb297430995e464fcab472a55", "skeleton": "<|skeleton|>\nclass Solution:\n\n def generateParenthesis(self, n: int) -> List[str]:\n \"\"\"Recursively generate all possible parenthesis. For each open paranthesis, there must be a close paranthesis. At first, we have n opens at hand and we try to use them all. When all open&closed pars are left, we add the string to the result.\"\"\"\n <|body_0|>\n\n def generateParenthesis2(self, n: int) -> List[str]:\n \"\"\"Brute force solution, try all possible combinations and check if they are valid.\"\"\"\n <|body_1|>\n\n def generateParenthesis3(self, N):\n \"\"\"Considers a closure number c for each possible n, which are guaranteed to have ( at indice 0 and ) at 2*c+1. Then it adds the other valid sequences to indices between them.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def generateParenthesis(self, n: int) -> List[str]:\n \"\"\"Recursively generate all possible parenthesis. For each open paranthesis, there must be a close paranthesis. At first, we have n opens at hand and we try to use them all. When all open&closed pars are left, we add the string to the result.\"\"\"\n def _generate(open, close, str, l=[]):\n if open == 0 and close == 0:\n l.append(str)\n if open > 0:\n _generate(open - 1, close + 1, str + '(', l)\n if close > 0:\n _generate(open, close - 1, str + ')', l)\n return l\n return _generate(n, 0, '')\n\n def generateParenthesis2(self, n: int) -> List[str]:\n \"\"\"Brute force solution, try all possible combinations and check if they are valid.\"\"\"\n res = []\n\n def is_valid(s):\n b = 0\n for c in s:\n if c == '(':\n b += 1\n else:\n b -= 1\n if b < 0:\n return False\n return b == 0\n\n def generate_all(curr=[]):\n if 2 * n == len(curr):\n if is_valid(curr):\n res.append(''.join(curr))\n else:\n curr.append('(')\n generate_all(curr)\n curr.pop()\n curr.append(')')\n generate_all(curr)\n curr.pop()\n generate_all()\n return res\n\n def generateParenthesis3(self, N):\n \"\"\"Considers a closure number c for each possible n, which are guaranteed to have ( at indice 0 and ) at 2*c+1. 
Then it adds the other valid sequences to indices between them.\"\"\"\n if N == 0:\n return ['']\n ans = []\n for c in range(N):\n for left in self.generateParenthesis(c):\n for right in self.generateParenthesis(N - 1 - c):\n ans.append('({}){}'.format(left, right))\n return ans\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/22.py", "source_repo": "evinpinar/competitive_python", "split": "val", "star_events_count": 0} {"blob_id": "5d6142f85da94de9f03aa096e3e2037932f1f3ea", "bodies": ["timestamp = plist_key.get(plist_value_name, None)\nif timestamp is None:\n return None\nreturn dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)", "for _, process_name, process_values in self._RecurseKey(top_level, depth=1):\n if process_name == 'CacheVersion':\n continue\n for apple_identifier, apple_identifier_values in process_values.items():\n event_data = IOSIdstatusacheEventData()\n event_data.apple_identifier = apple_identifier\n event_data.lookup_time = self._GetDateTimeValueFromPlistKey(apple_identifier_values, 'LookupDate')\n event_data.process_name = process_name\n parser_mediator.ProduceEventData(event_data)"], "bodies_text": "<|body_start_0|>\n timestamp = plist_key.get(plist_value_name, None)\n if timestamp is None:\n return None\n return dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)\n<|end_body_0|>\n\n<|body_start_1|>\n for _, process_name, process_values in self._RecurseKey(top_level, depth=1):\n if process_name == 'CacheVersion':\n continue\n for apple_identifier, apple_identifier_values in process_values.items():\n event_data = IOSIdstatusacheEventData()\n event_data.apple_identifier = apple_identifier\n event_data.lookup_time = self._GetDateTimeValueFromPlistKey(apple_identifier_values, 'LookupDate')\n event_data.process_name = process_name\n parser_mediator.ProduceEventData(event_data)\n<|end_body_1|>\n", "class_docstring": "Plist parser plugin for identity services status cache files. Identity services status cache plist files are typically named: com.apple.identityservices.idstatuscache.plist", "class_name": "IOSIdstatusachePlistPlugin", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IOSIdstatusachePlistPlugin:\n \"\"\"Plist parser plugin for identity services status cache files. Identity services status cache plist files are typically named: com.apple.identityservices.idstatuscache.plist\"\"\"\n\n def _GetDateTimeValueFromPlistKey(self, plist_key, plist_value_name):\n \"\"\"Retrieves a date and time value from a specific value in a plist key. Args: plist_key (object): plist key. plist_value_name (str): name of the value in the plist key. Returns: dfdatetime.TimeElementsInMicroseconds: date and time or None if not available.\"\"\"\n <|body_0|>\n\n def _ParsePlist(self, parser_mediator, match=None, top_level=None, **unused_kwargs):\n \"\"\"Extracts identity services status cache information from the plist. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. 
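Editor's note: the closure-number method at the end of the parenthesis record above builds every sequence as '(' + left + ')' + right, giving the Catalan recurrence C(n) = sum over c of C(c) * C(n - 1 - c). A self-contained sketch of that recursion, written independently of the record's methods (which recurse through generateParenthesis and share a mutable default list):

    from functools import lru_cache

    @lru_cache(maxsize=None)
    def parens(n):
        # Every valid string is '(' + left + ')' + right, where left has c pairs.
        if n == 0:
            return ('',)
        out = []
        for c in range(n):
            for left in parens(c):
                for right in parens(n - 1 - c):
                    out.append('({}){}'.format(left, right))
        return tuple(out)

    print(parens(3))
    # ('()()()', '()(())', '(())()', '(()())', '((()))'), Catalan(3) = 5 strings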
top_level (Optional[dict[str, object]]): plist top-level item.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n timestamp = plist_key.get(plist_value_name, None)\n if timestamp is None:\n return None\n return dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)\n<|end_body_0|>\n\n<|body_start_1|>\n for _, process_name, process_values in self._RecurseKey(top_level, depth=1):\n if process_name == 'CacheVersion':\n continue\n for apple_identifier, apple_identifier_values in process_values.items():\n event_data = IOSIdstatusacheEventData()\n event_data.apple_identifier = apple_identifier\n event_data.lookup_time = self._GetDateTimeValueFromPlistKey(apple_identifier_values, 'LookupDate')\n event_data.process_name = process_name\n parser_mediator.ProduceEventData(event_data)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000309", "length_bytes": 2979, "license_type": "permissive", "methods": [{"docstring": "Retrieves a date and time value from a specific value in a plist key. Args: plist_key (object): plist key. plist_value_name (str): name of the value in the plist key. Returns: dfdatetime.TimeElementsInMicroseconds: date and time or None if not available.", "name": "_GetDateTimeValueFromPlistKey", "signature": "def _GetDateTimeValueFromPlistKey(self, plist_key, plist_value_name)"}, {"docstring": "Extracts identity services status cache information from the plist. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. top_level (Optional[dict[str, object]]): plist top-level item.", "name": "_ParsePlist", "signature": "def _ParsePlist(self, parser_mediator, match=None, top_level=None, **unused_kwargs)"}], "n_methods": 2, "prompt": "Implement the Python class `IOSIdstatusachePlistPlugin` described below.\n\nClass description:\nPlist parser plugin for identity services status cache files. Identity services status cache plist files are typically named: com.apple.identityservices.idstatuscache.plist\n\nMethod signatures and docstrings:\n- def _GetDateTimeValueFromPlistKey(self, plist_key, plist_value_name): Retrieves a date and time value from a specific value in a plist key. Args: plist_key (object): plist key. plist_value_name (str): name of the value in the plist key. Returns: dfdatetime.TimeElementsInMicroseconds: date and time or None if not available.\n- def _ParsePlist(self, parser_mediator, match=None, top_level=None, **unused_kwargs): Extracts identity services status cache information from the plist. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. top_level (Optional[dict[str, object]]): plist top-level item.", "prompted_full_text": "Implement the Python class `IOSIdstatusachePlistPlugin` described below.\n\nClass description:\nPlist parser plugin for identity services status cache files. Identity services status cache plist files are typically named: com.apple.identityservices.idstatuscache.plist\n\nMethod signatures and docstrings:\n- def _GetDateTimeValueFromPlistKey(self, plist_key, plist_value_name): Retrieves a date and time value from a specific value in a plist key. Args: plist_key (object): plist key. plist_value_name (str): name of the value in the plist key. 
Returns: dfdatetime.TimeElementsInMicroseconds: date and time or None if not available.\n- def _ParsePlist(self, parser_mediator, match=None, top_level=None, **unused_kwargs): Extracts identity services status cache information from the plist. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. top_level (Optional[dict[str, object]]): plist top-level item.\n\n<|skeleton|>\nclass IOSIdstatusachePlistPlugin:\n \"\"\"Plist parser plugin for identity services status cache files. Identity services status cache plist files are typically named: com.apple.identityservices.idstatuscache.plist\"\"\"\n\n def _GetDateTimeValueFromPlistKey(self, plist_key, plist_value_name):\n \"\"\"Retrieves a date and time value from a specific value in a plist key. Args: plist_key (object): plist key. plist_value_name (str): name of the value in the plist key. Returns: dfdatetime.TimeElementsInMicroseconds: date and time or None if not available.\"\"\"\n <|body_0|>\n\n def _ParsePlist(self, parser_mediator, match=None, top_level=None, **unused_kwargs):\n \"\"\"Extracts identity services status cache information from the plist. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. top_level (Optional[dict[str, object]]): plist top-level item.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n timestamp = plist_key.get(plist_value_name, None)\n if timestamp is None:\n return None\n return dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)\n<|end_body_0|>\n\n<|body_start_1|>\n for _, process_name, process_values in self._RecurseKey(top_level, depth=1):\n if process_name == 'CacheVersion':\n continue\n for apple_identifier, apple_identifier_values in process_values.items():\n event_data = IOSIdstatusacheEventData()\n event_data.apple_identifier = apple_identifier\n event_data.lookup_time = self._GetDateTimeValueFromPlistKey(apple_identifier_values, 'LookupDate')\n event_data.process_name = process_name\n parser_mediator.ProduceEventData(event_data)\n<|end_body_1|>\n", "revision_id": "d6022f8cfebfddf2d08ab2d300a41b61f3349933", "skeleton": "<|skeleton|>\nclass IOSIdstatusachePlistPlugin:\n \"\"\"Plist parser plugin for identity services status cache files. Identity services status cache plist files are typically named: com.apple.identityservices.idstatuscache.plist\"\"\"\n\n def _GetDateTimeValueFromPlistKey(self, plist_key, plist_value_name):\n \"\"\"Retrieves a date and time value from a specific value in a plist key. Args: plist_key (object): plist key. plist_value_name (str): name of the value in the plist key. Returns: dfdatetime.TimeElementsInMicroseconds: date and time or None if not available.\"\"\"\n <|body_0|>\n\n def _ParsePlist(self, parser_mediator, match=None, top_level=None, **unused_kwargs):\n \"\"\"Extracts identity services status cache information from the plist. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. 
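The CocoaTime object returned by the body in this record wraps an Apple "Cocoa" timestamp, which counts seconds from 2001-01-01 00:00:00 UTC rather than the Unix epoch. A self-contained sketch of the same conversion using only the standard library; the helper name is illustrative, and the recorded code delegates this to the dfdatetime package instead:

from datetime import datetime, timedelta, timezone

COCOA_EPOCH = datetime(2001, 1, 1, tzinfo=timezone.utc)

def cocoa_timestamp_to_datetime(timestamp):
    # Mirrors _GetDateTimeValueFromPlistKey: None stays None, anything else
    # is treated as seconds relative to the Cocoa epoch.
    if timestamp is None:
        return None
    return COCOA_EPOCH + timedelta(seconds=timestamp)

print(cocoa_timestamp_to_datetime(0))        # 2001-01-01 00:00:00+00:00
print(cocoa_timestamp_to_datetime(86400.5))  # 2001-01-02 00:00:00.500000+00:00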
top_level (Optional[dict[str, object]]): plist top-level item.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class IOSIdstatusachePlistPlugin:\n \"\"\"Plist parser plugin for identity services status cache files. Identity services status cache plist files are typically named: com.apple.identityservices.idstatuscache.plist\"\"\"\n\n def _GetDateTimeValueFromPlistKey(self, plist_key, plist_value_name):\n \"\"\"Retrieves a date and time value from a specific value in a plist key. Args: plist_key (object): plist key. plist_value_name (str): name of the value in the plist key. Returns: dfdatetime.TimeElementsInMicroseconds: date and time or None if not available.\"\"\"\n timestamp = plist_key.get(plist_value_name, None)\n if timestamp is None:\n return None\n return dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)\n\n def _ParsePlist(self, parser_mediator, match=None, top_level=None, **unused_kwargs):\n \"\"\"Extracts identity services status cache information from the plist. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. top_level (Optional[dict[str, object]]): plist top-level item.\"\"\"\n for _, process_name, process_values in self._RecurseKey(top_level, depth=1):\n if process_name == 'CacheVersion':\n continue\n for apple_identifier, apple_identifier_values in process_values.items():\n event_data = IOSIdstatusacheEventData()\n event_data.apple_identifier = apple_identifier\n event_data.lookup_time = self._GetDateTimeValueFromPlistKey(apple_identifier_values, 'LookupDate')\n event_data.process_name = process_name\n parser_mediator.ProduceEventData(event_data)\n", "source": "the_stack_v2_python_sparse", "source_path": "plaso/parsers/plist_plugins/ios_identityservices.py", "source_repo": "log2timeline/plaso", "split": "val", "star_events_count": 1506} {"blob_id": "c5f142fcde19f8fd33c2da6104bffa35f1732534", "bodies": ["if not self.has_feature(request, organization):\n return Response(status=404)\nproject = self.get_project(request, organization)\nbase_filter = {'organization': organization, 'owner': request.user}\nwith transaction.atomic():\n serializer = KeyTransactionSerializer(data=request.data, context=base_filter)\n if serializer.is_valid():\n data = serializer.validated_data\n base_filter['transaction'] = data['transaction']\n base_filter['project'] = project\n if KeyTransaction.objects.filter(**base_filter).exists():\n return Response(status=204)\n try:\n KeyTransaction.objects.create(**base_filter)\n return Response(status=201)\n except IntegrityError:\n return Response(status=204)\n return Response(serializer.errors, status=400)", "if not self.has_feature(request, organization):\n return Response(status=404)\nproject = self.get_project(request, organization)\ntransaction = request.data['transaction']\ntry:\n model = KeyTransaction.objects.get(transaction=transaction, organization=organization, project=project, owner=request.user)\nexcept KeyTransaction.DoesNotExist:\n return Response(status=204)\nmodel.delete()\nreturn Response(status=204)"], "bodies_text": "<|body_start_0|>\n if not self.has_feature(request, organization):\n return Response(status=404)\n project = self.get_project(request, organization)\n base_filter = {'organization': organization, 'owner': request.user}\n with transaction.atomic():\n serializer = 
KeyTransactionSerializer(data=request.data, context=base_filter)\n if serializer.is_valid():\n data = serializer.validated_data\n base_filter['transaction'] = data['transaction']\n base_filter['project'] = project\n if KeyTransaction.objects.filter(**base_filter).exists():\n return Response(status=204)\n try:\n KeyTransaction.objects.create(**base_filter)\n return Response(status=201)\n except IntegrityError:\n return Response(status=204)\n return Response(serializer.errors, status=400)\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.has_feature(request, organization):\n return Response(status=404)\n project = self.get_project(request, organization)\n transaction = request.data['transaction']\n try:\n model = KeyTransaction.objects.get(transaction=transaction, organization=organization, project=project, owner=request.user)\n except KeyTransaction.DoesNotExist:\n return Response(status=204)\n model.delete()\n return Response(status=204)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "KeyTransactionEndpoint", "detected_licenses": ["BUSL-1.1", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass KeyTransactionEndpoint:\n\n def post(self, request, organization):\n \"\"\"Create a Key Transaction\"\"\"\n <|body_0|>\n\n def delete(self, request, organization):\n \"\"\"Remove a Key transaction for a user\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not self.has_feature(request, organization):\n return Response(status=404)\n project = self.get_project(request, organization)\n base_filter = {'organization': organization, 'owner': request.user}\n with transaction.atomic():\n serializer = KeyTransactionSerializer(data=request.data, context=base_filter)\n if serializer.is_valid():\n data = serializer.validated_data\n base_filter['transaction'] = data['transaction']\n base_filter['project'] = project\n if KeyTransaction.objects.filter(**base_filter).exists():\n return Response(status=204)\n try:\n KeyTransaction.objects.create(**base_filter)\n return Response(status=201)\n except IntegrityError:\n return Response(status=204)\n return Response(serializer.errors, status=400)\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.has_feature(request, organization):\n return Response(status=404)\n project = self.get_project(request, organization)\n transaction = request.data['transaction']\n try:\n model = KeyTransaction.objects.get(transaction=transaction, organization=organization, project=project, owner=request.user)\n except KeyTransaction.DoesNotExist:\n return Response(status=204)\n model.delete()\n return Response(status=204)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000310", "length_bytes": 3249, "license_type": "permissive", "methods": [{"docstring": "Create a Key Transaction", "name": "post", "signature": "def post(self, request, organization)"}, {"docstring": "Remove a Key transaction for a user", "name": "delete", "signature": "def delete(self, request, organization)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000689", "prompt": "Implement the Python class `KeyTransactionEndpoint` described below.\n\nClass description:\nImplement the KeyTransactionEndpoint class.\n\nMethod signatures and docstrings:\n- def post(self, request, organization): Create a Key Transaction\n- def delete(self, request, organization): Remove a Key transaction for a user", "prompted_full_text": "Implement the Python class `KeyTransactionEndpoint` described below.\n\nClass description:\nImplement the 
KeyTransactionEndpoint class.\n\nMethod signatures and docstrings:\n- def post(self, request, organization): Create a Key Transaction\n- def delete(self, request, organization): Remove a Key transaction for a user\n\n<|skeleton|>\nclass KeyTransactionEndpoint:\n\n def post(self, request, organization):\n \"\"\"Create a Key Transaction\"\"\"\n <|body_0|>\n\n def delete(self, request, organization):\n \"\"\"Remove a Key transaction for a user\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not self.has_feature(request, organization):\n return Response(status=404)\n project = self.get_project(request, organization)\n base_filter = {'organization': organization, 'owner': request.user}\n with transaction.atomic():\n serializer = KeyTransactionSerializer(data=request.data, context=base_filter)\n if serializer.is_valid():\n data = serializer.validated_data\n base_filter['transaction'] = data['transaction']\n base_filter['project'] = project\n if KeyTransaction.objects.filter(**base_filter).exists():\n return Response(status=204)\n try:\n KeyTransaction.objects.create(**base_filter)\n return Response(status=201)\n except IntegrityError:\n return Response(status=204)\n return Response(serializer.errors, status=400)\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.has_feature(request, organization):\n return Response(status=404)\n project = self.get_project(request, organization)\n transaction = request.data['transaction']\n try:\n model = KeyTransaction.objects.get(transaction=transaction, organization=organization, project=project, owner=request.user)\n except KeyTransaction.DoesNotExist:\n return Response(status=204)\n model.delete()\n return Response(status=204)\n<|end_body_1|>\n", "revision_id": "63d698f5294f64a8c206b4c741e2a11be1f9a9be", "skeleton": "<|skeleton|>\nclass KeyTransactionEndpoint:\n\n def post(self, request, organization):\n \"\"\"Create a Key Transaction\"\"\"\n <|body_0|>\n\n def delete(self, request, organization):\n \"\"\"Remove a Key transaction for a user\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class KeyTransactionEndpoint:\n def post(self, request, organization):\n \"\"\"Create a Key Transaction\"\"\"\n if not self.has_feature(request, organization):\n return Response(status=404)\n project = self.get_project(request, organization)\n base_filter = {'organization': organization, 'owner': request.user}\n with transaction.atomic():\n serializer = KeyTransactionSerializer(data=request.data, context=base_filter)\n if serializer.is_valid():\n data = serializer.validated_data\n base_filter['transaction'] = data['transaction']\n base_filter['project'] = project\n if KeyTransaction.objects.filter(**base_filter).exists():\n return Response(status=204)\n try:\n KeyTransaction.objects.create(**base_filter)\n return Response(status=201)\n except IntegrityError:\n return Response(status=204)\n return Response(serializer.errors, status=400)\n\n def delete(self, request, organization):\n \"\"\"Remove a Key transaction for a user\"\"\"\n if not self.has_feature(request, organization):\n return Response(status=404)\n project = self.get_project(request, organization)\n transaction = request.data['transaction']\n try:\n model = KeyTransaction.objects.get(transaction=transaction, organization=organization, project=project, owner=request.user)\n except KeyTransaction.DoesNotExist:\n return Response(status=204)\n model.delete()\n return Response(status=204)\n", 
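The post handler in this record guards against duplicate rows twice: an exists() pre-check plus an IntegrityError fallback inside transaction.atomic(). Django's get_or_create expresses the same create-if-missing intent in one call; a hedged sketch of that alternative core (function name hypothetical, response plumbing elided, model and filter names reused from the record):

def create_key_transaction(base_filter):
    # get_or_create reports whether a row was actually inserted, replacing
    # the manual exists()/IntegrityError dance for the common case.
    _, created = KeyTransaction.objects.get_or_create(**base_filter)
    return 201 if created else 204

One reason the recorded code may keep the explicit except IntegrityError: inside an outer atomic() block, a conflicting concurrent insert can still leave the transaction in an aborted state on some backends, so the race handling stays visible at the call site.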
"source": "the_stack_v2_python_sparse", "source_path": "src/sentry/discover/endpoints/discover_key_transactions.py", "source_repo": "kaozdl/sentry", "split": "val", "star_events_count": 0} {"blob_id": "0f065cc62950dd0d4b1e42b7e5eb47ba1562811f", "bodies": ["super(Transformer, self).__init__()\nself.encoder = Encoder(N, dm, h, hidden, input_vocab, max_seq_input, drop_rate)\nself.decoder = Decoder(N, dm, h, hidden, target_vocab, max_seq_target, drop_rate)\nself.linear = tf.keras.layers.Dense(target_vocab)", "enc = self.encoder(inputs, training, encoder_mask)\ndec = self.decoder(target, enc, training, look_ahead_mask, decoder_mask)\noutput = self.linear(dec)\nreturn output"], "bodies_text": "<|body_start_0|>\n super(Transformer, self).__init__()\n self.encoder = Encoder(N, dm, h, hidden, input_vocab, max_seq_input, drop_rate)\n self.decoder = Decoder(N, dm, h, hidden, target_vocab, max_seq_target, drop_rate)\n self.linear = tf.keras.layers.Dense(target_vocab)\n<|end_body_0|>\n\n<|body_start_1|>\n enc = self.encoder(inputs, training, encoder_mask)\n dec = self.decoder(target, enc, training, look_ahead_mask, decoder_mask)\n output = self.linear(dec)\n return output\n<|end_body_1|>\n", "class_docstring": "a transformer network", "class_name": "Transformer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Transformer:\n \"\"\"a transformer network\"\"\"\n\n def __init__(self, N, dm, h, hidden, input_vocab, target_vocab, max_seq_input, max_seq_target, drop_rate=0.1):\n \"\"\"initialization\"\"\"\n <|body_0|>\n\n def call(self, inputs, target, training, encoder_mask, look_ahead_mask, decoder_mask):\n \"\"\"call function\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Transformer, self).__init__()\n self.encoder = Encoder(N, dm, h, hidden, input_vocab, max_seq_input, drop_rate)\n self.decoder = Decoder(N, dm, h, hidden, target_vocab, max_seq_target, drop_rate)\n self.linear = tf.keras.layers.Dense(target_vocab)\n<|end_body_0|>\n\n<|body_start_1|>\n enc = self.encoder(inputs, training, encoder_mask)\n dec = self.decoder(target, enc, training, look_ahead_mask, decoder_mask)\n output = self.linear(dec)\n return output\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000311", "length_bytes": 1355, "license_type": "no_license", "methods": [{"docstring": "initialization", "name": "__init__", "signature": "def __init__(self, N, dm, h, hidden, input_vocab, target_vocab, max_seq_input, max_seq_target, drop_rate=0.1)"}, {"docstring": "call function", "name": "call", "signature": "def call(self, inputs, target, training, encoder_mask, look_ahead_mask, decoder_mask)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004758", "prompt": "Implement the Python class `Transformer` described below.\n\nClass description:\na transformer network\n\nMethod signatures and docstrings:\n- def __init__(self, N, dm, h, hidden, input_vocab, target_vocab, max_seq_input, max_seq_target, drop_rate=0.1): initialization\n- def call(self, inputs, target, training, encoder_mask, look_ahead_mask, decoder_mask): call function", "prompted_full_text": "Implement the Python class `Transformer` described below.\n\nClass description:\na transformer network\n\nMethod signatures and docstrings:\n- def __init__(self, N, dm, h, hidden, input_vocab, target_vocab, max_seq_input, max_seq_target, drop_rate=0.1): initialization\n- def call(self, inputs, target, training, encoder_mask, look_ahead_mask, decoder_mask): call 
function\n\n<|skeleton|>\nclass Transformer:\n \"\"\"a transformer network\"\"\"\n\n def __init__(self, N, dm, h, hidden, input_vocab, target_vocab, max_seq_input, max_seq_target, drop_rate=0.1):\n \"\"\"initialization\"\"\"\n <|body_0|>\n\n def call(self, inputs, target, training, encoder_mask, look_ahead_mask, decoder_mask):\n \"\"\"call function\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Transformer, self).__init__()\n self.encoder = Encoder(N, dm, h, hidden, input_vocab, max_seq_input, drop_rate)\n self.decoder = Decoder(N, dm, h, hidden, target_vocab, max_seq_target, drop_rate)\n self.linear = tf.keras.layers.Dense(target_vocab)\n<|end_body_0|>\n\n<|body_start_1|>\n enc = self.encoder(inputs, training, encoder_mask)\n dec = self.decoder(target, enc, training, look_ahead_mask, decoder_mask)\n output = self.linear(dec)\n return output\n<|end_body_1|>\n", "revision_id": "16dc37d1c6dc00a271053b60724c51763914029a", "skeleton": "<|skeleton|>\nclass Transformer:\n \"\"\"a transformer network\"\"\"\n\n def __init__(self, N, dm, h, hidden, input_vocab, target_vocab, max_seq_input, max_seq_target, drop_rate=0.1):\n \"\"\"initialization\"\"\"\n <|body_0|>\n\n def call(self, inputs, target, training, encoder_mask, look_ahead_mask, decoder_mask):\n \"\"\"call function\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Transformer:\n \"\"\"a transformer network\"\"\"\n\n def __init__(self, N, dm, h, hidden, input_vocab, target_vocab, max_seq_input, max_seq_target, drop_rate=0.1):\n \"\"\"initialization\"\"\"\n super(Transformer, self).__init__()\n self.encoder = Encoder(N, dm, h, hidden, input_vocab, max_seq_input, drop_rate)\n self.decoder = Decoder(N, dm, h, hidden, target_vocab, max_seq_target, drop_rate)\n self.linear = tf.keras.layers.Dense(target_vocab)\n\n def call(self, inputs, target, training, encoder_mask, look_ahead_mask, decoder_mask):\n \"\"\"call function\"\"\"\n enc = self.encoder(inputs, training, encoder_mask)\n dec = self.decoder(target, enc, training, look_ahead_mask, decoder_mask)\n output = self.linear(dec)\n return output\n", "source": "the_stack_v2_python_sparse", "source_path": "supervised_learning/0x11-attention/11-transformer.py", "source_repo": "jaycer95/holbertonschool-machine_learning", "split": "val", "star_events_count": 0} {"blob_id": "fd94796047c557b42d455180121d18b4c96ee72f", "bodies": ["from scoop.content.models import Attachment\nuuid = self.value\nlink = Attachment.objects.get_link_by_uuid(uuid)\nreturn {'link': link}", "base = super(AttachmentInline, self).get_template_name()[0]\npath = 'content/{}'.format(base)\nreturn path"], "bodies_text": "<|body_start_0|>\n from scoop.content.models import Attachment\n uuid = self.value\n link = Attachment.objects.get_link_by_uuid(uuid)\n return {'link': link}\n<|end_body_0|>\n\n<|body_start_1|>\n base = super(AttachmentInline, self).get_template_name()[0]\n path = 'content/{}'.format(base)\n return path\n<|end_body_1|>\n", "class_docstring": "Inline d'insertion de pièces jointes Format : {{attachment uuid}}", "class_name": "AttachmentInline", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AttachmentInline:\n \"\"\"Inline d'insertion de pièces jointes Format : {{attachment uuid}}\"\"\"\n\n def get_context(self):\n \"\"\"Renvoyer le contexte de rendu de l'inline\"\"\"\n <|body_0|>\n\n def get_template_name(self):\n 
\"\"\"Renvoyer le chemin du template\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from scoop.content.models import Attachment\n uuid = self.value\n link = Attachment.objects.get_link_by_uuid(uuid)\n return {'link': link}\n<|end_body_0|>\n\n<|body_start_1|>\n base = super(AttachmentInline, self).get_template_name()[0]\n path = 'content/{}'.format(base)\n return path\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000312", "length_bytes": 6816, "license_type": "no_license", "methods": [{"docstring": "Renvoyer le contexte de rendu de l'inline", "name": "get_context", "signature": "def get_context(self)"}, {"docstring": "Renvoyer le chemin du template", "name": "get_template_name", "signature": "def get_template_name(self)"}], "n_methods": 2, "prompt": "Implement the Python class `AttachmentInline` described below.\n\nClass description:\nInline d'insertion de pièces jointes Format : {{attachment uuid}}\n\nMethod signatures and docstrings:\n- def get_context(self): Renvoyer le contexte de rendu de l'inline\n- def get_template_name(self): Renvoyer le chemin du template", "prompted_full_text": "Implement the Python class `AttachmentInline` described below.\n\nClass description:\nInline d'insertion de pièces jointes Format : {{attachment uuid}}\n\nMethod signatures and docstrings:\n- def get_context(self): Renvoyer le contexte de rendu de l'inline\n- def get_template_name(self): Renvoyer le chemin du template\n\n<|skeleton|>\nclass AttachmentInline:\n \"\"\"Inline d'insertion de pièces jointes Format : {{attachment uuid}}\"\"\"\n\n def get_context(self):\n \"\"\"Renvoyer le contexte de rendu de l'inline\"\"\"\n <|body_0|>\n\n def get_template_name(self):\n \"\"\"Renvoyer le chemin du template\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from scoop.content.models import Attachment\n uuid = self.value\n link = Attachment.objects.get_link_by_uuid(uuid)\n return {'link': link}\n<|end_body_0|>\n\n<|body_start_1|>\n base = super(AttachmentInline, self).get_template_name()[0]\n path = 'content/{}'.format(base)\n return path\n<|end_body_1|>\n", "revision_id": "8cef6f6e89c1990e2b25f83e54e0c3481d83b6d7", "skeleton": "<|skeleton|>\nclass AttachmentInline:\n \"\"\"Inline d'insertion de pièces jointes Format : {{attachment uuid}}\"\"\"\n\n def get_context(self):\n \"\"\"Renvoyer le contexte de rendu de l'inline\"\"\"\n <|body_0|>\n\n def get_template_name(self):\n \"\"\"Renvoyer le chemin du template\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AttachmentInline:\n \"\"\"Inline d'insertion de pièces jointes Format : {{attachment uuid}}\"\"\"\n\n def get_context(self):\n \"\"\"Renvoyer le contexte de rendu de l'inline\"\"\"\n from scoop.content.models import Attachment\n uuid = self.value\n link = Attachment.objects.get_link_by_uuid(uuid)\n return {'link': link}\n\n def get_template_name(self):\n \"\"\"Renvoyer le chemin du template\"\"\"\n base = super(AttachmentInline, self).get_template_name()[0]\n path = 'content/{}'.format(base)\n return path\n", "source": "the_stack_v2_python_sparse", "source_path": "scoop/content/util/inlines.py", "source_repo": "artscoop/scoop", "split": "val", "star_events_count": 0} {"blob_id": "a7e9bccb4b45d7b4798815cece15639e206a52cd", "bodies": ["PeakDetector.__init__(self, recording, return_output=return_output)\nself.peak_detector_node = peak_detector_node\nself.waveform_extraction_node = 
waveform_extraction_node\nself.waveform_denoising_node = waveform_denoising_node\nself.num_iterations = num_iterations\nself.tresholds = tresholds", "internal_pipeline = (self.peak_detector_node, self.waveform_extraction_node, self.waveform_denoising_node)\npipeline_margin = (node.get_trace_margin() for node in internal_pipeline if hasattr(node, 'get_trace_margin'))\nreturn max(pipeline_margin)", "traces_chunk = np.array(traces_chunk, copy=True, dtype='float32')\nlocal_peaks_list = []\nall_waveforms = []\nfor iteration in range(self.num_iterations):\n if self.tresholds is not None:\n old_args = self.peak_detector_node.args\n old_detect_treshold = self.peak_detector_node.params['detect_threshold']\n old_abs_treshold = old_args[1]\n new_abs_treshold = old_abs_treshold * self.tresholds[iteration] / old_detect_treshold\n new_args = tuple((val if index != 1 else new_abs_treshold for index, val in enumerate(old_args)))\n self.peak_detector_node.args = new_args\n local_peaks, = self.peak_detector_node.compute(traces=traces_chunk, start_frame=start_frame, end_frame=end_frame, segment_index=segment_index, max_margin=max_margin)\n local_peaks = self.add_iteration_to_peaks_dtype(local_peaks=local_peaks, iteration=iteration)\n local_peaks_list.append(local_peaks)\n if local_peaks.size == 0:\n break\n waveforms = self.waveform_extraction_node.compute(traces=traces_chunk, peaks=local_peaks)\n denoised_waveforms = self.waveform_denoising_node.compute(traces=traces_chunk, peaks=local_peaks, waveforms=waveforms)\n self.substract_waveforms_from_traces(local_peaks=local_peaks, traces_chunk=traces_chunk, waveforms=denoised_waveforms)\n all_waveforms.append(waveforms)\nall_local_peaks = np.concatenate(local_peaks_list, axis=0)\nall_waveforms = np.concatenate(all_waveforms, axis=0) if len(all_waveforms) != 0 else np.empty((0, 0, 0))\nsorting_indices = np.argsort(all_local_peaks['sample_index'])\nall_local_peaks = all_local_peaks[sorting_indices]\nall_waveforms = all_waveforms[sorting_indices]\nreturn (all_local_peaks, all_waveforms)", "nbefore = self.waveform_extraction_node.nbefore\nnafter = self.waveform_extraction_node.nafter\nif isinstance(self.waveform_extraction_node, ExtractSparseWaveforms):\n neighbours_mask = self.waveform_extraction_node.neighbours_mask\nelse:\n neighbours_mask = None\nfor peak_index, peak in enumerate(local_peaks):\n center_sample = peak['sample_index']\n first_sample = center_sample - nbefore\n last_sample = center_sample + nafter\n if neighbours_mask is None:\n traces_chunk[first_sample:last_sample, :] -= waveforms[peak_index, :, :]\n else:\n channels, = np.nonzero(neighbours_mask[peak['channel_index']])\n traces_chunk[first_sample:last_sample, channels] -= waveforms[peak_index, :, :len(channels)]", "local_peaks_expanded = np.zeros_like(local_peaks, dtype=expanded_base_peak_dtype)\nfields_in_base_type = np.dtype(base_peak_dtype).names\nfor field in fields_in_base_type:\n local_peaks_expanded[field] = local_peaks[field]\nlocal_peaks_expanded['iteration'] = iteration\nreturn local_peaks_expanded"], "bodies_text": "<|body_start_0|>\n PeakDetector.__init__(self, recording, return_output=return_output)\n self.peak_detector_node = peak_detector_node\n self.waveform_extraction_node = waveform_extraction_node\n self.waveform_denoising_node = waveform_denoising_node\n self.num_iterations = num_iterations\n self.tresholds = tresholds\n<|end_body_0|>\n\n<|body_start_1|>\n internal_pipeline = (self.peak_detector_node, self.waveform_extraction_node, self.waveform_denoising_node)\n 
pipeline_margin = (node.get_trace_margin() for node in internal_pipeline if hasattr(node, 'get_trace_margin'))\n return max(pipeline_margin)\n<|end_body_1|>\n\n<|body_start_2|>\n traces_chunk = np.array(traces_chunk, copy=True, dtype='float32')\n local_peaks_list = []\n all_waveforms = []\n for iteration in range(self.num_iterations):\n if self.tresholds is not None:\n old_args = self.peak_detector_node.args\n old_detect_treshold = self.peak_detector_node.params['detect_threshold']\n old_abs_treshold = old_args[1]\n new_abs_treshold = old_abs_treshold * self.tresholds[iteration] / old_detect_treshold\n new_args = tuple((val if index != 1 else new_abs_treshold for index, val in enumerate(old_args)))\n self.peak_detector_node.args = new_args\n local_peaks, = self.peak_detector_node.compute(traces=traces_chunk, start_frame=start_frame, end_frame=end_frame, segment_index=segment_index, max_margin=max_margin)\n local_peaks = self.add_iteration_to_peaks_dtype(local_peaks=local_peaks, iteration=iteration)\n local_peaks_list.append(local_peaks)\n if local_peaks.size == 0:\n break\n waveforms = self.waveform_extraction_node.compute(traces=traces_chunk, peaks=local_peaks)\n denoised_waveforms = self.waveform_denoising_node.compute(traces=traces_chunk, peaks=local_peaks, waveforms=waveforms)\n self.substract_waveforms_from_traces(local_peaks=local_peaks, traces_chunk=traces_chunk, waveforms=denoised_waveforms)\n all_waveforms.append(waveforms)\n all_local_peaks = np.concatenate(local_peaks_list, axis=0)\n all_waveforms = np.concatenate(all_waveforms, axis=0) if len(all_waveforms) != 0 else np.empty((0, 0, 0))\n sorting_indices = np.argsort(all_local_peaks['sample_index'])\n all_local_peaks = all_local_peaks[sorting_indices]\n all_waveforms = all_waveforms[sorting_indices]\n return (all_local_peaks, all_waveforms)\n<|end_body_2|>\n\n<|body_start_3|>\n nbefore = self.waveform_extraction_node.nbefore\n nafter = self.waveform_extraction_node.nafter\n if isinstance(self.waveform_extraction_node, ExtractSparseWaveforms):\n neighbours_mask = self.waveform_extraction_node.neighbours_mask\n else:\n neighbours_mask = None\n for peak_index, peak in enumerate(local_peaks):\n center_sample = peak['sample_index']\n first_sample = center_sample - nbefore\n last_sample = center_sample + nafter\n if neighbours_mask is None:\n traces_chunk[first_sample:last_sample, :] -= waveforms[peak_index, :, :]\n else:\n channels, = np.nonzero(neighbours_mask[peak['channel_index']])\n traces_chunk[first_sample:last_sample, channels] -= waveforms[peak_index, :, :len(channels)]\n<|end_body_3|>\n\n<|body_start_4|>\n local_peaks_expanded = np.zeros_like(local_peaks, dtype=expanded_base_peak_dtype)\n fields_in_base_type = np.dtype(base_peak_dtype).names\n for field in fields_in_base_type:\n local_peaks_expanded[field] = local_peaks[field]\n local_peaks_expanded['iteration'] = iteration\n return local_peaks_expanded\n<|end_body_4|>\n", "class_docstring": "A class that iteratively detects peaks in the recording by applying a peak detector, waveform extraction, and waveform denoising node. The algorithm runs for a specified number of iterations or until no peaks are found.", "class_name": "IterativePeakDetector", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IterativePeakDetector:\n \"\"\"A class that iteratively detects peaks in the recording by applying a peak detector, waveform extraction, and waveform denoising node. 
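Stripped of the node plumbing, the compute body above is a detect-and-peel loop: detect peaks, extract and denoise their waveforms, subtract them from the traces, and rerun detection so smaller events hidden under large ones can surface (note how the per-iteration threshold is rescaled via new_abs = old_abs * new_rel / old_rel). A toy single-channel NumPy sketch of that control flow, with a simple amplitude-scaled template standing in for the real extraction and denoising nodes; all names here are illustrative:

import numpy as np

def detect_and_peel(trace, template, threshold, num_iterations=2):
    # `template` peaks at 1.0 at its center; each pass finds local maxima
    # above `threshold`, then subtracts the amplitude-scaled template so the
    # next pass sees the residual. Overlapping peaks within one pass are
    # handled only approximately in this sketch.
    trace = trace.astype('float32').copy()
    half = len(template) // 2
    found = []
    for iteration in range(num_iterations):
        peaks = [(i, trace[i]) for i in range(half, len(trace) - half)
                 if trace[i] > threshold and trace[i] == trace[i - 1:i + 2].max()]
        if not peaks:
            break
        for i, amplitude in peaks:
            trace[i - half:i - half + len(template)] -= amplitude * template
            found.append((i, iteration))
    return sorted(found)

rng = np.random.default_rng(0)
trace = rng.normal(0.0, 0.05, 200).astype('float32')
template = np.exp(-np.linspace(-2, 2, 11) ** 2).astype('float32')
trace[90:101] += 3.0 * template
print(detect_and_peel(trace, template, threshold=0.5))  # [(95, 0)], up to noise

The real pipeline replaces the template subtraction with per-peak extracted-and-denoised waveforms and runs the detector over multi-channel chunks.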
The algorithm runs for a specified number of iterations or until no peaks are found.\"\"\"\n\n def __init__(self, recording: BaseRecording, peak_detector_node: PeakDetector, waveform_extraction_node: WaveformsNode, waveform_denoising_node, num_iterations: int=2, return_output: bool=True, tresholds: Optional[List[float]]=None):\n \"\"\"Initialize the iterative peak detector. Parameters ---------- recording : BaseRecording The recording to process. peak_detector_node : PeakDetector The peak detector node to use. waveform_extraction_node : WaveformsNode The waveform extraction node to use. waveform_denoising_node The waveform denoising node to use. num_iterations : int, optional, default=2 The number of iterations to run the algorithm. return_output : bool, optional, default=True\"\"\"\n <|body_0|>\n\n def get_trace_margin(self) -> int:\n \"\"\"Calculate the maximum trace margin from the internal pipeline. Using the strategy as use by the Node pipeline Returns ------- int The maximum trace margin.\"\"\"\n <|body_1|>\n\n def compute(self, traces_chunk, start_frame, end_frame, segment_index, max_margin) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Perform the iterative peak detection, waveform extraction, and denoising. Parameters ---------- traces_chunk : array-like The chunk of traces to process. start_frame : int The starting frame for the chunk. end_frame : int The ending frame for the chunk. segment_index : int The segment index. max_margin : int The maximum margin for the traces. Returns ------- tuple of ndarray A tuple containing a single ndarray with the detected peaks.\"\"\"\n <|body_2|>\n\n def substract_waveforms_from_traces(self, local_peaks: np.ndarray, traces_chunk: np.ndarray, waveforms: np.ndarray):\n \"\"\"Substract inplace the cleaned waveforms from the traces_chunk. Parameters ---------- sample_indices : ndarray The indices where the waveforms are maximum (peaks[\"sample_index\"]). traces_chunk : ndarray A chunk of the traces. waveforms : ndarray The waveforms extracted from the traces.\"\"\"\n <|body_3|>\n\n def add_iteration_to_peaks_dtype(self, local_peaks, iteration) -> np.ndarray:\n \"\"\"Add the iteration number to the peaks dtype. Parameters ---------- local_peaks : ndarray The array of local peaks. iteration : int The iteration number. 
Returns ------- ndarray An array of local peaks with the iteration number added.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n PeakDetector.__init__(self, recording, return_output=return_output)\n self.peak_detector_node = peak_detector_node\n self.waveform_extraction_node = waveform_extraction_node\n self.waveform_denoising_node = waveform_denoising_node\n self.num_iterations = num_iterations\n self.tresholds = tresholds\n<|end_body_0|>\n\n<|body_start_1|>\n internal_pipeline = (self.peak_detector_node, self.waveform_extraction_node, self.waveform_denoising_node)\n pipeline_margin = (node.get_trace_margin() for node in internal_pipeline if hasattr(node, 'get_trace_margin'))\n return max(pipeline_margin)\n<|end_body_1|>\n\n<|body_start_2|>\n traces_chunk = np.array(traces_chunk, copy=True, dtype='float32')\n local_peaks_list = []\n all_waveforms = []\n for iteration in range(self.num_iterations):\n if self.tresholds is not None:\n old_args = self.peak_detector_node.args\n old_detect_treshold = self.peak_detector_node.params['detect_threshold']\n old_abs_treshold = old_args[1]\n new_abs_treshold = old_abs_treshold * self.tresholds[iteration] / old_detect_treshold\n new_args = tuple((val if index != 1 else new_abs_treshold for index, val in enumerate(old_args)))\n self.peak_detector_node.args = new_args\n local_peaks, = self.peak_detector_node.compute(traces=traces_chunk, start_frame=start_frame, end_frame=end_frame, segment_index=segment_index, max_margin=max_margin)\n local_peaks = self.add_iteration_to_peaks_dtype(local_peaks=local_peaks, iteration=iteration)\n local_peaks_list.append(local_peaks)\n if local_peaks.size == 0:\n break\n waveforms = self.waveform_extraction_node.compute(traces=traces_chunk, peaks=local_peaks)\n denoised_waveforms = self.waveform_denoising_node.compute(traces=traces_chunk, peaks=local_peaks, waveforms=waveforms)\n self.substract_waveforms_from_traces(local_peaks=local_peaks, traces_chunk=traces_chunk, waveforms=denoised_waveforms)\n all_waveforms.append(waveforms)\n all_local_peaks = np.concatenate(local_peaks_list, axis=0)\n all_waveforms = np.concatenate(all_waveforms, axis=0) if len(all_waveforms) != 0 else np.empty((0, 0, 0))\n sorting_indices = np.argsort(all_local_peaks['sample_index'])\n all_local_peaks = all_local_peaks[sorting_indices]\n all_waveforms = all_waveforms[sorting_indices]\n return (all_local_peaks, all_waveforms)\n<|end_body_2|>\n\n<|body_start_3|>\n nbefore = self.waveform_extraction_node.nbefore\n nafter = self.waveform_extraction_node.nafter\n if isinstance(self.waveform_extraction_node, ExtractSparseWaveforms):\n neighbours_mask = self.waveform_extraction_node.neighbours_mask\n else:\n neighbours_mask = None\n for peak_index, peak in enumerate(local_peaks):\n center_sample = peak['sample_index']\n first_sample = center_sample - nbefore\n last_sample = center_sample + nafter\n if neighbours_mask is None:\n traces_chunk[first_sample:last_sample, :] -= waveforms[peak_index, :, :]\n else:\n channels, = np.nonzero(neighbours_mask[peak['channel_index']])\n traces_chunk[first_sample:last_sample, channels] -= waveforms[peak_index, :, :len(channels)]\n<|end_body_3|>\n\n<|body_start_4|>\n local_peaks_expanded = np.zeros_like(local_peaks, dtype=expanded_base_peak_dtype)\n fields_in_base_type = np.dtype(base_peak_dtype).names\n for field in fields_in_base_type:\n local_peaks_expanded[field] = local_peaks[field]\n local_peaks_expanded['iteration'] = iteration\n return local_peaks_expanded\n<|end_body_4|>\n", "id": 
"stack_v2_sparse_classes_10k_val_000313", "length_bytes": 40308, "license_type": "permissive", "methods": [{"docstring": "Initialize the iterative peak detector. Parameters ---------- recording : BaseRecording The recording to process. peak_detector_node : PeakDetector The peak detector node to use. waveform_extraction_node : WaveformsNode The waveform extraction node to use. waveform_denoising_node The waveform denoising node to use. num_iterations : int, optional, default=2 The number of iterations to run the algorithm. return_output : bool, optional, default=True", "name": "__init__", "signature": "def __init__(self, recording: BaseRecording, peak_detector_node: PeakDetector, waveform_extraction_node: WaveformsNode, waveform_denoising_node, num_iterations: int=2, return_output: bool=True, tresholds: Optional[List[float]]=None)"}, {"docstring": "Calculate the maximum trace margin from the internal pipeline. Using the strategy as use by the Node pipeline Returns ------- int The maximum trace margin.", "name": "get_trace_margin", "signature": "def get_trace_margin(self) -> int"}, {"docstring": "Perform the iterative peak detection, waveform extraction, and denoising. Parameters ---------- traces_chunk : array-like The chunk of traces to process. start_frame : int The starting frame for the chunk. end_frame : int The ending frame for the chunk. segment_index : int The segment index. max_margin : int The maximum margin for the traces. Returns ------- tuple of ndarray A tuple containing a single ndarray with the detected peaks.", "name": "compute", "signature": "def compute(self, traces_chunk, start_frame, end_frame, segment_index, max_margin) -> Tuple[np.ndarray, np.ndarray]"}, {"docstring": "Substract inplace the cleaned waveforms from the traces_chunk. Parameters ---------- sample_indices : ndarray The indices where the waveforms are maximum (peaks[\"sample_index\"]). traces_chunk : ndarray A chunk of the traces. waveforms : ndarray The waveforms extracted from the traces.", "name": "substract_waveforms_from_traces", "signature": "def substract_waveforms_from_traces(self, local_peaks: np.ndarray, traces_chunk: np.ndarray, waveforms: np.ndarray)"}, {"docstring": "Add the iteration number to the peaks dtype. Parameters ---------- local_peaks : ndarray The array of local peaks. iteration : int The iteration number. Returns ------- ndarray An array of local peaks with the iteration number added.", "name": "add_iteration_to_peaks_dtype", "signature": "def add_iteration_to_peaks_dtype(self, local_peaks, iteration) -> np.ndarray"}], "n_methods": 5, "prompt": "Implement the Python class `IterativePeakDetector` described below.\n\nClass description:\nA class that iteratively detects peaks in the recording by applying a peak detector, waveform extraction, and waveform denoising node. The algorithm runs for a specified number of iterations or until no peaks are found.\n\nMethod signatures and docstrings:\n- def __init__(self, recording: BaseRecording, peak_detector_node: PeakDetector, waveform_extraction_node: WaveformsNode, waveform_denoising_node, num_iterations: int=2, return_output: bool=True, tresholds: Optional[List[float]]=None): Initialize the iterative peak detector. Parameters ---------- recording : BaseRecording The recording to process. peak_detector_node : PeakDetector The peak detector node to use. waveform_extraction_node : WaveformsNode The waveform extraction node to use. waveform_denoising_node The waveform denoising node to use. 
num_iterations : int, optional, default=2 The number of iterations to run the algorithm. return_output : bool, optional, default=True\n- def get_trace_margin(self) -> int: Calculate the maximum trace margin from the internal pipeline. Using the strategy as use by the Node pipeline Returns ------- int The maximum trace margin.\n- def compute(self, traces_chunk, start_frame, end_frame, segment_index, max_margin) -> Tuple[np.ndarray, np.ndarray]: Perform the iterative peak detection, waveform extraction, and denoising. Parameters ---------- traces_chunk : array-like The chunk of traces to process. start_frame : int The starting frame for the chunk. end_frame : int The ending frame for the chunk. segment_index : int The segment index. max_margin : int The maximum margin for the traces. Returns ------- tuple of ndarray A tuple containing a single ndarray with the detected peaks.\n- def substract_waveforms_from_traces(self, local_peaks: np.ndarray, traces_chunk: np.ndarray, waveforms: np.ndarray): Substract inplace the cleaned waveforms from the traces_chunk. Parameters ---------- sample_indices : ndarray The indices where the waveforms are maximum (peaks[\"sample_index\"]). traces_chunk : ndarray A chunk of the traces. waveforms : ndarray The waveforms extracted from the traces.\n- def add_iteration_to_peaks_dtype(self, local_peaks, iteration) -> np.ndarray: Add the iteration number to the peaks dtype. Parameters ---------- local_peaks : ndarray The array of local peaks. iteration : int The iteration number. Returns ------- ndarray An array of local peaks with the iteration number added.", "prompted_full_text": "Implement the Python class `IterativePeakDetector` described below.\n\nClass description:\nA class that iteratively detects peaks in the recording by applying a peak detector, waveform extraction, and waveform denoising node. The algorithm runs for a specified number of iterations or until no peaks are found.\n\nMethod signatures and docstrings:\n- def __init__(self, recording: BaseRecording, peak_detector_node: PeakDetector, waveform_extraction_node: WaveformsNode, waveform_denoising_node, num_iterations: int=2, return_output: bool=True, tresholds: Optional[List[float]]=None): Initialize the iterative peak detector. Parameters ---------- recording : BaseRecording The recording to process. peak_detector_node : PeakDetector The peak detector node to use. waveform_extraction_node : WaveformsNode The waveform extraction node to use. waveform_denoising_node The waveform denoising node to use. num_iterations : int, optional, default=2 The number of iterations to run the algorithm. return_output : bool, optional, default=True\n- def get_trace_margin(self) -> int: Calculate the maximum trace margin from the internal pipeline. Using the strategy as use by the Node pipeline Returns ------- int The maximum trace margin.\n- def compute(self, traces_chunk, start_frame, end_frame, segment_index, max_margin) -> Tuple[np.ndarray, np.ndarray]: Perform the iterative peak detection, waveform extraction, and denoising. Parameters ---------- traces_chunk : array-like The chunk of traces to process. start_frame : int The starting frame for the chunk. end_frame : int The ending frame for the chunk. segment_index : int The segment index. max_margin : int The maximum margin for the traces. 
Returns ------- tuple of ndarray A tuple containing a single ndarray with the detected peaks.\n- def substract_waveforms_from_traces(self, local_peaks: np.ndarray, traces_chunk: np.ndarray, waveforms: np.ndarray): Substract inplace the cleaned waveforms from the traces_chunk. Parameters ---------- sample_indices : ndarray The indices where the waveforms are maximum (peaks[\"sample_index\"]). traces_chunk : ndarray A chunk of the traces. waveforms : ndarray The waveforms extracted from the traces.\n- def add_iteration_to_peaks_dtype(self, local_peaks, iteration) -> np.ndarray: Add the iteration number to the peaks dtype. Parameters ---------- local_peaks : ndarray The array of local peaks. iteration : int The iteration number. Returns ------- ndarray An array of local peaks with the iteration number added.\n\n<|skeleton|>\nclass IterativePeakDetector:\n \"\"\"A class that iteratively detects peaks in the recording by applying a peak detector, waveform extraction, and waveform denoising node. The algorithm runs for a specified number of iterations or until no peaks are found.\"\"\"\n\n def __init__(self, recording: BaseRecording, peak_detector_node: PeakDetector, waveform_extraction_node: WaveformsNode, waveform_denoising_node, num_iterations: int=2, return_output: bool=True, tresholds: Optional[List[float]]=None):\n \"\"\"Initialize the iterative peak detector. Parameters ---------- recording : BaseRecording The recording to process. peak_detector_node : PeakDetector The peak detector node to use. waveform_extraction_node : WaveformsNode The waveform extraction node to use. waveform_denoising_node The waveform denoising node to use. num_iterations : int, optional, default=2 The number of iterations to run the algorithm. return_output : bool, optional, default=True\"\"\"\n <|body_0|>\n\n def get_trace_margin(self) -> int:\n \"\"\"Calculate the maximum trace margin from the internal pipeline. Using the strategy as use by the Node pipeline Returns ------- int The maximum trace margin.\"\"\"\n <|body_1|>\n\n def compute(self, traces_chunk, start_frame, end_frame, segment_index, max_margin) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Perform the iterative peak detection, waveform extraction, and denoising. Parameters ---------- traces_chunk : array-like The chunk of traces to process. start_frame : int The starting frame for the chunk. end_frame : int The ending frame for the chunk. segment_index : int The segment index. max_margin : int The maximum margin for the traces. Returns ------- tuple of ndarray A tuple containing a single ndarray with the detected peaks.\"\"\"\n <|body_2|>\n\n def substract_waveforms_from_traces(self, local_peaks: np.ndarray, traces_chunk: np.ndarray, waveforms: np.ndarray):\n \"\"\"Substract inplace the cleaned waveforms from the traces_chunk. Parameters ---------- sample_indices : ndarray The indices where the waveforms are maximum (peaks[\"sample_index\"]). traces_chunk : ndarray A chunk of the traces. waveforms : ndarray The waveforms extracted from the traces.\"\"\"\n <|body_3|>\n\n def add_iteration_to_peaks_dtype(self, local_peaks, iteration) -> np.ndarray:\n \"\"\"Add the iteration number to the peaks dtype. Parameters ---------- local_peaks : ndarray The array of local peaks. iteration : int The iteration number. 
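The dtype expansion described here is the standard NumPy recipe for adding a column to a structured array: allocate with the wider dtype, copy the shared fields, then stamp the new one. The same pattern in isolation, with a stand-in peak dtype (the exact base_peak_dtype lives elsewhere in SpikeInterface; its fields are assumed here):

import numpy as np

base_peak_dtype = [('sample_index', 'int64'), ('channel_index', 'int64'),
                   ('amplitude', 'float64'), ('segment_index', 'int64')]
expanded_base_peak_dtype = base_peak_dtype + [('iteration', 'int8')]

def add_iteration_field(local_peaks, iteration):
    # NumPy cannot append a field to a structured array in place,
    # hence the allocate-and-copy.
    expanded = np.zeros_like(local_peaks, dtype=expanded_base_peak_dtype)
    for field in np.dtype(base_peak_dtype).names:
        expanded[field] = local_peaks[field]
    expanded['iteration'] = iteration
    return expanded

peaks = np.zeros(3, dtype=base_peak_dtype)
peaks['sample_index'] = [10, 42, 99]
print(add_iteration_field(peaks, 1)['iteration'])  # [1 1 1]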
Returns ------- ndarray An array of local peaks with the iteration number added.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n PeakDetector.__init__(self, recording, return_output=return_output)\n self.peak_detector_node = peak_detector_node\n self.waveform_extraction_node = waveform_extraction_node\n self.waveform_denoising_node = waveform_denoising_node\n self.num_iterations = num_iterations\n self.tresholds = tresholds\n<|end_body_0|>\n\n<|body_start_1|>\n internal_pipeline = (self.peak_detector_node, self.waveform_extraction_node, self.waveform_denoising_node)\n pipeline_margin = (node.get_trace_margin() for node in internal_pipeline if hasattr(node, 'get_trace_margin'))\n return max(pipeline_margin)\n<|end_body_1|>\n\n<|body_start_2|>\n traces_chunk = np.array(traces_chunk, copy=True, dtype='float32')\n local_peaks_list = []\n all_waveforms = []\n for iteration in range(self.num_iterations):\n if self.tresholds is not None:\n old_args = self.peak_detector_node.args\n old_detect_treshold = self.peak_detector_node.params['detect_threshold']\n old_abs_treshold = old_args[1]\n new_abs_treshold = old_abs_treshold * self.tresholds[iteration] / old_detect_treshold\n new_args = tuple((val if index != 1 else new_abs_treshold for index, val in enumerate(old_args)))\n self.peak_detector_node.args = new_args\n local_peaks, = self.peak_detector_node.compute(traces=traces_chunk, start_frame=start_frame, end_frame=end_frame, segment_index=segment_index, max_margin=max_margin)\n local_peaks = self.add_iteration_to_peaks_dtype(local_peaks=local_peaks, iteration=iteration)\n local_peaks_list.append(local_peaks)\n if local_peaks.size == 0:\n break\n waveforms = self.waveform_extraction_node.compute(traces=traces_chunk, peaks=local_peaks)\n denoised_waveforms = self.waveform_denoising_node.compute(traces=traces_chunk, peaks=local_peaks, waveforms=waveforms)\n self.substract_waveforms_from_traces(local_peaks=local_peaks, traces_chunk=traces_chunk, waveforms=denoised_waveforms)\n all_waveforms.append(waveforms)\n all_local_peaks = np.concatenate(local_peaks_list, axis=0)\n all_waveforms = np.concatenate(all_waveforms, axis=0) if len(all_waveforms) != 0 else np.empty((0, 0, 0))\n sorting_indices = np.argsort(all_local_peaks['sample_index'])\n all_local_peaks = all_local_peaks[sorting_indices]\n all_waveforms = all_waveforms[sorting_indices]\n return (all_local_peaks, all_waveforms)\n<|end_body_2|>\n\n<|body_start_3|>\n nbefore = self.waveform_extraction_node.nbefore\n nafter = self.waveform_extraction_node.nafter\n if isinstance(self.waveform_extraction_node, ExtractSparseWaveforms):\n neighbours_mask = self.waveform_extraction_node.neighbours_mask\n else:\n neighbours_mask = None\n for peak_index, peak in enumerate(local_peaks):\n center_sample = peak['sample_index']\n first_sample = center_sample - nbefore\n last_sample = center_sample + nafter\n if neighbours_mask is None:\n traces_chunk[first_sample:last_sample, :] -= waveforms[peak_index, :, :]\n else:\n channels, = np.nonzero(neighbours_mask[peak['channel_index']])\n traces_chunk[first_sample:last_sample, channels] -= waveforms[peak_index, :, :len(channels)]\n<|end_body_3|>\n\n<|body_start_4|>\n local_peaks_expanded = np.zeros_like(local_peaks, dtype=expanded_base_peak_dtype)\n fields_in_base_type = np.dtype(base_peak_dtype).names\n for field in fields_in_base_type:\n local_peaks_expanded[field] = local_peaks[field]\n local_peaks_expanded['iteration'] = iteration\n return local_peaks_expanded\n<|end_body_4|>\n", "revision_id": 
"ee2237b3f5ce2347b2ec9df90e97b0ee6c738dcf", "skeleton": "<|skeleton|>\nclass IterativePeakDetector:\n \"\"\"A class that iteratively detects peaks in the recording by applying a peak detector, waveform extraction, and waveform denoising node. The algorithm runs for a specified number of iterations or until no peaks are found.\"\"\"\n\n def __init__(self, recording: BaseRecording, peak_detector_node: PeakDetector, waveform_extraction_node: WaveformsNode, waveform_denoising_node, num_iterations: int=2, return_output: bool=True, tresholds: Optional[List[float]]=None):\n \"\"\"Initialize the iterative peak detector. Parameters ---------- recording : BaseRecording The recording to process. peak_detector_node : PeakDetector The peak detector node to use. waveform_extraction_node : WaveformsNode The waveform extraction node to use. waveform_denoising_node The waveform denoising node to use. num_iterations : int, optional, default=2 The number of iterations to run the algorithm. return_output : bool, optional, default=True\"\"\"\n <|body_0|>\n\n def get_trace_margin(self) -> int:\n \"\"\"Calculate the maximum trace margin from the internal pipeline. Using the strategy as use by the Node pipeline Returns ------- int The maximum trace margin.\"\"\"\n <|body_1|>\n\n def compute(self, traces_chunk, start_frame, end_frame, segment_index, max_margin) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Perform the iterative peak detection, waveform extraction, and denoising. Parameters ---------- traces_chunk : array-like The chunk of traces to process. start_frame : int The starting frame for the chunk. end_frame : int The ending frame for the chunk. segment_index : int The segment index. max_margin : int The maximum margin for the traces. Returns ------- tuple of ndarray A tuple containing a single ndarray with the detected peaks.\"\"\"\n <|body_2|>\n\n def substract_waveforms_from_traces(self, local_peaks: np.ndarray, traces_chunk: np.ndarray, waveforms: np.ndarray):\n \"\"\"Substract inplace the cleaned waveforms from the traces_chunk. Parameters ---------- sample_indices : ndarray The indices where the waveforms are maximum (peaks[\"sample_index\"]). traces_chunk : ndarray A chunk of the traces. waveforms : ndarray The waveforms extracted from the traces.\"\"\"\n <|body_3|>\n\n def add_iteration_to_peaks_dtype(self, local_peaks, iteration) -> np.ndarray:\n \"\"\"Add the iteration number to the peaks dtype. Parameters ---------- local_peaks : ndarray The array of local peaks. iteration : int The iteration number. Returns ------- ndarray An array of local peaks with the iteration number added.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class IterativePeakDetector:\n \"\"\"A class that iteratively detects peaks in the recording by applying a peak detector, waveform extraction, and waveform denoising node. The algorithm runs for a specified number of iterations or until no peaks are found.\"\"\"\n\n def __init__(self, recording: BaseRecording, peak_detector_node: PeakDetector, waveform_extraction_node: WaveformsNode, waveform_denoising_node, num_iterations: int=2, return_output: bool=True, tresholds: Optional[List[float]]=None):\n \"\"\"Initialize the iterative peak detector. Parameters ---------- recording : BaseRecording The recording to process. peak_detector_node : PeakDetector The peak detector node to use. waveform_extraction_node : WaveformsNode The waveform extraction node to use. 
waveform_denoising_node The waveform denoising node to use. num_iterations : int, optional, default=2 The number of iterations to run the algorithm. return_output : bool, optional, default=True\"\"\"\n PeakDetector.__init__(self, recording, return_output=return_output)\n self.peak_detector_node = peak_detector_node\n self.waveform_extraction_node = waveform_extraction_node\n self.waveform_denoising_node = waveform_denoising_node\n self.num_iterations = num_iterations\n self.tresholds = tresholds\n\n def get_trace_margin(self) -> int:\n \"\"\"Calculate the maximum trace margin from the internal pipeline. Using the strategy as use by the Node pipeline Returns ------- int The maximum trace margin.\"\"\"\n internal_pipeline = (self.peak_detector_node, self.waveform_extraction_node, self.waveform_denoising_node)\n pipeline_margin = (node.get_trace_margin() for node in internal_pipeline if hasattr(node, 'get_trace_margin'))\n return max(pipeline_margin)\n\n def compute(self, traces_chunk, start_frame, end_frame, segment_index, max_margin) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Perform the iterative peak detection, waveform extraction, and denoising. Parameters ---------- traces_chunk : array-like The chunk of traces to process. start_frame : int The starting frame for the chunk. end_frame : int The ending frame for the chunk. segment_index : int The segment index. max_margin : int The maximum margin for the traces. Returns ------- tuple of ndarray A tuple containing a single ndarray with the detected peaks.\"\"\"\n traces_chunk = np.array(traces_chunk, copy=True, dtype='float32')\n local_peaks_list = []\n all_waveforms = []\n for iteration in range(self.num_iterations):\n if self.tresholds is not None:\n old_args = self.peak_detector_node.args\n old_detect_treshold = self.peak_detector_node.params['detect_threshold']\n old_abs_treshold = old_args[1]\n new_abs_treshold = old_abs_treshold * self.tresholds[iteration] / old_detect_treshold\n new_args = tuple((val if index != 1 else new_abs_treshold for index, val in enumerate(old_args)))\n self.peak_detector_node.args = new_args\n local_peaks, = self.peak_detector_node.compute(traces=traces_chunk, start_frame=start_frame, end_frame=end_frame, segment_index=segment_index, max_margin=max_margin)\n local_peaks = self.add_iteration_to_peaks_dtype(local_peaks=local_peaks, iteration=iteration)\n local_peaks_list.append(local_peaks)\n if local_peaks.size == 0:\n break\n waveforms = self.waveform_extraction_node.compute(traces=traces_chunk, peaks=local_peaks)\n denoised_waveforms = self.waveform_denoising_node.compute(traces=traces_chunk, peaks=local_peaks, waveforms=waveforms)\n self.substract_waveforms_from_traces(local_peaks=local_peaks, traces_chunk=traces_chunk, waveforms=denoised_waveforms)\n all_waveforms.append(waveforms)\n all_local_peaks = np.concatenate(local_peaks_list, axis=0)\n all_waveforms = np.concatenate(all_waveforms, axis=0) if len(all_waveforms) != 0 else np.empty((0, 0, 0))\n sorting_indices = np.argsort(all_local_peaks['sample_index'])\n all_local_peaks = all_local_peaks[sorting_indices]\n all_waveforms = all_waveforms[sorting_indices]\n return (all_local_peaks, all_waveforms)\n\n def substract_waveforms_from_traces(self, local_peaks: np.ndarray, traces_chunk: np.ndarray, waveforms: np.ndarray):\n \"\"\"Substract inplace the cleaned waveforms from the traces_chunk. Parameters ---------- sample_indices : ndarray The indices where the waveforms are maximum (peaks[\"sample_index\"]). traces_chunk : ndarray A chunk of the traces. 
waveforms : ndarray The waveforms extracted from the traces.\"\"\"\n nbefore = self.waveform_extraction_node.nbefore\n nafter = self.waveform_extraction_node.nafter\n if isinstance(self.waveform_extraction_node, ExtractSparseWaveforms):\n neighbours_mask = self.waveform_extraction_node.neighbours_mask\n else:\n neighbours_mask = None\n for peak_index, peak in enumerate(local_peaks):\n center_sample = peak['sample_index']\n first_sample = center_sample - nbefore\n last_sample = center_sample + nafter\n if neighbours_mask is None:\n traces_chunk[first_sample:last_sample, :] -= waveforms[peak_index, :, :]\n else:\n channels, = np.nonzero(neighbours_mask[peak['channel_index']])\n traces_chunk[first_sample:last_sample, channels] -= waveforms[peak_index, :, :len(channels)]\n\n def add_iteration_to_peaks_dtype(self, local_peaks, iteration) -> np.ndarray:\n \"\"\"Add the iteration number to the peaks dtype. Parameters ---------- local_peaks : ndarray The array of local peaks. iteration : int The iteration number. Returns ------- ndarray An array of local peaks with the iteration number added.\"\"\"\n local_peaks_expanded = np.zeros_like(local_peaks, dtype=expanded_base_peak_dtype)\n fields_in_base_type = np.dtype(base_peak_dtype).names\n for field in fields_in_base_type:\n local_peaks_expanded[field] = local_peaks[field]\n local_peaks_expanded['iteration'] = iteration\n return local_peaks_expanded\n", "source": "the_stack_v2_python_sparse", "source_path": "src/spikeinterface/sortingcomponents/peak_detection.py", "source_repo": "SpikeInterface/spikeinterface", "split": "val", "star_events_count": 295} {"blob_id": "82fe6c87cf83102cdfd1210d66201176e4acd41b", "bodies": ["if n < 3:\n return n\none_step_before, two_steps_before = (2, 1)\nall_ways = 0\nfor _ in range(3, n + 1):\n all_ways = one_step_before + two_steps_before\n two_steps_before = one_step_before\n one_step_before = all_ways\nreturn all_ways", "dp = [0] * (n + 1)\ndp[0] = 1\nwhile True:\n dp2 = [0] * (n + 1)\n dp2[-1] = dp[-1]\n for i in range(n):\n if i + 1 <= n:\n dp2[i + 1] += dp[i]\n if i + 2 <= n:\n dp2[i + 2] += dp[i]\n if dp == dp2:\n return dp2[-1]\n else:\n dp = dp2\nreturn 0"], "bodies_text": "<|body_start_0|>\n if n < 3:\n return n\n one_step_before, two_steps_before = (2, 1)\n all_ways = 0\n for _ in range(3, n + 1):\n all_ways = one_step_before + two_steps_before\n two_steps_before = one_step_before\n one_step_before = all_ways\n return all_ways\n<|end_body_0|>\n\n<|body_start_1|>\n dp = [0] * (n + 1)\n dp[0] = 1\n while True:\n dp2 = [0] * (n + 1)\n dp2[-1] = dp[-1]\n for i in range(n):\n if i + 1 <= n:\n dp2[i + 1] += dp[i]\n if i + 2 <= n:\n dp2[i + 2] += dp[i]\n if dp == dp2:\n return dp2[-1]\n else:\n dp = dp2\n return 0\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def climbStairs(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def climbStairs2(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if n < 3:\n return n\n one_step_before, two_steps_before = (2, 1)\n all_ways = 0\n for _ in range(3, n + 1):\n all_ways = one_step_before + two_steps_before\n two_steps_before = one_step_before\n one_step_before = all_ways\n return all_ways\n<|end_body_0|>\n\n<|body_start_1|>\n dp = [0] * (n + 1)\n dp[0] = 1\n while True:\n dp2 = [0] * (n + 1)\n dp2[-1] = dp[-1]\n for i in range(n):\n if i + 1 <= n:\n dp2[i 
+ 1] += dp[i]\n if i + 2 <= n:\n dp2[i + 2] += dp[i]\n if dp == dp2:\n return dp2[-1]\n else:\n dp = dp2\n return 0\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000314", "length_bytes": 945, "license_type": "permissive", "methods": [{"docstring": ":type n: int :rtype: int", "name": "climbStairs", "signature": "def climbStairs(self, n)"}, {"docstring": ":type n: int :rtype: int", "name": "climbStairs2", "signature": "def climbStairs2(self, n)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def climbStairs(self, n): :type n: int :rtype: int\n- def climbStairs2(self, n): :type n: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def climbStairs(self, n): :type n: int :rtype: int\n- def climbStairs2(self, n): :type n: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def climbStairs(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def climbStairs2(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if n < 3:\n return n\n one_step_before, two_steps_before = (2, 1)\n all_ways = 0\n for _ in range(3, n + 1):\n all_ways = one_step_before + two_steps_before\n two_steps_before = one_step_before\n one_step_before = all_ways\n return all_ways\n<|end_body_0|>\n\n<|body_start_1|>\n dp = [0] * (n + 1)\n dp[0] = 1\n while True:\n dp2 = [0] * (n + 1)\n dp2[-1] = dp[-1]\n for i in range(n):\n if i + 1 <= n:\n dp2[i + 1] += dp[i]\n if i + 2 <= n:\n dp2[i + 2] += dp[i]\n if dp == dp2:\n return dp2[-1]\n else:\n dp = dp2\n return 0\n<|end_body_1|>\n", "revision_id": "c8bf33af30569177c5276ffcd72a8d93ba4c402a", "skeleton": "<|skeleton|>\nclass Solution:\n\n def climbStairs(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def climbStairs2(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def climbStairs(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n if n < 3:\n return n\n one_step_before, two_steps_before = (2, 1)\n all_ways = 0\n for _ in range(3, n + 1):\n all_ways = one_step_before + two_steps_before\n two_steps_before = one_step_before\n one_step_before = all_ways\n return all_ways\n\n def climbStairs2(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n dp = [0] * (n + 1)\n dp[0] = 1\n while True:\n dp2 = [0] * (n + 1)\n dp2[-1] = dp[-1]\n for i in range(n):\n if i + 1 <= n:\n dp2[i + 1] += dp[i]\n if i + 2 <= n:\n dp2[i + 2] += dp[i]\n if dp == dp2:\n return dp2[-1]\n else:\n dp = dp2\n return 0\n", "source": "the_stack_v2_python_sparse", "source_path": "1-100/61-70/70-climbingStairs/climbingStairs.py", "source_repo": "xuychen/Leetcode", "split": "val", "star_events_count": 0} {"blob_id": "a24cd45a4efa46e6ba01bfdbd50b532d64011c01", "bodies": ["allow_speech_tags = [as_text(item) for item in allow_speech_tags]\nself.default_speech_tag_filter = allow_speech_tags\nself.stop_words = set()\nself.stop_words_file = stopwords_path\nif type(stop_words_file) is str:\n self.stop_words_file = stop_words_file\nfor word in codecs.open(self.stop_words_file, 'r', 'utf-8', 'ignore'):\n self.stop_words.add(word.strip())", "text = as_text(text)\njieba_result = pseg.cut(text)\nif 
use_speech_tags_filter == True:\n jieba_result = [w for w in jieba_result if w.flag in self.default_speech_tag_filter]\nelse:\n jieba_result = [w for w in jieba_result]\nword_list = [w.word.strip() for w in jieba_result if w.flag != 'x']\nword_list = [word for word in word_list if len(word) > 0]\nif lower:\n word_list = [word.lower() for word in word_list]\nif use_stop_words:\n word_list = [word.strip() for word in word_list if word.strip() not in self.stop_words]\nreturn word_list", "res = []\nfor sentence in sentences:\n res.append(self.segment(text=sentence, lower=lower, use_stop_words=use_stop_words, use_speech_tags_filter=use_speech_tags_filter))\nreturn res"], "bodies_text": "<|body_start_0|>\n allow_speech_tags = [as_text(item) for item in allow_speech_tags]\n self.default_speech_tag_filter = allow_speech_tags\n self.stop_words = set()\n self.stop_words_file = stopwords_path\n if type(stop_words_file) is str:\n self.stop_words_file = stop_words_file\n for word in codecs.open(self.stop_words_file, 'r', 'utf-8', 'ignore'):\n self.stop_words.add(word.strip())\n<|end_body_0|>\n\n<|body_start_1|>\n text = as_text(text)\n jieba_result = pseg.cut(text)\n if use_speech_tags_filter == True:\n jieba_result = [w for w in jieba_result if w.flag in self.default_speech_tag_filter]\n else:\n jieba_result = [w for w in jieba_result]\n word_list = [w.word.strip() for w in jieba_result if w.flag != 'x']\n word_list = [word for word in word_list if len(word) > 0]\n if lower:\n word_list = [word.lower() for word in word_list]\n if use_stop_words:\n word_list = [word.strip() for word in word_list if word.strip() not in self.stop_words]\n return word_list\n<|end_body_1|>\n\n<|body_start_2|>\n res = []\n for sentence in sentences:\n res.append(self.segment(text=sentence, lower=lower, use_stop_words=use_stop_words, use_speech_tags_filter=use_speech_tags_filter))\n return res\n<|end_body_2|>\n", "class_docstring": "", "class_name": "WordSegmentation", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WordSegmentation:\n\n def __init__(self, stop_words_file=None, allow_speech_tags=allow_speech_tags):\n \"\"\"Keyword arguments: stop_words_file -- 保存停止词的文件路径,utf8编码,每行一个停止词。若不是str类型,则使用默认的停止词 allow_speech_tags -- 词性列表,用于过滤\"\"\"\n <|body_0|>\n\n def segment(self, text, lower=True, use_stop_words=True, use_speech_tags_filter=False):\n \"\"\"对一段文本进行分词,返回list类型的分词结果 Keyword arguments: lower -- 是否将单词小写(针对英文) use_stop_words -- 若为True,则利用停止词集合来过滤(去掉停止词) use_speech_tags_filter -- 是否基于词性进行过滤。若为True,则使用self.default_speech_tag_filter过滤。否则,不过滤。\"\"\"\n <|body_1|>\n\n def segment_sentences(self, sentences, lower=True, use_stop_words=True, use_speech_tags_filter=False):\n \"\"\"将列表sequences中的每个元素/句子转换为由单词构成的列表。 sequences -- 列表,每个元素是一个句子(字符串类型)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n allow_speech_tags = [as_text(item) for item in allow_speech_tags]\n self.default_speech_tag_filter = allow_speech_tags\n self.stop_words = set()\n self.stop_words_file = stopwords_path\n if type(stop_words_file) is str:\n self.stop_words_file = stop_words_file\n for word in codecs.open(self.stop_words_file, 'r', 'utf-8', 'ignore'):\n self.stop_words.add(word.strip())\n<|end_body_0|>\n\n<|body_start_1|>\n text = as_text(text)\n jieba_result = pseg.cut(text)\n if use_speech_tags_filter == True:\n jieba_result = [w for w in jieba_result if w.flag in self.default_speech_tag_filter]\n else:\n jieba_result = [w for w in jieba_result]\n word_list = [w.word.strip() for 
w in jieba_result if w.flag != 'x']\n word_list = [word for word in word_list if len(word) > 0]\n if lower:\n word_list = [word.lower() for word in word_list]\n if use_stop_words:\n word_list = [word.strip() for word in word_list if word.strip() not in self.stop_words]\n return word_list\n<|end_body_1|>\n\n<|body_start_2|>\n res = []\n for sentence in sentences:\n res.append(self.segment(text=sentence, lower=lower, use_stop_words=use_stop_words, use_speech_tags_filter=use_speech_tags_filter))\n return res\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000315", "length_bytes": 27442, "license_type": "no_license", "methods": [{"docstring": "Keyword arguments: stop_words_file -- 保存停止词的文件路径,utf8编码,每行一个停止词。若不是str类型,则使用默认的停止词 allow_speech_tags -- 词性列表,用于过滤", "name": "__init__", "signature": "def __init__(self, stop_words_file=None, allow_speech_tags=allow_speech_tags)"}, {"docstring": "对一段文本进行分词,返回list类型的分词结果 Keyword arguments: lower -- 是否将单词小写(针对英文) use_stop_words -- 若为True,则利用停止词集合来过滤(去掉停止词) use_speech_tags_filter -- 是否基于词性进行过滤。若为True,则使用self.default_speech_tag_filter过滤。否则,不过滤。", "name": "segment", "signature": "def segment(self, text, lower=True, use_stop_words=True, use_speech_tags_filter=False)"}, {"docstring": "将列表sequences中的每个元素/句子转换为由单词构成的列表。 sequences -- 列表,每个元素是一个句子(字符串类型)", "name": "segment_sentences", "signature": "def segment_sentences(self, sentences, lower=True, use_stop_words=True, use_speech_tags_filter=False)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006220", "prompt": "Implement the Python class `WordSegmentation` described below.\n\nClass description:\nImplement the WordSegmentation class.\n\nMethod signatures and docstrings:\n- def __init__(self, stop_words_file=None, allow_speech_tags=allow_speech_tags): Keyword arguments: stop_words_file -- 保存停止词的文件路径,utf8编码,每行一个停止词。若不是str类型,则使用默认的停止词 allow_speech_tags -- 词性列表,用于过滤\n- def segment(self, text, lower=True, use_stop_words=True, use_speech_tags_filter=False): 对一段文本进行分词,返回list类型的分词结果 Keyword arguments: lower -- 是否将单词小写(针对英文) use_stop_words -- 若为True,则利用停止词集合来过滤(去掉停止词) use_speech_tags_filter -- 是否基于词性进行过滤。若为True,则使用self.default_speech_tag_filter过滤。否则,不过滤。\n- def segment_sentences(self, sentences, lower=True, use_stop_words=True, use_speech_tags_filter=False): 将列表sequences中的每个元素/句子转换为由单词构成的列表。 sequences -- 列表,每个元素是一个句子(字符串类型)", "prompted_full_text": "Implement the Python class `WordSegmentation` described below.\n\nClass description:\nImplement the WordSegmentation class.\n\nMethod signatures and docstrings:\n- def __init__(self, stop_words_file=None, allow_speech_tags=allow_speech_tags): Keyword arguments: stop_words_file -- 保存停止词的文件路径,utf8编码,每行一个停止词。若不是str类型,则使用默认的停止词 allow_speech_tags -- 词性列表,用于过滤\n- def segment(self, text, lower=True, use_stop_words=True, use_speech_tags_filter=False): 对一段文本进行分词,返回list类型的分词结果 Keyword arguments: lower -- 是否将单词小写(针对英文) use_stop_words -- 若为True,则利用停止词集合来过滤(去掉停止词) use_speech_tags_filter -- 是否基于词性进行过滤。若为True,则使用self.default_speech_tag_filter过滤。否则,不过滤。\n- def segment_sentences(self, sentences, lower=True, use_stop_words=True, use_speech_tags_filter=False): 将列表sequences中的每个元素/句子转换为由单词构成的列表。 sequences -- 列表,每个元素是一个句子(字符串类型)\n\n<|skeleton|>\nclass WordSegmentation:\n\n def __init__(self, stop_words_file=None, allow_speech_tags=allow_speech_tags):\n \"\"\"Keyword arguments: stop_words_file -- 保存停止词的文件路径,utf8编码,每行一个停止词。若不是str类型,则使用默认的停止词 allow_speech_tags -- 词性列表,用于过滤\"\"\"\n <|body_0|>\n\n def segment(self, text, lower=True, use_stop_words=True, 
use_speech_tags_filter=False):\n \"\"\"对一段文本进行分词,返回list类型的分词结果 Keyword arguments: lower -- 是否将单词小写(针对英文) use_stop_words -- 若为True,则利用停止词集合来过滤(去掉停止词) use_speech_tags_filter -- 是否基于词性进行过滤。若为True,则使用self.default_speech_tag_filter过滤。否则,不过滤。\"\"\"\n <|body_1|>\n\n def segment_sentences(self, sentences, lower=True, use_stop_words=True, use_speech_tags_filter=False):\n \"\"\"将列表sequences中的每个元素/句子转换为由单词构成的列表。 sequences -- 列表,每个元素是一个句子(字符串类型)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n allow_speech_tags = [as_text(item) for item in allow_speech_tags]\n self.default_speech_tag_filter = allow_speech_tags\n self.stop_words = set()\n self.stop_words_file = stopwords_path\n if type(stop_words_file) is str:\n self.stop_words_file = stop_words_file\n for word in codecs.open(self.stop_words_file, 'r', 'utf-8', 'ignore'):\n self.stop_words.add(word.strip())\n<|end_body_0|>\n\n<|body_start_1|>\n text = as_text(text)\n jieba_result = pseg.cut(text)\n if use_speech_tags_filter == True:\n jieba_result = [w for w in jieba_result if w.flag in self.default_speech_tag_filter]\n else:\n jieba_result = [w for w in jieba_result]\n word_list = [w.word.strip() for w in jieba_result if w.flag != 'x']\n word_list = [word for word in word_list if len(word) > 0]\n if lower:\n word_list = [word.lower() for word in word_list]\n if use_stop_words:\n word_list = [word.strip() for word in word_list if word.strip() not in self.stop_words]\n return word_list\n<|end_body_1|>\n\n<|body_start_2|>\n res = []\n for sentence in sentences:\n res.append(self.segment(text=sentence, lower=lower, use_stop_words=use_stop_words, use_speech_tags_filter=use_speech_tags_filter))\n return res\n<|end_body_2|>\n", "revision_id": "815a5706183063522d5a26c321b047ee1ab812cf", "skeleton": "<|skeleton|>\nclass WordSegmentation:\n\n def __init__(self, stop_words_file=None, allow_speech_tags=allow_speech_tags):\n \"\"\"Keyword arguments: stop_words_file -- 保存停止词的文件路径,utf8编码,每行一个停止词。若不是str类型,则使用默认的停止词 allow_speech_tags -- 词性列表,用于过滤\"\"\"\n <|body_0|>\n\n def segment(self, text, lower=True, use_stop_words=True, use_speech_tags_filter=False):\n \"\"\"对一段文本进行分词,返回list类型的分词结果 Keyword arguments: lower -- 是否将单词小写(针对英文) use_stop_words -- 若为True,则利用停止词集合来过滤(去掉停止词) use_speech_tags_filter -- 是否基于词性进行过滤。若为True,则使用self.default_speech_tag_filter过滤。否则,不过滤。\"\"\"\n <|body_1|>\n\n def segment_sentences(self, sentences, lower=True, use_stop_words=True, use_speech_tags_filter=False):\n \"\"\"将列表sequences中的每个元素/句子转换为由单词构成的列表。 sequences -- 列表,每个元素是一个句子(字符串类型)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class WordSegmentation:\n def __init__(self, stop_words_file=None, allow_speech_tags=allow_speech_tags):\n \"\"\"Keyword arguments: stop_words_file -- 保存停止词的文件路径,utf8编码,每行一个停止词。若不是str类型,则使用默认的停止词 allow_speech_tags -- 词性列表,用于过滤\"\"\"\n allow_speech_tags = [as_text(item) for item in allow_speech_tags]\n self.default_speech_tag_filter = allow_speech_tags\n self.stop_words = set()\n self.stop_words_file = stopwords_path\n if type(stop_words_file) is str:\n self.stop_words_file = stop_words_file\n for word in codecs.open(self.stop_words_file, 'r', 'utf-8', 'ignore'):\n self.stop_words.add(word.strip())\n\n def segment(self, text, lower=True, use_stop_words=True, use_speech_tags_filter=False):\n \"\"\"对一段文本进行分词,返回list类型的分词结果 Keyword arguments: lower -- 是否将单词小写(针对英文) use_stop_words -- 若为True,则利用停止词集合来过滤(去掉停止词) use_speech_tags_filter -- 
是否基于词性进行过滤。若为True,则使用self.default_speech_tag_filter过滤。否则,不过滤。\"\"\"\n text = as_text(text)\n jieba_result = pseg.cut(text)\n if use_speech_tags_filter == True:\n jieba_result = [w for w in jieba_result if w.flag in self.default_speech_tag_filter]\n else:\n jieba_result = [w for w in jieba_result]\n word_list = [w.word.strip() for w in jieba_result if w.flag != 'x']\n word_list = [word for word in word_list if len(word) > 0]\n if lower:\n word_list = [word.lower() for word in word_list]\n if use_stop_words:\n word_list = [word.strip() for word in word_list if word.strip() not in self.stop_words]\n return word_list\n\n def segment_sentences(self, sentences, lower=True, use_stop_words=True, use_speech_tags_filter=False):\n \"\"\"将列表sequences中的每个元素/句子转换为由单词构成的列表。 sequences -- 列表,每个元素是一个句子(字符串类型)\"\"\"\n res = []\n for sentence in sentences:\n res.append(self.segment(text=sentence, lower=lower, use_stop_words=use_stop_words, use_speech_tags_filter=use_speech_tags_filter))\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "knowledge_graph/information/keywords_extraction.py", "source_repo": "wagaman/deep_learning", "split": "val", "star_events_count": 0} {"blob_id": "ca92d368ab50f16b736a3b6ad0ebb54ab61b9efe", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn ServiceAnnouncementAttachment()", "from .entity import Entity\nfrom .entity import Entity\nfields: Dict[str, Callable[[Any], None]] = {'content': lambda n: setattr(self, 'content', n.get_bytes_value()), 'contentType': lambda n: setattr(self, 'content_type', n.get_str_value()), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'name': lambda n: setattr(self, 'name', n.get_str_value()), 'size': lambda n: setattr(self, 'size', n.get_int_value())}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_bytes_value('content', self.content)\nwriter.write_str_value('contentType', self.content_type)\nwriter.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\nwriter.write_str_value('name', self.name)\nwriter.write_int_value('size', self.size)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return ServiceAnnouncementAttachment()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'content': lambda n: setattr(self, 'content', n.get_bytes_value()), 'contentType': lambda n: setattr(self, 'content_type', n.get_str_value()), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'name': lambda n: setattr(self, 'name', n.get_str_value()), 'size': lambda n: setattr(self, 'size', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_bytes_value('content', self.content)\n writer.write_str_value('contentType', self.content_type)\n writer.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\n writer.write_str_value('name', self.name)\n writer.write_int_value('size', self.size)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ServiceAnnouncementAttachment", "detected_licenses": 
["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ServiceAnnouncementAttachment:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ServiceAnnouncementAttachment:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ServiceAnnouncementAttachment\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return ServiceAnnouncementAttachment()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'content': lambda n: setattr(self, 'content', n.get_bytes_value()), 'contentType': lambda n: setattr(self, 'content_type', n.get_str_value()), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'name': lambda n: setattr(self, 'name', n.get_str_value()), 'size': lambda n: setattr(self, 'size', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_bytes_value('content', self.content)\n writer.write_str_value('contentType', self.content_type)\n writer.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\n writer.write_str_value('name', self.name)\n writer.write_int_value('size', self.size)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000316", "length_bytes": 2908, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ServiceAnnouncementAttachment", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ServiceAnnouncementAttachment"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "prompt": "Implement the Python class `ServiceAnnouncementAttachment` described below.\n\nClass description:\nImplement the ServiceAnnouncementAttachment class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ServiceAnnouncementAttachment: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: 
ServiceAnnouncementAttachment\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `ServiceAnnouncementAttachment` described below.\n\nClass description:\nImplement the ServiceAnnouncementAttachment class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ServiceAnnouncementAttachment: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ServiceAnnouncementAttachment\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass ServiceAnnouncementAttachment:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ServiceAnnouncementAttachment:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ServiceAnnouncementAttachment\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return ServiceAnnouncementAttachment()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'content': lambda n: setattr(self, 'content', n.get_bytes_value()), 'contentType': lambda n: setattr(self, 'content_type', n.get_str_value()), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'name': lambda n: setattr(self, 'name', n.get_str_value()), 'size': lambda n: setattr(self, 'size', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_bytes_value('content', self.content)\n writer.write_str_value('contentType', self.content_type)\n writer.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\n writer.write_str_value('name', self.name)\n writer.write_int_value('size', self.size)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass ServiceAnnouncementAttachment:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ServiceAnnouncementAttachment:\n \"\"\"Creates a new instance of the appropriate class based on 
discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ServiceAnnouncementAttachment\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ServiceAnnouncementAttachment:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ServiceAnnouncementAttachment:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ServiceAnnouncementAttachment\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return ServiceAnnouncementAttachment()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .entity import Entity\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'content': lambda n: setattr(self, 'content', n.get_bytes_value()), 'contentType': lambda n: setattr(self, 'content_type', n.get_str_value()), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'name': lambda n: setattr(self, 'name', n.get_str_value()), 'size': lambda n: setattr(self, 'size', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_bytes_value('content', self.content)\n writer.write_str_value('contentType', self.content_type)\n writer.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\n writer.write_str_value('name', self.name)\n writer.write_int_value('size', self.size)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/service_announcement_attachment.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "val", "star_events_count": 135} {"blob_id": "447a2628a8fd2f102efa973d12b8218c4926567d", "bodies": ["zk_client = KazooClient(hosts=','.join(zk_locations), connection_retry=ZK_PERSISTENT_RECONNECTS)\nzk_client.start()\nself.ioloop = io_loop\nself.source = source\nself.start_time = None\nself.status = 'Not started'\nself.finish_time = None\nself.api_methods = api_methods.APIMethods(zk_client)\nself.scheduled_indexes = set()\nself.scheduled_jobs = set()\nself.failed_jobs = set()\nself.succeeded_jobs = set()\nself.max_concurrency = max_concurrency\nself.docs_imported = 0", "self.start_time = self.ioloop.time()\nself.status = 'In progress'\nfor project_id, namespace, index in self.source.iter_indexes():\n self.scheduled_indexes.add((project_id, namespace, index))\n self.ioloop.add_callback(self.import_index, project_id, namespace, 
index)\nlogger.info('Waiting for all import jobs to be completed')\nwhile self.scheduled_indexes or self.scheduled_jobs:\n await gen.sleep(0.25)\nlogger.info('Import has been finished and took {:.2f}s'.format(self.ioloop.time() - self.start_time))\nlogger.info(' - {} jobs failed'.format(len(self.failed_jobs)))\nlogger.info(' - {} jobs succeeded ({} documents)'.format(len(self.succeeded_jobs), self.docs_imported))\nself.status = 'Done'\nself.finish_time = self.ioloop.time()", "logger.info('Starting import of index: {}/{}/{}'.format(project_id, namespace, index))\nfirst_page = True\nfor key in self.source.iter_object_keys(project_id, namespace, index):\n while len(self.scheduled_jobs) > self.max_concurrency:\n await gen.sleep(0.25 * (random.random() + 0.5))\n self.scheduled_jobs.add(key)\n self.ioloop.add_callback(self.import_page, key)\n if first_page:\n await gen.sleep(1)\n first_page = False\nself.scheduled_indexes.remove((project_id, namespace, index))", "logger.debug('Starting import of object: {}'.format(object_key))\nfor attempt in range(self.max_retries):\n try:\n index_documents_pb = self.source.get_index_documents_pb(object_key)\n response = search_pb2.IndexDocumentResponse()\n await self.api_methods.index_document(index_documents_pb, response)\n self.docs_imported += len(index_documents_pb.params.document)\n self.succeeded_jobs.add(object_key)\n self.scheduled_jobs.remove(object_key)\n logger.debug('Successfully imported object: {}'.format(object_key))\n break\n except Exception as err:\n logger.error('Failed to import object with key \"{}\" ({})'.format(object_key, err))\n if attempt < self.max_retries - 1:\n backoff = 0.2 * 2 ** attempt\n logger.info('Retrying in {:.1f}s'.format(backoff))\n await gen.sleep(backoff)\n else:\n self.failed_jobs.add(object_key)\n self.scheduled_jobs.remove(object_key)\n raise"], "bodies_text": "<|body_start_0|>\n zk_client = KazooClient(hosts=','.join(zk_locations), connection_retry=ZK_PERSISTENT_RECONNECTS)\n zk_client.start()\n self.ioloop = io_loop\n self.source = source\n self.start_time = None\n self.status = 'Not started'\n self.finish_time = None\n self.api_methods = api_methods.APIMethods(zk_client)\n self.scheduled_indexes = set()\n self.scheduled_jobs = set()\n self.failed_jobs = set()\n self.succeeded_jobs = set()\n self.max_concurrency = max_concurrency\n self.docs_imported = 0\n<|end_body_0|>\n\n<|body_start_1|>\n self.start_time = self.ioloop.time()\n self.status = 'In progress'\n for project_id, namespace, index in self.source.iter_indexes():\n self.scheduled_indexes.add((project_id, namespace, index))\n self.ioloop.add_callback(self.import_index, project_id, namespace, index)\n logger.info('Waiting for all import jobs to be completed')\n while self.scheduled_indexes or self.scheduled_jobs:\n await gen.sleep(0.25)\n logger.info('Import has been finished and took {:.2f}s'.format(self.ioloop.time() - self.start_time))\n logger.info(' - {} jobs failed'.format(len(self.failed_jobs)))\n logger.info(' - {} jobs succeeded ({} documents)'.format(len(self.succeeded_jobs), self.docs_imported))\n self.status = 'Done'\n self.finish_time = self.ioloop.time()\n<|end_body_1|>\n\n<|body_start_2|>\n logger.info('Starting import of index: {}/{}/{}'.format(project_id, namespace, index))\n first_page = True\n for key in self.source.iter_object_keys(project_id, namespace, index):\n while len(self.scheduled_jobs) > self.max_concurrency:\n await gen.sleep(0.25 * (random.random() + 0.5))\n self.scheduled_jobs.add(key)\n 
self.ioloop.add_callback(self.import_page, key)\n if first_page:\n await gen.sleep(1)\n first_page = False\n self.scheduled_indexes.remove((project_id, namespace, index))\n<|end_body_2|>\n\n<|body_start_3|>\n logger.debug('Starting import of object: {}'.format(object_key))\n for attempt in range(self.max_retries):\n try:\n index_documents_pb = self.source.get_index_documents_pb(object_key)\n response = search_pb2.IndexDocumentResponse()\n await self.api_methods.index_document(index_documents_pb, response)\n self.docs_imported += len(index_documents_pb.params.document)\n self.succeeded_jobs.add(object_key)\n self.scheduled_jobs.remove(object_key)\n logger.debug('Successfully imported object: {}'.format(object_key))\n break\n except Exception as err:\n logger.error('Failed to import object with key \"{}\" ({})'.format(object_key, err))\n if attempt < self.max_retries - 1:\n backoff = 0.2 * 2 ** attempt\n logger.info('Retrying in {:.1f}s'.format(backoff))\n await gen.sleep(backoff)\n else:\n self.failed_jobs.add(object_key)\n self.scheduled_jobs.remove(object_key)\n raise\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Importer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Importer:\n\n def __init__(self, io_loop, source, zk_locations, max_concurrency):\n \"\"\"Args: io_loop: an instance of tornado IOLoop. source: an instance of import Source (e.g.: S3Source). zk_locations: a list - Zookeeper locations. max_concurrency: an int - maximum number of concurrent jobs.\"\"\"\n <|body_0|>\n\n async def import_(self):\n \"\"\"Starts concurrent jobs for importing all search indexes. Then it waits for all started jobs to be completed.\"\"\"\n <|body_1|>\n\n async def import_index(self, project_id, namespace, index):\n \"\"\"Starts concurrent jobs for importing entire index. import_ method will wait for these jobs to be completed. Args: project_id: a str - GAE project ID. namespace: a str - GAE search service namespace. index: a str - GAE Search index name.\"\"\"\n <|body_2|>\n\n async def import_page(self, object_key):\n \"\"\"Imports a single object from backup. An object is instance of search_pb2.IndexDocumentResponse. 
Args: object_key: a str - storage object identifier.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n zk_client = KazooClient(hosts=','.join(zk_locations), connection_retry=ZK_PERSISTENT_RECONNECTS)\n zk_client.start()\n self.ioloop = io_loop\n self.source = source\n self.start_time = None\n self.status = 'Not started'\n self.finish_time = None\n self.api_methods = api_methods.APIMethods(zk_client)\n self.scheduled_indexes = set()\n self.scheduled_jobs = set()\n self.failed_jobs = set()\n self.succeeded_jobs = set()\n self.max_concurrency = max_concurrency\n self.docs_imported = 0\n<|end_body_0|>\n\n<|body_start_1|>\n self.start_time = self.ioloop.time()\n self.status = 'In progress'\n for project_id, namespace, index in self.source.iter_indexes():\n self.scheduled_indexes.add((project_id, namespace, index))\n self.ioloop.add_callback(self.import_index, project_id, namespace, index)\n logger.info('Waiting for all import jobs to be completed')\n while self.scheduled_indexes or self.scheduled_jobs:\n await gen.sleep(0.25)\n logger.info('Import has been finished and took {:.2f}s'.format(self.ioloop.time() - self.start_time))\n logger.info(' - {} jobs failed'.format(len(self.failed_jobs)))\n logger.info(' - {} jobs succeeded ({} documents)'.format(len(self.succeeded_jobs), self.docs_imported))\n self.status = 'Done'\n self.finish_time = self.ioloop.time()\n<|end_body_1|>\n\n<|body_start_2|>\n logger.info('Starting import of index: {}/{}/{}'.format(project_id, namespace, index))\n first_page = True\n for key in self.source.iter_object_keys(project_id, namespace, index):\n while len(self.scheduled_jobs) > self.max_concurrency:\n await gen.sleep(0.25 * (random.random() + 0.5))\n self.scheduled_jobs.add(key)\n self.ioloop.add_callback(self.import_page, key)\n if first_page:\n await gen.sleep(1)\n first_page = False\n self.scheduled_indexes.remove((project_id, namespace, index))\n<|end_body_2|>\n\n<|body_start_3|>\n logger.debug('Starting import of object: {}'.format(object_key))\n for attempt in range(self.max_retries):\n try:\n index_documents_pb = self.source.get_index_documents_pb(object_key)\n response = search_pb2.IndexDocumentResponse()\n await self.api_methods.index_document(index_documents_pb, response)\n self.docs_imported += len(index_documents_pb.params.document)\n self.succeeded_jobs.add(object_key)\n self.scheduled_jobs.remove(object_key)\n logger.debug('Successfully imported object: {}'.format(object_key))\n break\n except Exception as err:\n logger.error('Failed to import object with key \"{}\" ({})'.format(object_key, err))\n if attempt < self.max_retries - 1:\n backoff = 0.2 * 2 ** attempt\n logger.info('Retrying in {:.1f}s'.format(backoff))\n await gen.sleep(backoff)\n else:\n self.failed_jobs.add(object_key)\n self.scheduled_jobs.remove(object_key)\n raise\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000317", "length_bytes": 6138, "license_type": "permissive", "methods": [{"docstring": "Args: io_loop: an instance of tornado IOLoop. source: an instance of import Source (e.g.: S3Source). zk_locations: a list - Zookeeper locations. max_concurrency: an int - maximum number of concurrent jobs.", "name": "__init__", "signature": "def __init__(self, io_loop, source, zk_locations, max_concurrency)"}, {"docstring": "Starts concurrent jobs for importing all search indexes. Then it waits for all started jobs to be completed.", "name": "import_", "signature": "async def import_(self)"}, {"docstring": "Starts concurrent jobs for importing entire index. 
import_ method will wait for these jobs to be completed. Args: project_id: a str - GAE project ID. namespace: a str - GAE search service namespace. index: a str - GAE Search index name.", "name": "import_index", "signature": "async def import_index(self, project_id, namespace, index)"}, {"docstring": "Imports a single object from backup. An object is instance of search_pb2.IndexDocumentResponse. Args: object_key: a str - storage object identifier.", "name": "import_page", "signature": "async def import_page(self, object_key)"}], "n_methods": 4, "prompt": "Implement the Python class `Importer` described below.\n\nClass description:\nImplement the Importer class.\n\nMethod signatures and docstrings:\n- def __init__(self, io_loop, source, zk_locations, max_concurrency): Args: io_loop: an instance of tornado IOLoop. source: an instance of import Source (e.g.: S3Source). zk_locations: a list - Zookeeper locations. max_concurrency: an int - maximum number of concurrent jobs.\n- async def import_(self): Starts concurrent jobs for importing all search indexes. Then it waits for all started jobs to be completed.\n- async def import_index(self, project_id, namespace, index): Starts concurrent jobs for importing entire index. import_ method will wait for these jobs to be completed. Args: project_id: a str - GAE project ID. namespace: a str - GAE search service namespace. index: a str - GAE Search index name.\n- async def import_page(self, object_key): Imports a single object from backup. An object is instance of search_pb2.IndexDocumentResponse. Args: object_key: a str - storage object identifier.", "prompted_full_text": "Implement the Python class `Importer` described below.\n\nClass description:\nImplement the Importer class.\n\nMethod signatures and docstrings:\n- def __init__(self, io_loop, source, zk_locations, max_concurrency): Args: io_loop: an instance of tornado IOLoop. source: an instance of import Source (e.g.: S3Source). zk_locations: a list - Zookeeper locations. max_concurrency: an int - maximum number of concurrent jobs.\n- async def import_(self): Starts concurrent jobs for importing all search indexes. Then it waits for all started jobs to be completed.\n- async def import_index(self, project_id, namespace, index): Starts concurrent jobs for importing entire index. import_ method will wait for these jobs to be completed. Args: project_id: a str - GAE project ID. namespace: a str - GAE search service namespace. index: a str - GAE Search index name.\n- async def import_page(self, object_key): Imports a single object from backup. An object is instance of search_pb2.IndexDocumentResponse. Args: object_key: a str - storage object identifier.\n\n<|skeleton|>\nclass Importer:\n\n def __init__(self, io_loop, source, zk_locations, max_concurrency):\n \"\"\"Args: io_loop: an instance of tornado IOLoop. source: an instance of import Source (e.g.: S3Source). zk_locations: a list - Zookeeper locations. max_concurrency: an int - maximum number of concurrent jobs.\"\"\"\n <|body_0|>\n\n async def import_(self):\n \"\"\"Starts concurrent jobs for importing all search indexes. Then it waits for all started jobs to be completed.\"\"\"\n <|body_1|>\n\n async def import_index(self, project_id, namespace, index):\n \"\"\"Starts concurrent jobs for importing entire index. import_ method will wait for these jobs to be completed. Args: project_id: a str - GAE project ID. namespace: a str - GAE search service namespace. 
index: a str - GAE Search index name.\"\"\"\n <|body_2|>\n\n async def import_page(self, object_key):\n \"\"\"Imports a single object from backup. An object is instance of search_pb2.IndexDocumentResponse. Args: object_key: a str - storage object identifier.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n zk_client = KazooClient(hosts=','.join(zk_locations), connection_retry=ZK_PERSISTENT_RECONNECTS)\n zk_client.start()\n self.ioloop = io_loop\n self.source = source\n self.start_time = None\n self.status = 'Not started'\n self.finish_time = None\n self.api_methods = api_methods.APIMethods(zk_client)\n self.scheduled_indexes = set()\n self.scheduled_jobs = set()\n self.failed_jobs = set()\n self.succeeded_jobs = set()\n self.max_concurrency = max_concurrency\n self.docs_imported = 0\n<|end_body_0|>\n\n<|body_start_1|>\n self.start_time = self.ioloop.time()\n self.status = 'In progress'\n for project_id, namespace, index in self.source.iter_indexes():\n self.scheduled_indexes.add((project_id, namespace, index))\n self.ioloop.add_callback(self.import_index, project_id, namespace, index)\n logger.info('Waiting for all import jobs to be completed')\n while self.scheduled_indexes or self.scheduled_jobs:\n await gen.sleep(0.25)\n logger.info('Import has been finished and took {:.2f}s'.format(self.ioloop.time() - self.start_time))\n logger.info(' - {} jobs failed'.format(len(self.failed_jobs)))\n logger.info(' - {} jobs succeeded ({} documents)'.format(len(self.succeeded_jobs), self.docs_imported))\n self.status = 'Done'\n self.finish_time = self.ioloop.time()\n<|end_body_1|>\n\n<|body_start_2|>\n logger.info('Starting import of index: {}/{}/{}'.format(project_id, namespace, index))\n first_page = True\n for key in self.source.iter_object_keys(project_id, namespace, index):\n while len(self.scheduled_jobs) > self.max_concurrency:\n await gen.sleep(0.25 * (random.random() + 0.5))\n self.scheduled_jobs.add(key)\n self.ioloop.add_callback(self.import_page, key)\n if first_page:\n await gen.sleep(1)\n first_page = False\n self.scheduled_indexes.remove((project_id, namespace, index))\n<|end_body_2|>\n\n<|body_start_3|>\n logger.debug('Starting import of object: {}'.format(object_key))\n for attempt in range(self.max_retries):\n try:\n index_documents_pb = self.source.get_index_documents_pb(object_key)\n response = search_pb2.IndexDocumentResponse()\n await self.api_methods.index_document(index_documents_pb, response)\n self.docs_imported += len(index_documents_pb.params.document)\n self.succeeded_jobs.add(object_key)\n self.scheduled_jobs.remove(object_key)\n logger.debug('Successfully imported object: {}'.format(object_key))\n break\n except Exception as err:\n logger.error('Failed to import object with key \"{}\" ({})'.format(object_key, err))\n if attempt < self.max_retries - 1:\n backoff = 0.2 * 2 ** attempt\n logger.info('Retrying in {:.1f}s'.format(backoff))\n await gen.sleep(backoff)\n else:\n self.failed_jobs.add(object_key)\n self.scheduled_jobs.remove(object_key)\n raise\n<|end_body_3|>\n", "revision_id": "be17e5f658d7b42b5aa7eeb7a5ddd4962f3ea82f", "skeleton": "<|skeleton|>\nclass Importer:\n\n def __init__(self, io_loop, source, zk_locations, max_concurrency):\n \"\"\"Args: io_loop: an instance of tornado IOLoop. source: an instance of import Source (e.g.: S3Source). zk_locations: a list - Zookeeper locations. 
max_concurrency: an int - maximum number of concurrent jobs.\"\"\"\n <|body_0|>\n\n async def import_(self):\n \"\"\"Starts concurrent jobs for importing all search indexes. Then it waits for all started jobs to be completed.\"\"\"\n <|body_1|>\n\n async def import_index(self, project_id, namespace, index):\n \"\"\"Starts concurrent jobs for importing entire index. import_ method will wait for these jobs to be completed. Args: project_id: a str - GAE project ID. namespace: a str - GAE search service namespace. index: a str - GAE Search index name.\"\"\"\n <|body_2|>\n\n async def import_page(self, object_key):\n \"\"\"Imports a single object from backup. An object is instance of search_pb2.IndexDocumentResponse. Args: object_key: a str - storage object identifier.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Importer:\n def __init__(self, io_loop, source, zk_locations, max_concurrency):\n \"\"\"Args: io_loop: an instance of tornado IOLoop. source: an instance of import Source (e.g.: S3Source). zk_locations: a list - Zookeeper locations. max_concurrency: an int - maximum number of concurrent jobs.\"\"\"\n zk_client = KazooClient(hosts=','.join(zk_locations), connection_retry=ZK_PERSISTENT_RECONNECTS)\n zk_client.start()\n self.ioloop = io_loop\n self.source = source\n self.start_time = None\n self.status = 'Not started'\n self.finish_time = None\n self.api_methods = api_methods.APIMethods(zk_client)\n self.scheduled_indexes = set()\n self.scheduled_jobs = set()\n self.failed_jobs = set()\n self.succeeded_jobs = set()\n self.max_concurrency = max_concurrency\n self.docs_imported = 0\n\n async def import_(self):\n \"\"\"Starts concurrent jobs for importing all search indexes. Then it waits for all started jobs to be completed.\"\"\"\n self.start_time = self.ioloop.time()\n self.status = 'In progress'\n for project_id, namespace, index in self.source.iter_indexes():\n self.scheduled_indexes.add((project_id, namespace, index))\n self.ioloop.add_callback(self.import_index, project_id, namespace, index)\n logger.info('Waiting for all import jobs to be completed')\n while self.scheduled_indexes or self.scheduled_jobs:\n await gen.sleep(0.25)\n logger.info('Import has been finished and took {:.2f}s'.format(self.ioloop.time() - self.start_time))\n logger.info(' - {} jobs failed'.format(len(self.failed_jobs)))\n logger.info(' - {} jobs succeeded ({} documents)'.format(len(self.succeeded_jobs), self.docs_imported))\n self.status = 'Done'\n self.finish_time = self.ioloop.time()\n\n async def import_index(self, project_id, namespace, index):\n \"\"\"Starts concurrent jobs for importing entire index. import_ method will wait for these jobs to be completed. Args: project_id: a str - GAE project ID. namespace: a str - GAE search service namespace. index: a str - GAE Search index name.\"\"\"\n logger.info('Starting import of index: {}/{}/{}'.format(project_id, namespace, index))\n first_page = True\n for key in self.source.iter_object_keys(project_id, namespace, index):\n while len(self.scheduled_jobs) > self.max_concurrency:\n await gen.sleep(0.25 * (random.random() + 0.5))\n self.scheduled_jobs.add(key)\n self.ioloop.add_callback(self.import_page, key)\n if first_page:\n await gen.sleep(1)\n first_page = False\n self.scheduled_indexes.remove((project_id, namespace, index))\n\n async def import_page(self, object_key):\n \"\"\"Imports a single object from backup. 
An object is instance of search_pb2.IndexDocumentResponse. Args: object_key: a str - storage object identifier.\"\"\"\n logger.debug('Starting import of object: {}'.format(object_key))\n for attempt in range(self.max_retries):\n try:\n index_documents_pb = self.source.get_index_documents_pb(object_key)\n response = search_pb2.IndexDocumentResponse()\n await self.api_methods.index_document(index_documents_pb, response)\n self.docs_imported += len(index_documents_pb.params.document)\n self.succeeded_jobs.add(object_key)\n self.scheduled_jobs.remove(object_key)\n logger.debug('Successfully imported object: {}'.format(object_key))\n break\n except Exception as err:\n logger.error('Failed to import object with key \"{}\" ({})'.format(object_key, err))\n if attempt < self.max_retries - 1:\n backoff = 0.2 * 2 ** attempt\n logger.info('Retrying in {:.1f}s'.format(backoff))\n await gen.sleep(backoff)\n else:\n self.failed_jobs.add(object_key)\n self.scheduled_jobs.remove(object_key)\n raise\n", "source": "the_stack_v2_python_sparse", "source_path": "SearchService2/appscale/search/backup_restore/restore_to_v2.py", "source_repo": "obino/appscale", "split": "val", "star_events_count": 1} {"blob_id": "f5d078121353f9dd3853bde1c9a9f8f151c46654", "bodies": ["cls.__name__ = str(name + 'Spec')\ncls.name = name\ncls.xmlType = xmlType\ncls.needGenerating = True\ncls.enumList = enumList", "simpleType = ET.SubElement(xsdNode, 'xsd:simpleType')\nsimpleType.set('name', cls.getXMLType())\nrestriction = ET.SubElement(simpleType, 'xsd:restriction')\nrestriction.set('base', 'xsd:string')\nfor enum in cls.enumList:\n enumNode = ET.SubElement(restriction, 'xsd:enumeration')\n enumNode.set('value', enum)"], "bodies_text": "<|body_start_0|>\n cls.__name__ = str(name + 'Spec')\n cls.name = name\n cls.xmlType = xmlType\n cls.needGenerating = True\n cls.enumList = enumList\n<|end_body_0|>\n\n<|body_start_1|>\n simpleType = ET.SubElement(xsdNode, 'xsd:simpleType')\n simpleType.set('name', cls.getXMLType())\n restriction = ET.SubElement(simpleType, 'xsd:restriction')\n restriction.set('base', 'xsd:string')\n for enum in cls.enumList:\n enumNode = ET.SubElement(restriction, 'xsd:enumeration')\n enumNode.set('value', enum)\n<|end_body_1|>\n", "class_docstring": "A type that allows a set list of strings", "class_name": "EnumBaseType", "detected_licenses": ["LicenseRef-scancode-warranty-disclaimer", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EnumBaseType:\n \"\"\"A type that allows a set list of strings\"\"\"\n\n def createClass(cls, name, xmlType, enumList):\n \"\"\"creates a new enumeration type. @ In, name, string, the name of the type @ In, xmlType, string, the name used for the xml type. @ In, enumList, [string], a list of allowable strings. @ Out, None\"\"\"\n <|body_0|>\n\n def generateXML(cls, xsdNode):\n \"\"\"Generates the xml data. @ In, xsdNode, xml.etree.ElementTree.Element, the element to add the new xml type to. 
@ Out, None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cls.__name__ = str(name + 'Spec')\n cls.name = name\n cls.xmlType = xmlType\n cls.needGenerating = True\n cls.enumList = enumList\n<|end_body_0|>\n\n<|body_start_1|>\n simpleType = ET.SubElement(xsdNode, 'xsd:simpleType')\n simpleType.set('name', cls.getXMLType())\n restriction = ET.SubElement(simpleType, 'xsd:restriction')\n restriction.set('base', 'xsd:string')\n for enum in cls.enumList:\n enumNode = ET.SubElement(restriction, 'xsd:enumeration')\n enumNode.set('value', enum)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000318", "length_bytes": 16966, "license_type": "permissive", "methods": [{"docstring": "creates a new enumeration type. @ In, name, string, the name of the type @ In, xmlType, string, the name used for the xml type. @ In, enumList, [string], a list of allowable strings. @ Out, None", "name": "createClass", "signature": "def createClass(cls, name, xmlType, enumList)"}, {"docstring": "Generates the xml data. @ In, xsdNode, xml.etree.ElementTree.Element, the element to add the new xml type to. @ Out, None", "name": "generateXML", "signature": "def generateXML(cls, xsdNode)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004715", "prompt": "Implement the Python class `EnumBaseType` described below.\n\nClass description:\nA type that allows a set list of strings\n\nMethod signatures and docstrings:\n- def createClass(cls, name, xmlType, enumList): creates a new enumeration type. @ In, name, string, the name of the type @ In, xmlType, string, the name used for the xml type. @ In, enumList, [string], a list of allowable strings. @ Out, None\n- def generateXML(cls, xsdNode): Generates the xml data. @ In, xsdNode, xml.etree.ElementTree.Element, the element to add the new xml type to. @ Out, None", "prompted_full_text": "Implement the Python class `EnumBaseType` described below.\n\nClass description:\nA type that allows a set list of strings\n\nMethod signatures and docstrings:\n- def createClass(cls, name, xmlType, enumList): creates a new enumeration type. @ In, name, string, the name of the type @ In, xmlType, string, the name used for the xml type. @ In, enumList, [string], a list of allowable strings. @ Out, None\n- def generateXML(cls, xsdNode): Generates the xml data. @ In, xsdNode, xml.etree.ElementTree.Element, the element to add the new xml type to. @ Out, None\n\n<|skeleton|>\nclass EnumBaseType:\n \"\"\"A type that allows a set list of strings\"\"\"\n\n def createClass(cls, name, xmlType, enumList):\n \"\"\"creates a new enumeration type. @ In, name, string, the name of the type @ In, xmlType, string, the name used for the xml type. @ In, enumList, [string], a list of allowable strings. @ Out, None\"\"\"\n <|body_0|>\n\n def generateXML(cls, xsdNode):\n \"\"\"Generates the xml data. @ In, xsdNode, xml.etree.ElementTree.Element, the element to add the new xml type to. 
@ Out, None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cls.__name__ = str(name + 'Spec')\n cls.name = name\n cls.xmlType = xmlType\n cls.needGenerating = True\n cls.enumList = enumList\n<|end_body_0|>\n\n<|body_start_1|>\n simpleType = ET.SubElement(xsdNode, 'xsd:simpleType')\n simpleType.set('name', cls.getXMLType())\n restriction = ET.SubElement(simpleType, 'xsd:restriction')\n restriction.set('base', 'xsd:string')\n for enum in cls.enumList:\n enumNode = ET.SubElement(restriction, 'xsd:enumeration')\n enumNode.set('value', enum)\n<|end_body_1|>\n", "revision_id": "fbee9e3def3c1ee576d1af85f3258cc816ceaaaf", "skeleton": "<|skeleton|>\nclass EnumBaseType:\n \"\"\"A type that allows a set list of strings\"\"\"\n\n def createClass(cls, name, xmlType, enumList):\n \"\"\"creates a new enumeration type. @ In, name, string, the name of the type @ In, xmlType, string, the name used for the xml type. @ In, enumList, [string], a list of allowable strings. @ Out, None\"\"\"\n <|body_0|>\n\n def generateXML(cls, xsdNode):\n \"\"\"Generates the xml data. @ In, xsdNode, xml.etree.ElementTree.Element, the element to add the new xml type to. @ Out, None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class EnumBaseType:\n \"\"\"A type that allows a set list of strings\"\"\"\n\n def createClass(cls, name, xmlType, enumList):\n \"\"\"creates a new enumeration type. @ In, name, string, the name of the type @ In, xmlType, string, the name used for the xml type. @ In, enumList, [string], a list of allowable strings. @ Out, None\"\"\"\n cls.__name__ = str(name + 'Spec')\n cls.name = name\n cls.xmlType = xmlType\n cls.needGenerating = True\n cls.enumList = enumList\n\n def generateXML(cls, xsdNode):\n \"\"\"Generates the xml data. @ In, xsdNode, xml.etree.ElementTree.Element, the element to add the new xml type to. 
@ Out, None\"\"\"\n simpleType = ET.SubElement(xsdNode, 'xsd:simpleType')\n simpleType.set('name', cls.getXMLType())\n restriction = ET.SubElement(simpleType, 'xsd:restriction')\n restriction.set('base', 'xsd:string')\n for enum in cls.enumList:\n enumNode = ET.SubElement(restriction, 'xsd:enumeration')\n enumNode.set('value', enum)\n", "source": "the_stack_v2_python_sparse", "source_path": "framework/utils/InputData.py", "source_repo": "jbae11/raven", "split": "val", "star_events_count": 0} {"blob_id": "5d00cb4e3ac0b2a37666d5e7d10fc1a38e9570d9", "bodies": ["nums = [i for i in range(n)]\nindex = 0\nwhile n > 1:\n index = (index + m - 1) % n\n nums.pop(index)\n n -= 1\nreturn nums[0]", "if n == 1:\n return 0\nreturn (self.lastRemaining2(n - 1, m) + m) % n"], "bodies_text": "<|body_start_0|>\n nums = [i for i in range(n)]\n index = 0\n while n > 1:\n index = (index + m - 1) % n\n nums.pop(index)\n n -= 1\n return nums[0]\n<|end_body_0|>\n\n<|body_start_1|>\n if n == 1:\n return 0\n return (self.lastRemaining2(n - 1, m) + m) % n\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def lastRemaining(self, n, m):\n \"\"\":type n: int :type m: int :rtype: int\"\"\"\n <|body_0|>\n\n def lastRemaining2(self, n, m):\n \"\"\":type n: int :type m: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n nums = [i for i in range(n)]\n index = 0\n while n > 1:\n index = (index + m - 1) % n\n nums.pop(index)\n n -= 1\n return nums[0]\n<|end_body_0|>\n\n<|body_start_1|>\n if n == 1:\n return 0\n return (self.lastRemaining2(n - 1, m) + m) % n\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000319", "length_bytes": 731, "license_type": "no_license", "methods": [{"docstring": ":type n: int :type m: int :rtype: int", "name": "lastRemaining", "signature": "def lastRemaining(self, n, m)"}, {"docstring": ":type n: int :type m: int :rtype: int", "name": "lastRemaining2", "signature": "def lastRemaining2(self, n, m)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004480", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def lastRemaining(self, n, m): :type n: int :type m: int :rtype: int\n- def lastRemaining2(self, n, m): :type n: int :type m: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def lastRemaining(self, n, m): :type n: int :type m: int :rtype: int\n- def lastRemaining2(self, n, m): :type n: int :type m: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def lastRemaining(self, n, m):\n \"\"\":type n: int :type m: int :rtype: int\"\"\"\n <|body_0|>\n\n def lastRemaining2(self, n, m):\n \"\"\":type n: int :type m: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n nums = [i for i in range(n)]\n index = 0\n while n > 1:\n index = (index + m - 1) % n\n nums.pop(index)\n n -= 1\n return nums[0]\n<|end_body_0|>\n\n<|body_start_1|>\n if n == 1:\n return 0\n return (self.lastRemaining2(n - 1, m) + m) % n\n<|end_body_1|>\n", "revision_id": "690b685048c8e89d26047b6bc48b5f9af7d59cbb", "skeleton": "<|skeleton|>\nclass Solution:\n\n def lastRemaining(self, n, m):\n \"\"\":type n: int :type m: int :rtype: int\"\"\"\n <|body_0|>\n\n def 
lastRemaining2(self, n, m):\n \"\"\":type n: int :type m: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def lastRemaining(self, n, m):\n \"\"\":type n: int :type m: int :rtype: int\"\"\"\n nums = [i for i in range(n)]\n index = 0\n while n > 1:\n index = (index + m - 1) % n\n nums.pop(index)\n n -= 1\n return nums[0]\n\n def lastRemaining2(self, n, m):\n \"\"\":type n: int :type m: int :rtype: int\"\"\"\n if n == 1:\n return 0\n return (self.lastRemaining2(n - 1, m) + m) % n\n", "source": "the_stack_v2_python_sparse", "source_path": "剑指offer题/剑指 Offer 62. 圆圈中最后剩下的数字.py", "source_repo": "SimmonsChen/LeetCode", "split": "val", "star_events_count": 0} {"blob_id": "2fe714f0d5f031224ad4833853eb0a4584c4e254", "bodies": ["self.dim_input = dim_input\nself.dim_output = dim_output\nself.layer_sizes = layer_sizes\nself.activation_fn = activation_fn", "weights = {}\nwith tf.variable_scope(scope):\n weights['w_0'] = tf.Variable(tf.truncated_normal([self.dim_input, self.layer_sizes[0]], stddev=0.1), name='w_0')\n weights['b_0'] = tf.Variable(tf.zeros([self.layer_sizes[0]]), name='b_0')\n for i in range(1, len(self.layer_sizes)):\n weights['w_%d' % i] = tf.Variable(tf.truncated_normal([self.layer_sizes[i - 1], self.layer_sizes[i]], stddev=0.1), name='w_%d' % i)\n weights['b_%d' % i] = tf.Variable(tf.zeros([self.layer_sizes[i]]), name='b_%d' % i)\n weights['w_out'] = tf.Variable(tf.truncated_normal([self.layer_sizes[-1], self.dim_output], stddev=0.1), name='w_out')\n weights['b_out'] = tf.Variable(tf.zeros([self.dim_output]), name='b_out')\nreturn weights", "num_layers = len(self.layer_sizes)\nwith tf.name_scope(scope):\n hidden = self.activation_fn(tf.nn.xw_plus_b(network_input, weights['w_0'], weights['b_0'], name='hidden_0'))\n for i in range(1, num_layers):\n hidden = self.activation_fn(tf.nn.xw_plus_b(hidden, weights['w_%d' % i], weights['b_%d' % i], name='hidden_%d' % i))\n return tf.nn.xw_plus_b(hidden, weights['w_out'], weights['b_out'], name='output')"], "bodies_text": "<|body_start_0|>\n self.dim_input = dim_input\n self.dim_output = dim_output\n self.layer_sizes = layer_sizes\n self.activation_fn = activation_fn\n<|end_body_0|>\n\n<|body_start_1|>\n weights = {}\n with tf.variable_scope(scope):\n weights['w_0'] = tf.Variable(tf.truncated_normal([self.dim_input, self.layer_sizes[0]], stddev=0.1), name='w_0')\n weights['b_0'] = tf.Variable(tf.zeros([self.layer_sizes[0]]), name='b_0')\n for i in range(1, len(self.layer_sizes)):\n weights['w_%d' % i] = tf.Variable(tf.truncated_normal([self.layer_sizes[i - 1], self.layer_sizes[i]], stddev=0.1), name='w_%d' % i)\n weights['b_%d' % i] = tf.Variable(tf.zeros([self.layer_sizes[i]]), name='b_%d' % i)\n weights['w_out'] = tf.Variable(tf.truncated_normal([self.layer_sizes[-1], self.dim_output], stddev=0.1), name='w_out')\n weights['b_out'] = tf.Variable(tf.zeros([self.dim_output]), name='b_out')\n return weights\n<|end_body_1|>\n\n<|body_start_2|>\n num_layers = len(self.layer_sizes)\n with tf.name_scope(scope):\n hidden = self.activation_fn(tf.nn.xw_plus_b(network_input, weights['w_0'], weights['b_0'], name='hidden_0'))\n for i in range(1, num_layers):\n hidden = self.activation_fn(tf.nn.xw_plus_b(hidden, weights['w_%d' % i], weights['b_%d' % i], name='hidden_%d' % i))\n return tf.nn.xw_plus_b(hidden, weights['w_out'], weights['b_out'], name='output')\n<|end_body_2|>\n", "class_docstring": "Generator for 
fully connected networks.", "class_name": "FullyConnectedNetworkGenerator", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FullyConnectedNetworkGenerator:\n \"\"\"Generator for fully connected networks.\"\"\"\n\n def __init__(self, dim_input=1, dim_output=1, layer_sizes=(64,), activation_fn=tf.nn.tanh):\n \"\"\"Creates fully connected neural networks. Args: dim_input: Dimensionality of input (integer > 0). dim_output: Dimensionality of output (integer > 0). layer_sizes: non-empty list with number of neurons per internal layer. activation_fn: activation function for hidden layers\"\"\"\n <|body_0|>\n\n def construct_network_weights(self, scope='weights'):\n \"\"\"Creates weights for fully connected neural network. Args: scope: variable scope Returns: A dict with weights (network parameters).\"\"\"\n <|body_1|>\n\n def construct_network(self, network_input, weights, scope='network'):\n \"\"\"Creates a fully connected neural network with given weights and input. Args: network_input: Network input (1d). weights: network parameters (see construct_network_weights). scope: name scope. Returns: neural network output op\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dim_input = dim_input\n self.dim_output = dim_output\n self.layer_sizes = layer_sizes\n self.activation_fn = activation_fn\n<|end_body_0|>\n\n<|body_start_1|>\n weights = {}\n with tf.variable_scope(scope):\n weights['w_0'] = tf.Variable(tf.truncated_normal([self.dim_input, self.layer_sizes[0]], stddev=0.1), name='w_0')\n weights['b_0'] = tf.Variable(tf.zeros([self.layer_sizes[0]]), name='b_0')\n for i in range(1, len(self.layer_sizes)):\n weights['w_%d' % i] = tf.Variable(tf.truncated_normal([self.layer_sizes[i - 1], self.layer_sizes[i]], stddev=0.1), name='w_%d' % i)\n weights['b_%d' % i] = tf.Variable(tf.zeros([self.layer_sizes[i]]), name='b_%d' % i)\n weights['w_out'] = tf.Variable(tf.truncated_normal([self.layer_sizes[-1], self.dim_output], stddev=0.1), name='w_out')\n weights['b_out'] = tf.Variable(tf.zeros([self.dim_output]), name='b_out')\n return weights\n<|end_body_1|>\n\n<|body_start_2|>\n num_layers = len(self.layer_sizes)\n with tf.name_scope(scope):\n hidden = self.activation_fn(tf.nn.xw_plus_b(network_input, weights['w_0'], weights['b_0'], name='hidden_0'))\n for i in range(1, num_layers):\n hidden = self.activation_fn(tf.nn.xw_plus_b(hidden, weights['w_%d' % i], weights['b_%d' % i], name='hidden_%d' % i))\n return tf.nn.xw_plus_b(hidden, weights['w_out'], weights['b_out'], name='output')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000320", "length_bytes": 6936, "license_type": "permissive", "methods": [{"docstring": "Creates fully connected neural networks. Args: dim_input: Dimensionality of input (integer > 0). dim_output: Dimensionality of output (integer > 0). layer_sizes: non-empty list with number of neurons per internal layer. activation_fn: activation function for hidden layers", "name": "__init__", "signature": "def __init__(self, dim_input=1, dim_output=1, layer_sizes=(64,), activation_fn=tf.nn.tanh)"}, {"docstring": "Creates weights for fully connected neural network. Args: scope: variable scope Returns: A dict with weights (network parameters).", "name": "construct_network_weights", "signature": "def construct_network_weights(self, scope='weights')"}, {"docstring": "Creates a fully connected neural network with given weights and input. Args: network_input: Network input (1d). 
weights: network parameters (see construct_network_weights). scope: name scope. Returns: neural network output op", "name": "construct_network", "signature": "def construct_network(self, network_input, weights, scope='network')"}], "n_methods": 3, "prompt": "Implement the Python class `FullyConnectedNetworkGenerator` described below.\n\nClass description:\nGenerator for fully connected networks.\n\nMethod signatures and docstrings:\n- def __init__(self, dim_input=1, dim_output=1, layer_sizes=(64,), activation_fn=tf.nn.tanh): Creates fully connected neural networks. Args: dim_input: Dimensionality of input (integer > 0). dim_output: Dimensionality of output (integer > 0). layer_sizes: non-empty list with number of neurons per internal layer. activation_fn: activation function for hidden layers\n- def construct_network_weights(self, scope='weights'): Creates weights for fully connected neural network. Args: scope: variable scope Returns: A dict with weights (network parameters).\n- def construct_network(self, network_input, weights, scope='network'): Creates a fully connected neural network with given weights and input. Args: network_input: Network input (1d). weights: network parameters (see construct_network_weights). scope: name scope. Returns: neural network output op", "prompted_full_text": "Implement the Python class `FullyConnectedNetworkGenerator` described below.\n\nClass description:\nGenerator for fully connected networks.\n\nMethod signatures and docstrings:\n- def __init__(self, dim_input=1, dim_output=1, layer_sizes=(64,), activation_fn=tf.nn.tanh): Creates fully connected neural networks. Args: dim_input: Dimensionality of input (integer > 0). dim_output: Dimensionality of output (integer > 0). layer_sizes: non-empty list with number of neurons per internal layer. activation_fn: activation function for hidden layers\n- def construct_network_weights(self, scope='weights'): Creates weights for fully connected neural network. Args: scope: variable scope Returns: A dict with weights (network parameters).\n- def construct_network(self, network_input, weights, scope='network'): Creates a fully connected neural network with given weights and input. Args: network_input: Network input (1d). weights: network parameters (see construct_network_weights). scope: name scope. Returns: neural network output op\n\n<|skeleton|>\nclass FullyConnectedNetworkGenerator:\n \"\"\"Generator for fully connected networks.\"\"\"\n\n def __init__(self, dim_input=1, dim_output=1, layer_sizes=(64,), activation_fn=tf.nn.tanh):\n \"\"\"Creates fully connected neural networks. Args: dim_input: Dimensionality of input (integer > 0). dim_output: Dimensionality of output (integer > 0). layer_sizes: non-empty list with number of neurons per internal layer. activation_fn: activation function for hidden layers\"\"\"\n <|body_0|>\n\n def construct_network_weights(self, scope='weights'):\n \"\"\"Creates weights for fully connected neural network. Args: scope: variable scope Returns: A dict with weights (network parameters).\"\"\"\n <|body_1|>\n\n def construct_network(self, network_input, weights, scope='network'):\n \"\"\"Creates a fully connected neural network with given weights and input. Args: network_input: Network input (1d). weights: network parameters (see construct_network_weights). scope: name scope. 
Returns: neural network output op\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dim_input = dim_input\n self.dim_output = dim_output\n self.layer_sizes = layer_sizes\n self.activation_fn = activation_fn\n<|end_body_0|>\n\n<|body_start_1|>\n weights = {}\n with tf.variable_scope(scope):\n weights['w_0'] = tf.Variable(tf.truncated_normal([self.dim_input, self.layer_sizes[0]], stddev=0.1), name='w_0')\n weights['b_0'] = tf.Variable(tf.zeros([self.layer_sizes[0]]), name='b_0')\n for i in range(1, len(self.layer_sizes)):\n weights['w_%d' % i] = tf.Variable(tf.truncated_normal([self.layer_sizes[i - 1], self.layer_sizes[i]], stddev=0.1), name='w_%d' % i)\n weights['b_%d' % i] = tf.Variable(tf.zeros([self.layer_sizes[i]]), name='b_%d' % i)\n weights['w_out'] = tf.Variable(tf.truncated_normal([self.layer_sizes[-1], self.dim_output], stddev=0.1), name='w_out')\n weights['b_out'] = tf.Variable(tf.zeros([self.dim_output]), name='b_out')\n return weights\n<|end_body_1|>\n\n<|body_start_2|>\n num_layers = len(self.layer_sizes)\n with tf.name_scope(scope):\n hidden = self.activation_fn(tf.nn.xw_plus_b(network_input, weights['w_0'], weights['b_0'], name='hidden_0'))\n for i in range(1, num_layers):\n hidden = self.activation_fn(tf.nn.xw_plus_b(hidden, weights['w_%d' % i], weights['b_%d' % i], name='hidden_%d' % i))\n return tf.nn.xw_plus_b(hidden, weights['w_out'], weights['b_out'], name='output')\n<|end_body_2|>\n", "revision_id": "dea327aa9e7ef7f7bca5a6c225dbdca1077a06e9", "skeleton": "<|skeleton|>\nclass FullyConnectedNetworkGenerator:\n \"\"\"Generator for fully connected networks.\"\"\"\n\n def __init__(self, dim_input=1, dim_output=1, layer_sizes=(64,), activation_fn=tf.nn.tanh):\n \"\"\"Creates fully connected neural networks. Args: dim_input: Dimensionality of input (integer > 0). dim_output: Dimensionality of output (integer > 0). layer_sizes: non-empty list with number of neurons per internal layer. activation_fn: activation function for hidden layers\"\"\"\n <|body_0|>\n\n def construct_network_weights(self, scope='weights'):\n \"\"\"Creates weights for fully connected neural network. Args: scope: variable scope Returns: A dict with weights (network parameters).\"\"\"\n <|body_1|>\n\n def construct_network(self, network_input, weights, scope='network'):\n \"\"\"Creates a fully connected neural network with given weights and input. Args: network_input: Network input (1d). weights: network parameters (see construct_network_weights). scope: name scope. Returns: neural network output op\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FullyConnectedNetworkGenerator:\n \"\"\"Generator for fully connected networks.\"\"\"\n\n def __init__(self, dim_input=1, dim_output=1, layer_sizes=(64,), activation_fn=tf.nn.tanh):\n \"\"\"Creates fully connected neural networks. Args: dim_input: Dimensionality of input (integer > 0). dim_output: Dimensionality of output (integer > 0). layer_sizes: non-empty list with number of neurons per internal layer. activation_fn: activation function for hidden layers\"\"\"\n self.dim_input = dim_input\n self.dim_output = dim_output\n self.layer_sizes = layer_sizes\n self.activation_fn = activation_fn\n\n def construct_network_weights(self, scope='weights'):\n \"\"\"Creates weights for fully connected neural network. 
Args: scope: variable scope Returns: A dict with weights (network parameters).\"\"\"\n weights = {}\n with tf.variable_scope(scope):\n weights['w_0'] = tf.Variable(tf.truncated_normal([self.dim_input, self.layer_sizes[0]], stddev=0.1), name='w_0')\n weights['b_0'] = tf.Variable(tf.zeros([self.layer_sizes[0]]), name='b_0')\n for i in range(1, len(self.layer_sizes)):\n weights['w_%d' % i] = tf.Variable(tf.truncated_normal([self.layer_sizes[i - 1], self.layer_sizes[i]], stddev=0.1), name='w_%d' % i)\n weights['b_%d' % i] = tf.Variable(tf.zeros([self.layer_sizes[i]]), name='b_%d' % i)\n weights['w_out'] = tf.Variable(tf.truncated_normal([self.layer_sizes[-1], self.dim_output], stddev=0.1), name='w_out')\n weights['b_out'] = tf.Variable(tf.zeros([self.dim_output]), name='b_out')\n return weights\n\n def construct_network(self, network_input, weights, scope='network'):\n \"\"\"Creates a fully connected neural network with given weights and input. Args: network_input: Network input (1d). weights: network parameters (see construct_network_weights). scope: name scope. Returns: neural network output op\"\"\"\n num_layers = len(self.layer_sizes)\n with tf.name_scope(scope):\n hidden = self.activation_fn(tf.nn.xw_plus_b(network_input, weights['w_0'], weights['b_0'], name='hidden_0'))\n for i in range(1, num_layers):\n hidden = self.activation_fn(tf.nn.xw_plus_b(hidden, weights['w_%d' % i], weights['b_%d' % i], name='hidden_%d' % i))\n return tf.nn.xw_plus_b(hidden, weights['w_out'], weights['b_out'], name='output')\n", "source": "the_stack_v2_python_sparse", "source_path": "norml/networks.py", "source_repo": "Tarkiyah/googleResearch", "split": "val", "star_events_count": 11} {"blob_id": "5f830f5602d4da0a1765d3ec9e9a2edde75c0d3c", "bodies": ["context = super(ModeratorView, self).get_context_data(**kwargs)\ngroup_id = self.kwargs.get('group')\nif group_id:\n context['group'] = get_object_or_404(Group, pk=group_id)\nreturn context", "queryset = self.request.user.messages_to_moderate\ngroup = self.kwargs.get('group')\nif group:\n queryset = queryset.filter(thread__group_id=group)\nqueryset = queryset.select_related('thread', 'thread__group', 'thread__group__group', 'sender')\nreturn queryset"], "bodies_text": "<|body_start_0|>\n context = super(ModeratorView, self).get_context_data(**kwargs)\n group_id = self.kwargs.get('group')\n if group_id:\n context['group'] = get_object_or_404(Group, pk=group_id)\n return context\n<|end_body_0|>\n\n<|body_start_1|>\n queryset = self.request.user.messages_to_moderate\n group = self.kwargs.get('group')\n if group:\n queryset = queryset.filter(thread__group_id=group)\n queryset = queryset.select_related('thread', 'thread__group', 'thread__group__group', 'sender')\n return queryset\n<|end_body_1|>\n", "class_docstring": "View that handles viewing messages to be moderated", "class_name": "ModeratorView", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ModeratorView:\n \"\"\"View that handles viewing messages to be moderated\"\"\"\n\n def get_context_data(self, **kwargs):\n \"\"\"Add additional context to the moderation page\"\"\"\n <|body_0|>\n\n def get_queryset(self):\n \"\"\"Get the queryset for the page\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context = super(ModeratorView, self).get_context_data(**kwargs)\n group_id = self.kwargs.get('group')\n if group_id:\n context['group'] = get_object_or_404(Group, pk=group_id)\n return context\n<|end_body_0|>\n\n<|body_start_1|>\n queryset 
= self.request.user.messages_to_moderate\n group = self.kwargs.get('group')\n if group:\n queryset = queryset.filter(thread__group_id=group)\n queryset = queryset.select_related('thread', 'thread__group', 'thread__group__group', 'sender')\n return queryset\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000321", "length_bytes": 6036, "license_type": "permissive", "methods": [{"docstring": "Add additional context to the moderation page", "name": "get_context_data", "signature": "def get_context_data(self, **kwargs)"}, {"docstring": "Get the queryset for the page", "name": "get_queryset", "signature": "def get_queryset(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005714", "prompt": "Implement the Python class `ModeratorView` described below.\n\nClass description:\nView that handles viewing messages to be moderated\n\nMethod signatures and docstrings:\n- def get_context_data(self, **kwargs): Add additional context to the moderation page\n- def get_queryset(self): Get the queryset for the page", "prompted_full_text": "Implement the Python class `ModeratorView` described below.\n\nClass description:\nView that handles viewing messages to be moderated\n\nMethod signatures and docstrings:\n- def get_context_data(self, **kwargs): Add additional context to the moderation page\n- def get_queryset(self): Get the queryset for the page\n\n<|skeleton|>\nclass ModeratorView:\n \"\"\"View that handles viewing messages to be moderated\"\"\"\n\n def get_context_data(self, **kwargs):\n \"\"\"Add additional context to the moderation page\"\"\"\n <|body_0|>\n\n def get_queryset(self):\n \"\"\"Get the queryset for the page\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context = super(ModeratorView, self).get_context_data(**kwargs)\n group_id = self.kwargs.get('group')\n if group_id:\n context['group'] = get_object_or_404(Group, pk=group_id)\n return context\n<|end_body_0|>\n\n<|body_start_1|>\n queryset = self.request.user.messages_to_moderate\n group = self.kwargs.get('group')\n if group:\n queryset = queryset.filter(thread__group_id=group)\n queryset = queryset.select_related('thread', 'thread__group', 'thread__group__group', 'sender')\n return queryset\n<|end_body_1|>\n", "revision_id": "a56c0f89df82694bf5db32a04d8b092974791972", "skeleton": "<|skeleton|>\nclass ModeratorView:\n \"\"\"View that handles viewing messages to be moderated\"\"\"\n\n def get_context_data(self, **kwargs):\n \"\"\"Add additional context to the moderation page\"\"\"\n <|body_0|>\n\n def get_queryset(self):\n \"\"\"Get the queryset for the page\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ModeratorView:\n \"\"\"View that handles viewing messages to be moderated\"\"\"\n\n def get_context_data(self, **kwargs):\n \"\"\"Add additional context to the moderation page\"\"\"\n context = super(ModeratorView, self).get_context_data(**kwargs)\n group_id = self.kwargs.get('group')\n if group_id:\n context['group'] = get_object_or_404(Group, pk=group_id)\n return context\n\n def get_queryset(self):\n \"\"\"Get the queryset for the page\"\"\"\n queryset = self.request.user.messages_to_moderate\n group = self.kwargs.get('group')\n if group:\n queryset = queryset.filter(thread__group_id=group)\n queryset = queryset.select_related('thread', 'thread__group', 'thread__group__group', 'sender')\n return queryset\n", "source": "the_stack_v2_python_sparse", "source_path": 
"open_connect/moderation/views.py", "source_repo": "ofa/connect", "split": "val", "star_events_count": 66} {"blob_id": "f06e771e9f899c4360ad4a00b782240289e7ec4a", "bodies": ["dct = super(RasterParam, self).parse(string)\nraster_type = dct['type']\npath = dct['path']\nif raster_type == 'DEM':\n return DEM(path)\nelif raster_type == 'Ifg':\n return Ifg(path)\nelif raster_type == 'Incidence':\n return Incidence(path)\nelse:\n raise luigi.parameter.UnknownParameterException('rasterBase must be an inscance DEM, Ifg or Incidence is valid')", "path = rasterBase.data_path\nif isinstance(rasterBase, DEM):\n d = {'type': 'DEM', 'path': path}\nelif isinstance(rasterBase, Ifg):\n d = {'type': 'Ifg', 'path': path}\nelif isinstance(rasterBase, Incidence):\n d = {'type': 'Incidence', 'path': path}\nelse:\n raise luigi.parameter.UnknownParameterException('rasterBase must be an inscance DEM, Ifg or Incidence is valid')\nreturn super(RasterParam, self).serialize(d)"], "bodies_text": "<|body_start_0|>\n dct = super(RasterParam, self).parse(string)\n raster_type = dct['type']\n path = dct['path']\n if raster_type == 'DEM':\n return DEM(path)\n elif raster_type == 'Ifg':\n return Ifg(path)\n elif raster_type == 'Incidence':\n return Incidence(path)\n else:\n raise luigi.parameter.UnknownParameterException('rasterBase must be an inscance DEM, Ifg or Incidence is valid')\n<|end_body_0|>\n\n<|body_start_1|>\n path = rasterBase.data_path\n if isinstance(rasterBase, DEM):\n d = {'type': 'DEM', 'path': path}\n elif isinstance(rasterBase, Ifg):\n d = {'type': 'Ifg', 'path': path}\n elif isinstance(rasterBase, Incidence):\n d = {'type': 'Incidence', 'path': path}\n else:\n raise luigi.parameter.UnknownParameterException('rasterBase must be an inscance DEM, Ifg or Incidence is valid')\n return super(RasterParam, self).serialize(d)\n<|end_body_1|>\n", "class_docstring": "Parameter representing a :py:class:`pyrate.shared.RasterBase` sub class.", "class_name": "RasterParam", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RasterParam:\n \"\"\"Parameter representing a :py:class:`pyrate.shared.RasterBase` sub class.\"\"\"\n\n def parse(self, string):\n \"\"\"override of :py:meth:`DictParam.parse`.\"\"\"\n <|body_0|>\n\n def serialize(self, rasterBase):\n \"\"\"override of :py:meth:`DictParam.serialize`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dct = super(RasterParam, self).parse(string)\n raster_type = dct['type']\n path = dct['path']\n if raster_type == 'DEM':\n return DEM(path)\n elif raster_type == 'Ifg':\n return Ifg(path)\n elif raster_type == 'Incidence':\n return Incidence(path)\n else:\n raise luigi.parameter.UnknownParameterException('rasterBase must be an inscance DEM, Ifg or Incidence is valid')\n<|end_body_0|>\n\n<|body_start_1|>\n path = rasterBase.data_path\n if isinstance(rasterBase, DEM):\n d = {'type': 'DEM', 'path': path}\n elif isinstance(rasterBase, Ifg):\n d = {'type': 'Ifg', 'path': path}\n elif isinstance(rasterBase, Incidence):\n d = {'type': 'Incidence', 'path': path}\n else:\n raise luigi.parameter.UnknownParameterException('rasterBase must be an inscance DEM, Ifg or Incidence is valid')\n return super(RasterParam, self).serialize(d)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000322", "length_bytes": 6632, "license_type": "permissive", "methods": [{"docstring": "override of :py:meth:`DictParam.parse`.", "name": "parse", "signature": "def parse(self, string)"}, {"docstring": "override 
of :py:meth:`DictParam.serialize`.", "name": "serialize", "signature": "def serialize(self, rasterBase)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000077", "prompt": "Implement the Python class `RasterParam` described below.\n\nClass description:\nParameter representing a :py:class:`pyrate.shared.RasterBase` sub class.\n\nMethod signatures and docstrings:\n- def parse(self, string): override of :py:meth:`DictParam.parse`.\n- def serialize(self, rasterBase): override of :py:meth:`DictParam.serialize`.", "prompted_full_text": "Implement the Python class `RasterParam` described below.\n\nClass description:\nParameter representing a :py:class:`pyrate.shared.RasterBase` sub class.\n\nMethod signatures and docstrings:\n- def parse(self, string): override of :py:meth:`DictParam.parse`.\n- def serialize(self, rasterBase): override of :py:meth:`DictParam.serialize`.\n\n<|skeleton|>\nclass RasterParam:\n \"\"\"Parameter representing a :py:class:`pyrate.shared.RasterBase` sub class.\"\"\"\n\n def parse(self, string):\n \"\"\"override of :py:meth:`DictParam.parse`.\"\"\"\n <|body_0|>\n\n def serialize(self, rasterBase):\n \"\"\"override of :py:meth:`DictParam.serialize`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dct = super(RasterParam, self).parse(string)\n raster_type = dct['type']\n path = dct['path']\n if raster_type == 'DEM':\n return DEM(path)\n elif raster_type == 'Ifg':\n return Ifg(path)\n elif raster_type == 'Incidence':\n return Incidence(path)\n else:\n raise luigi.parameter.UnknownParameterException('rasterBase must be an inscance DEM, Ifg or Incidence is valid')\n<|end_body_0|>\n\n<|body_start_1|>\n path = rasterBase.data_path\n if isinstance(rasterBase, DEM):\n d = {'type': 'DEM', 'path': path}\n elif isinstance(rasterBase, Ifg):\n d = {'type': 'Ifg', 'path': path}\n elif isinstance(rasterBase, Incidence):\n d = {'type': 'Incidence', 'path': path}\n else:\n raise luigi.parameter.UnknownParameterException('rasterBase must be an inscance DEM, Ifg or Incidence is valid')\n return super(RasterParam, self).serialize(d)\n<|end_body_1|>\n", "revision_id": "1da92f1446dc821ee7d8d0d1049c10f4ec08d1f6", "skeleton": "<|skeleton|>\nclass RasterParam:\n \"\"\"Parameter representing a :py:class:`pyrate.shared.RasterBase` sub class.\"\"\"\n\n def parse(self, string):\n \"\"\"override of :py:meth:`DictParam.parse`.\"\"\"\n <|body_0|>\n\n def serialize(self, rasterBase):\n \"\"\"override of :py:meth:`DictParam.serialize`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RasterParam:\n \"\"\"Parameter representing a :py:class:`pyrate.shared.RasterBase` sub class.\"\"\"\n\n def parse(self, string):\n \"\"\"override of :py:meth:`DictParam.parse`.\"\"\"\n dct = super(RasterParam, self).parse(string)\n raster_type = dct['type']\n path = dct['path']\n if raster_type == 'DEM':\n return DEM(path)\n elif raster_type == 'Ifg':\n return Ifg(path)\n elif raster_type == 'Incidence':\n return Incidence(path)\n else:\n raise luigi.parameter.UnknownParameterException('rasterBase must be an inscance DEM, Ifg or Incidence is valid')\n\n def serialize(self, rasterBase):\n \"\"\"override of :py:meth:`DictParam.serialize`.\"\"\"\n path = rasterBase.data_path\n if isinstance(rasterBase, DEM):\n d = {'type': 'DEM', 'path': path}\n elif isinstance(rasterBase, Ifg):\n d = {'type': 'Ifg', 'path': path}\n elif isinstance(rasterBase, Incidence):\n d = {'type': 'Incidence', 
'path': path}\n else:\n raise luigi.parameter.UnknownParameterException('rasterBase must be an inscance DEM, Ifg or Incidence is valid')\n return super(RasterParam, self).serialize(d)\n", "source": "the_stack_v2_python_sparse", "source_path": "pyrate/tasks/utils.py", "source_repo": "andretheronsa/PyRate", "split": "val", "star_events_count": 0} {"blob_id": "cf0f1d32e5913e5e367b061f753b062971421ffa", "bodies": ["s = str(num)\na = b = 1\nfor i in range(2, len(s) + 1):\n a, b = (a + b if '10' <= s[i - 2:i] <= '25' else a, a)\nreturn a", "a = b = 1\ny = num % 10\nwhile num != 0:\n num //= 10\n x = num % 10\n a, b = (a + b if 10 <= 10 * x + y <= 25 else a, a)\n y = x\nreturn a"], "bodies_text": "<|body_start_0|>\n s = str(num)\n a = b = 1\n for i in range(2, len(s) + 1):\n a, b = (a + b if '10' <= s[i - 2:i] <= '25' else a, a)\n return a\n<|end_body_0|>\n\n<|body_start_1|>\n a = b = 1\n y = num % 10\n while num != 0:\n num //= 10\n x = num % 10\n a, b = (a + b if 10 <= 10 * x + y <= 25 else a, a)\n y = x\n return a\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def translateNum_1(self, num: int) -> int:\n \"\"\"方法一:字符串遍历 时间复杂度 O(N): N 为字符串 s 的长度(即数字 num 的位数 log(num) ),其决定了循环次数。 空间复杂度 O(N): 字符串 s 使用 O(N) 大小的额外空间。 :param num: :return:\"\"\"\n <|body_0|>\n\n def translateNum_2(self, num: int) -> int:\n \"\"\"方法二:数字求余 时间复杂度 O(N): N 为字符串 s 的长度(即数字 num 的位数 log(num) ),其决定了循环次数。 空间复杂度 O(1): 几个变量使用常数大小的额外空间。 :param num: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n s = str(num)\n a = b = 1\n for i in range(2, len(s) + 1):\n a, b = (a + b if '10' <= s[i - 2:i] <= '25' else a, a)\n return a\n<|end_body_0|>\n\n<|body_start_1|>\n a = b = 1\n y = num % 10\n while num != 0:\n num //= 10\n x = num % 10\n a, b = (a + b if 10 <= 10 * x + y <= 25 else a, a)\n y = x\n return a\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000323", "length_bytes": 1777, "license_type": "no_license", "methods": [{"docstring": "方法一:字符串遍历 时间复杂度 O(N): N 为字符串 s 的长度(即数字 num 的位数 log(num) ),其决定了循环次数。 空间复杂度 O(N): 字符串 s 使用 O(N) 大小的额外空间。 :param num: :return:", "name": "translateNum_1", "signature": "def translateNum_1(self, num: int) -> int"}, {"docstring": "方法二:数字求余 时间复杂度 O(N): N 为字符串 s 的长度(即数字 num 的位数 log(num) ),其决定了循环次数。 空间复杂度 O(1): 几个变量使用常数大小的额外空间。 :param num: :return:", "name": "translateNum_2", "signature": "def translateNum_2(self, num: int) -> int"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def translateNum_1(self, num: int) -> int: 方法一:字符串遍历 时间复杂度 O(N): N 为字符串 s 的长度(即数字 num 的位数 log(num) ),其决定了循环次数。 空间复杂度 O(N): 字符串 s 使用 O(N) 大小的额外空间。 :param num: :return:\n- def translateNum_2(self, num: int) -> int: 方法二:数字求余 时间复杂度 O(N): N 为字符串 s 的长度(即数字 num 的位数 log(num) ),其决定了循环次数。 空间复杂度 O(1): 几个变量使用常数大小的额外空间。 :param num: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def translateNum_1(self, num: int) -> int: 方法一:字符串遍历 时间复杂度 O(N): N 为字符串 s 的长度(即数字 num 的位数 log(num) ),其决定了循环次数。 空间复杂度 O(N): 字符串 s 使用 O(N) 大小的额外空间。 :param num: :return:\n- def translateNum_2(self, num: int) -> int: 方法二:数字求余 时间复杂度 O(N): N 为字符串 s 的长度(即数字 num 的位数 log(num) ),其决定了循环次数。 空间复杂度 O(1): 几个变量使用常数大小的额外空间。 :param num: 
:return:\n\n<|skeleton|>\nclass Solution:\n\n def translateNum_1(self, num: int) -> int:\n \"\"\"方法一:字符串遍历 时间复杂度 O(N): N 为字符串 s 的长度(即数字 num 的位数 log(num) ),其决定了循环次数。 空间复杂度 O(N): 字符串 s 使用 O(N) 大小的额外空间。 :param num: :return:\"\"\"\n <|body_0|>\n\n def translateNum_2(self, num: int) -> int:\n \"\"\"方法二:数字求余 时间复杂度 O(N): N 为字符串 s 的长度(即数字 num 的位数 log(num) ),其决定了循环次数。 空间复杂度 O(1): 几个变量使用常数大小的额外空间。 :param num: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n s = str(num)\n a = b = 1\n for i in range(2, len(s) + 1):\n a, b = (a + b if '10' <= s[i - 2:i] <= '25' else a, a)\n return a\n<|end_body_0|>\n\n<|body_start_1|>\n a = b = 1\n y = num % 10\n while num != 0:\n num //= 10\n x = num % 10\n a, b = (a + b if 10 <= 10 * x + y <= 25 else a, a)\n y = x\n return a\n<|end_body_1|>\n", "revision_id": "62419b49000e79962bcdc99cd98afd2fb82ea345", "skeleton": "<|skeleton|>\nclass Solution:\n\n def translateNum_1(self, num: int) -> int:\n \"\"\"方法一:字符串遍历 时间复杂度 O(N): N 为字符串 s 的长度(即数字 num 的位数 log(num) ),其决定了循环次数。 空间复杂度 O(N): 字符串 s 使用 O(N) 大小的额外空间。 :param num: :return:\"\"\"\n <|body_0|>\n\n def translateNum_2(self, num: int) -> int:\n \"\"\"方法二:数字求余 时间复杂度 O(N): N 为字符串 s 的长度(即数字 num 的位数 log(num) ),其决定了循环次数。 空间复杂度 O(1): 几个变量使用常数大小的额外空间。 :param num: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def translateNum_1(self, num: int) -> int:\n \"\"\"方法一:字符串遍历 时间复杂度 O(N): N 为字符串 s 的长度(即数字 num 的位数 log(num) ),其决定了循环次数。 空间复杂度 O(N): 字符串 s 使用 O(N) 大小的额外空间。 :param num: :return:\"\"\"\n s = str(num)\n a = b = 1\n for i in range(2, len(s) + 1):\n a, b = (a + b if '10' <= s[i - 2:i] <= '25' else a, a)\n return a\n\n def translateNum_2(self, num: int) -> int:\n \"\"\"方法二:数字求余 时间复杂度 O(N): N 为字符串 s 的长度(即数字 num 的位数 log(num) ),其决定了循环次数。 空间复杂度 O(1): 几个变量使用常数大小的额外空间。 :param num: :return:\"\"\"\n a = b = 1\n y = num % 10\n while num != 0:\n num //= 10\n x = num % 10\n a, b = (a + b if 10 <= 10 * x + y <= 25 else a, a)\n y = x\n return a\n", "source": "the_stack_v2_python_sparse", "source_path": "剑指 Offer(第 2 版)/translateNum.py", "source_repo": "MaoningGuan/LeetCode", "split": "val", "star_events_count": 3} {"blob_id": "ef8495f4415279d10c485d25a04cdbe32618085a", "bodies": ["if threshold is None:\n threshold = self._threshold\nif self._classifier is not None:\n top_matches = min(top_matches, self._matcher_tup.length - 1)\n dists, inds = self._classifier.query(emb, k=top_matches)\n dists = np.squeeze(dists).tolist()\n inds = np.squeeze(inds).tolist()\n predict_id = self._matcher_tup.labels[inds[0]]\n min_dist = dists[0]\n if min_dist <= threshold:\n top_match_ids = [self._matcher_tup.labels[idx] for i, idx in enumerate(inds) if dists[i] <= threshold]\n else:\n top_match_ids = [predict_id]\n dists = dists[0:len(top_match_ids)]\nelse:\n top_match_ids = [Config.Matcher.NEW_FACE]\n dists = [-1]\n predict_id = Config.Matcher.NEW_FACE\n min_dist = -1\nif return_dists:\n return (top_match_ids, dists)\nreturn top_match_ids", "length = len(embs)\nprint('Fit', length)\nif length > 0:\n reg_mat = np.asarray(embs).reshape((length, Config.Matcher.EMB_LENGTH))\n self._classifier = neighbors.KDTree(reg_mat, leaf_size=Config.Matcher.INDEX_LEAF_SIZE, metric='euclidean')\n self._matcher_tup = KdTreeTuple(embs, labels, length)\nelse:\n self._matcher_tup = None\n self._classifier = None", "if self._matcher_tup is None:\n self.fit(new_embs, new_labels)\nelse:\n old_embs = self._matcher_tup.embs\n 
old_labels = self._matcher_tup.labels\n embs = old_embs + new_embs\n labels = old_labels + new_labels\n self.fit(embs, labels)"], "bodies_text": "<|body_start_0|>\n if threshold is None:\n threshold = self._threshold\n if self._classifier is not None:\n top_matches = min(top_matches, self._matcher_tup.length - 1)\n dists, inds = self._classifier.query(emb, k=top_matches)\n dists = np.squeeze(dists).tolist()\n inds = np.squeeze(inds).tolist()\n predict_id = self._matcher_tup.labels[inds[0]]\n min_dist = dists[0]\n if min_dist <= threshold:\n top_match_ids = [self._matcher_tup.labels[idx] for i, idx in enumerate(inds) if dists[i] <= threshold]\n else:\n top_match_ids = [predict_id]\n dists = dists[0:len(top_match_ids)]\n else:\n top_match_ids = [Config.Matcher.NEW_FACE]\n dists = [-1]\n predict_id = Config.Matcher.NEW_FACE\n min_dist = -1\n if return_dists:\n return (top_match_ids, dists)\n return top_match_ids\n<|end_body_0|>\n\n<|body_start_1|>\n length = len(embs)\n print('Fit', length)\n if length > 0:\n reg_mat = np.asarray(embs).reshape((length, Config.Matcher.EMB_LENGTH))\n self._classifier = neighbors.KDTree(reg_mat, leaf_size=Config.Matcher.INDEX_LEAF_SIZE, metric='euclidean')\n self._matcher_tup = KdTreeTuple(embs, labels, length)\n else:\n self._matcher_tup = None\n self._classifier = None\n<|end_body_1|>\n\n<|body_start_2|>\n if self._matcher_tup is None:\n self.fit(new_embs, new_labels)\n else:\n old_embs = self._matcher_tup.embs\n old_labels = self._matcher_tup.labels\n embs = old_embs + new_embs\n labels = old_labels + new_labels\n self.fit(embs, labels)\n<|end_body_2|>\n", "class_docstring": "Find neareast id for a embedding by using kd-tree", "class_name": "KdTreeMatcher", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass KdTreeMatcher:\n \"\"\"Find neareast id for a embedding by using kd-tree\"\"\"\n\n def match(self, emb, top_matches=Config.Matcher.MAX_TOP_MATCHES, threshold=None, return_dists=False, always_return_closest=True):\n \"\"\"See superclass doc\"\"\"\n <|body_0|>\n\n def fit(self, embs, labels):\n \"\"\"Fit current matcher to new embs and labels :param embs: list of embs :param labels: list of label (face id) for each emb\"\"\"\n <|body_1|>\n\n def update(self, new_embs, new_labels):\n \"\"\"add new embs and labels to current matcher :param new_embs: list of embs :param new_labels: list of label (face id) for each emb\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if threshold is None:\n threshold = self._threshold\n if self._classifier is not None:\n top_matches = min(top_matches, self._matcher_tup.length - 1)\n dists, inds = self._classifier.query(emb, k=top_matches)\n dists = np.squeeze(dists).tolist()\n inds = np.squeeze(inds).tolist()\n predict_id = self._matcher_tup.labels[inds[0]]\n min_dist = dists[0]\n if min_dist <= threshold:\n top_match_ids = [self._matcher_tup.labels[idx] for i, idx in enumerate(inds) if dists[i] <= threshold]\n else:\n top_match_ids = [predict_id]\n dists = dists[0:len(top_match_ids)]\n else:\n top_match_ids = [Config.Matcher.NEW_FACE]\n dists = [-1]\n predict_id = Config.Matcher.NEW_FACE\n min_dist = -1\n if return_dists:\n return (top_match_ids, dists)\n return top_match_ids\n<|end_body_0|>\n\n<|body_start_1|>\n length = len(embs)\n print('Fit', length)\n if length > 0:\n reg_mat = np.asarray(embs).reshape((length, Config.Matcher.EMB_LENGTH))\n self._classifier = neighbors.KDTree(reg_mat, leaf_size=Config.Matcher.INDEX_LEAF_SIZE, metric='euclidean')\n 
self._matcher_tup = KdTreeTuple(embs, labels, length)\n else:\n self._matcher_tup = None\n self._classifier = None\n<|end_body_1|>\n\n<|body_start_2|>\n if self._matcher_tup is None:\n self.fit(new_embs, new_labels)\n else:\n old_embs = self._matcher_tup.embs\n old_labels = self._matcher_tup.labels\n embs = old_embs + new_embs\n labels = old_labels + new_labels\n self.fit(embs, labels)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000324", "length_bytes": 16051, "license_type": "no_license", "methods": [{"docstring": "See superclass doc", "name": "match", "signature": "def match(self, emb, top_matches=Config.Matcher.MAX_TOP_MATCHES, threshold=None, return_dists=False, always_return_closest=True)"}, {"docstring": "Fit current matcher to new embs and labels :param embs: list of embs :param labels: list of label (face id) for each emb", "name": "fit", "signature": "def fit(self, embs, labels)"}, {"docstring": "add new embs and labels to current matcher :param new_embs: list of embs :param new_labels: list of label (face id) for each emb", "name": "update", "signature": "def update(self, new_embs, new_labels)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_000227", "prompt": "Implement the Python class `KdTreeMatcher` described below.\n\nClass description:\nFind neareast id for a embedding by using kd-tree\n\nMethod signatures and docstrings:\n- def match(self, emb, top_matches=Config.Matcher.MAX_TOP_MATCHES, threshold=None, return_dists=False, always_return_closest=True): See superclass doc\n- def fit(self, embs, labels): Fit current matcher to new embs and labels :param embs: list of embs :param labels: list of label (face id) for each emb\n- def update(self, new_embs, new_labels): add new embs and labels to current matcher :param new_embs: list of embs :param new_labels: list of label (face id) for each emb", "prompted_full_text": "Implement the Python class `KdTreeMatcher` described below.\n\nClass description:\nFind neareast id for a embedding by using kd-tree\n\nMethod signatures and docstrings:\n- def match(self, emb, top_matches=Config.Matcher.MAX_TOP_MATCHES, threshold=None, return_dists=False, always_return_closest=True): See superclass doc\n- def fit(self, embs, labels): Fit current matcher to new embs and labels :param embs: list of embs :param labels: list of label (face id) for each emb\n- def update(self, new_embs, new_labels): add new embs and labels to current matcher :param new_embs: list of embs :param new_labels: list of label (face id) for each emb\n\n<|skeleton|>\nclass KdTreeMatcher:\n \"\"\"Find neareast id for a embedding by using kd-tree\"\"\"\n\n def match(self, emb, top_matches=Config.Matcher.MAX_TOP_MATCHES, threshold=None, return_dists=False, always_return_closest=True):\n \"\"\"See superclass doc\"\"\"\n <|body_0|>\n\n def fit(self, embs, labels):\n \"\"\"Fit current matcher to new embs and labels :param embs: list of embs :param labels: list of label (face id) for each emb\"\"\"\n <|body_1|>\n\n def update(self, new_embs, new_labels):\n \"\"\"add new embs and labels to current matcher :param new_embs: list of embs :param new_labels: list of label (face id) for each emb\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if threshold is None:\n threshold = self._threshold\n if self._classifier is not None:\n top_matches = min(top_matches, self._matcher_tup.length - 1)\n dists, inds = self._classifier.query(emb, k=top_matches)\n dists = np.squeeze(dists).tolist()\n inds = np.squeeze(inds).tolist()\n predict_id = 
self._matcher_tup.labels[inds[0]]\n min_dist = dists[0]\n if min_dist <= threshold:\n top_match_ids = [self._matcher_tup.labels[idx] for i, idx in enumerate(inds) if dists[i] <= threshold]\n else:\n top_match_ids = [predict_id]\n dists = dists[0:len(top_match_ids)]\n else:\n top_match_ids = [Config.Matcher.NEW_FACE]\n dists = [-1]\n predict_id = Config.Matcher.NEW_FACE\n min_dist = -1\n if return_dists:\n return (top_match_ids, dists)\n return top_match_ids\n<|end_body_0|>\n\n<|body_start_1|>\n length = len(embs)\n print('Fit', length)\n if length > 0:\n reg_mat = np.asarray(embs).reshape((length, Config.Matcher.EMB_LENGTH))\n self._classifier = neighbors.KDTree(reg_mat, leaf_size=Config.Matcher.INDEX_LEAF_SIZE, metric='euclidean')\n self._matcher_tup = KdTreeTuple(embs, labels, length)\n else:\n self._matcher_tup = None\n self._classifier = None\n<|end_body_1|>\n\n<|body_start_2|>\n if self._matcher_tup is None:\n self.fit(new_embs, new_labels)\n else:\n old_embs = self._matcher_tup.embs\n old_labels = self._matcher_tup.labels\n embs = old_embs + new_embs\n labels = old_labels + new_labels\n self.fit(embs, labels)\n<|end_body_2|>\n", "revision_id": "0f97af4e110b0e8de8d1b9f18fcd3f69c69b54cc", "skeleton": "<|skeleton|>\nclass KdTreeMatcher:\n \"\"\"Find neareast id for a embedding by using kd-tree\"\"\"\n\n def match(self, emb, top_matches=Config.Matcher.MAX_TOP_MATCHES, threshold=None, return_dists=False, always_return_closest=True):\n \"\"\"See superclass doc\"\"\"\n <|body_0|>\n\n def fit(self, embs, labels):\n \"\"\"Fit current matcher to new embs and labels :param embs: list of embs :param labels: list of label (face id) for each emb\"\"\"\n <|body_1|>\n\n def update(self, new_embs, new_labels):\n \"\"\"add new embs and labels to current matcher :param new_embs: list of embs :param new_labels: list of label (face id) for each emb\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class KdTreeMatcher:\n \"\"\"Find neareast id for a embedding by using kd-tree\"\"\"\n\n def match(self, emb, top_matches=Config.Matcher.MAX_TOP_MATCHES, threshold=None, return_dists=False, always_return_closest=True):\n \"\"\"See superclass doc\"\"\"\n if threshold is None:\n threshold = self._threshold\n if self._classifier is not None:\n top_matches = min(top_matches, self._matcher_tup.length - 1)\n dists, inds = self._classifier.query(emb, k=top_matches)\n dists = np.squeeze(dists).tolist()\n inds = np.squeeze(inds).tolist()\n predict_id = self._matcher_tup.labels[inds[0]]\n min_dist = dists[0]\n if min_dist <= threshold:\n top_match_ids = [self._matcher_tup.labels[idx] for i, idx in enumerate(inds) if dists[i] <= threshold]\n else:\n top_match_ids = [predict_id]\n dists = dists[0:len(top_match_ids)]\n else:\n top_match_ids = [Config.Matcher.NEW_FACE]\n dists = [-1]\n predict_id = Config.Matcher.NEW_FACE\n min_dist = -1\n if return_dists:\n return (top_match_ids, dists)\n return top_match_ids\n\n def fit(self, embs, labels):\n \"\"\"Fit current matcher to new embs and labels :param embs: list of embs :param labels: list of label (face id) for each emb\"\"\"\n length = len(embs)\n print('Fit', length)\n if length > 0:\n reg_mat = np.asarray(embs).reshape((length, Config.Matcher.EMB_LENGTH))\n self._classifier = neighbors.KDTree(reg_mat, leaf_size=Config.Matcher.INDEX_LEAF_SIZE, metric='euclidean')\n self._matcher_tup = KdTreeTuple(embs, labels, length)\n else:\n self._matcher_tup = None\n 
self._classifier = None\n\n def update(self, new_embs, new_labels):\n \"\"\"add new embs and labels to current matcher :param new_embs: list of embs :param new_labels: list of label (face id) for each emb\"\"\"\n if self._matcher_tup is None:\n self.fit(new_embs, new_labels)\n else:\n old_embs = self._matcher_tup.embs\n old_labels = self._matcher_tup.labels\n embs = old_embs + new_embs\n labels = old_labels + new_labels\n self.fit(embs, labels)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/matcher.py", "source_repo": "duongle98/Face-Rec", "split": "val", "star_events_count": 1} {"blob_id": "3a38abf1f9be341e3507e9174e8fa6c4dd0e8036", "bodies": ["if len(fname) < 4 or fname[-4:] != '.omf':\n fname = fname + '.omf'\nself.fname = fname\nwith open(fname, 'wb') as fopen:\n self.initialize_header(fopen, project.uid)\n self.project_json = project.serialize(open_file=fopen)\n self.update_header(fopen)\n fopen.write(json.dumps(self.project_json).encode('utf-8'))", "fopen.seek(0, 0)\nfopen.write(b'\\x84\\x83\\x82\\x81')\nfopen.write(struct.pack('<32s', COMPATIBILITY_VERSION.ljust(32, b'\\x00')))\nfopen.write(struct.pack('<16s', uid.bytes))\nfopen.seek(8, 1)", "json_start = fopen.tell()\nfopen.seek(52, 0)\nfopen.write(struct.pack('<Q', json_start))\nfopen.seek(json_start, 0)"], "bodies_text": "<|body_start_0|>\n if len(fname) < 4 or fname[-4:] != '.omf':\n fname = fname + '.omf'\n self.fname = fname\n with open(fname, 'wb') as fopen:\n self.initialize_header(fopen, project.uid)\n self.project_json = project.serialize(open_file=fopen)\n self.update_header(fopen)\n fopen.write(json.dumps(self.project_json).encode('utf-8'))\n<|end_body_0|>\n\n<|body_start_1|>\n fopen.seek(0, 0)\n fopen.write(b'\\x84\\x83\\x82\\x81')\n fopen.write(struct.pack('<32s', COMPATIBILITY_VERSION.ljust(32, b'\\x00')))\n fopen.write(struct.pack('<16s', uid.bytes))\n fopen.seek(8, 1)\n<|end_body_1|>\n\n<|body_start_2|>\n json_start = fopen.tell()\n fopen.seek(52, 0)\n fopen.write(struct.pack('<Q', json_start))\n fopen.seek(json_start, 0)\n<|end_body_2|>\n", "class_docstring": "OMFWriter serializes a OMF project to a file .. code:: proj = omf.project() ... omf.OMFWriter(proj, 'outfile.omf') The output file starts with a 60 byte header: * 4 byte magic number: :code:`b'\\\\x81\\\\x82\\\\x83\\\\x84'` * 32 byte version string: :code:`'OMF-v0.9.0'` (other bytes empty) * 16 byte project uid (in little-endian bytes) * 8 byte unsigned long long (little-endian): JSON start location in file Following the header is a binary data blob. Following the binary is a UTF-8 encoded JSON dictionary containing all elements of the project keyed by UID string. Objects can reference each other by UID, and arrays and images contain pointers to their data in the binary blob.", "class_name": "OMFWriter", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OMFWriter:\n \"\"\"OMFWriter serializes a OMF project to a file .. code:: proj = omf.project() ... omf.OMFWriter(proj, 'outfile.omf') The output file starts with a 60 byte header: * 4 byte magic number: :code:`b'\\\\x81\\\\x82\\\\x83\\\\x84'` * 32 byte version string: :code:`'OMF-v0.9.0'` (other bytes empty) * 16 byte project uid (in little-endian bytes) * 8 byte unsigned long long (little-endian): JSON start location in file Following the header is a binary data blob. Following the binary is a UTF-8 encoded JSON dictionary containing all elements of the project keyed by UID string. 
Objects can reference each other by UID, and arrays and images contain pointers to their data in the binary blob.\"\"\"\n\n def __init__(self, project, fname):\n \"\"\"Project serialization is performed on OMFWriter init Binary data is written during project serialization\"\"\"\n <|body_0|>\n\n def initialize_header(fopen, uid):\n \"\"\"Write magic number, version string, project uid, and zero bytes Total header length = 60 bytes 4 (magic number) + 32 (version) + 16 (uid in bytes) + 8 (JSON start, written later)\"\"\"\n <|body_1|>\n\n def update_header(fopen):\n \"\"\"Return to header and write the correct JSON start location\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(fname) < 4 or fname[-4:] != '.omf':\n fname = fname + '.omf'\n self.fname = fname\n with open(fname, 'wb') as fopen:\n self.initialize_header(fopen, project.uid)\n self.project_json = project.serialize(open_file=fopen)\n self.update_header(fopen)\n fopen.write(json.dumps(self.project_json).encode('utf-8'))\n<|end_body_0|>\n\n<|body_start_1|>\n fopen.seek(0, 0)\n fopen.write(b'\\x84\\x83\\x82\\x81')\n fopen.write(struct.pack('<32s', COMPATIBILITY_VERSION.ljust(32, b'\\x00')))\n fopen.write(struct.pack('<16s', uid.bytes))\n fopen.seek(8, 1)\n<|end_body_1|>\n\n<|body_start_2|>\n json_start = fopen.tell()\n fopen.seek(52, 0)\n fopen.write(struct.pack('<Q', json_start))\n fopen.seek(json_start)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000325", "length_bytes": 6061, "license_type": "permissive", "methods": [{"docstring": "Project serialization is performed on OMFWriter init Binary data is written during project serialization", "name": "__init__", "signature": "def __init__(self, project, fname)"}, {"docstring": "Write magic number, version string, project uid, and zero bytes Total header length = 60 bytes 4 (magic number) + 32 (version) + 16 (uid in bytes) + 8 (JSON start, written later)", "name": "initialize_header", "signature": "def initialize_header(fopen, uid)"}, {"docstring": "Return to header and write the correct JSON start location", "name": "update_header", "signature": "def update_header(fopen)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003396", "prompt": "Implement the Python class `OMFWriter` described below.\n\nClass description:\nOMFWriter serializes a OMF project to a file .. code:: proj = omf.project() ... omf.OMFWriter(proj, 'outfile.omf') The output file starts with a 60 byte header: * 4 byte magic number: :code:`b'\\\\x81\\\\x82\\\\x83\\\\x84'` * 32 byte version string: :code:`'OMF-v0.9.0'` (other bytes empty) * 16 byte project uid (in little-endian bytes) * 8 byte unsigned long long (little-endian): JSON start location in file Following the header is a binary data blob. Following the binary is a UTF-8 encoded JSON dictionary containing all elements of the project keyed by UID string. 
Objects can reference each other by UID, and arrays and images contain pointers to their data in the binary blob.\n\nMethod signatures and docstrings:\n- def __init__(self, project, fname): Project serialization is performed on OMFWriter init Binary data is written during project serialization\n- def initialize_header(fopen, uid): Write magic number, version string, project uid, and zero bytes Total header length = 60 bytes 4 (magic number) + 32 (version) + 16 (uid in bytes) + 8 (JSON start, written later)\n- def update_header(fopen): Return to header and write the correct JSON start location", "prompted_full_text": "Implement the Python class `OMFWriter` described below.\n\nClass description:\nOMFWriter serializes a OMF project to a file .. code:: proj = omf.project() ... omf.OMFWriter(proj, 'outfile.omf') The output file starts with a 60 byte header: * 4 byte magic number: :code:`b'\\\\x81\\\\x82\\\\x83\\\\x84'` * 32 byte version string: :code:`'OMF-v0.9.0'` (other bytes empty) * 16 byte project uid (in little-endian bytes) * 8 byte unsigned long long (little-endian): JSON start location in file Following the header is a binary data blob. Following the binary is a UTF-8 encoded JSON dictionary containing all elements of the project keyed by UID string. Objects can reference each other by UID, and arrays and images contain pointers to their data in the binary blob.\n\nMethod signatures and docstrings:\n- def __init__(self, project, fname): Project serialization is performed on OMFWriter init Binary data is written during project serialization\n- def initialize_header(fopen, uid): Write magic number, version string, project uid, and zero bytes Total header length = 60 bytes 4 (magic number) + 32 (version) + 16 (uid in bytes) + 8 (JSON start, written later)\n- def update_header(fopen): Return to header and write the correct JSON start location\n\n<|skeleton|>\nclass OMFWriter:\n \"\"\"OMFWriter serializes a OMF project to a file .. code:: proj = omf.project() ... omf.OMFWriter(proj, 'outfile.omf') The output file starts with a 60 byte header: * 4 byte magic number: :code:`b'\\\\x81\\\\x82\\\\x83\\\\x84'` * 32 byte version string: :code:`'OMF-v0.9.0'` (other bytes empty) * 16 byte project uid (in little-endian bytes) * 8 byte unsigned long long (little-endian): JSON start location in file Following the header is a binary data blob. Following the binary is a UTF-8 encoded JSON dictionary containing all elements of the project keyed by UID string. 
Objects can reference each other by UID, and arrays and images contain pointers to their data in the binary blob.\"\"\"\n\n def __init__(self, project, fname):\n \"\"\"Project serialization is performed on OMFWriter init Binary data is written during project serialization\"\"\"\n <|body_0|>\n\n def initialize_header(fopen, uid):\n \"\"\"Write magic number, version string, project uid, and zero bytes Total header length = 60 bytes 4 (magic number) + 32 (version) + 16 (uid in bytes) + 8 (JSON start, written later)\"\"\"\n <|body_1|>\n\n def update_header(fopen):\n \"\"\"Return to header and write the correct JSON start location\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(fname) < 4 or fname[-4:] != '.omf':\n fname = fname + '.omf'\n self.fname = fname\n with open(fname, 'wb') as fopen:\n self.initialize_header(fopen, project.uid)\n self.project_json = project.serialize(open_file=fopen)\n self.update_header(fopen)\n fopen.write(json.dumps(self.project_json).encode('utf-8'))\n<|end_body_0|>\n\n<|body_start_1|>\n fopen.seek(0, 0)\n fopen.write(b'\\x84\\x83\\x82\\x81')\n fopen.write(struct.pack('<32s', COMPATIBILITY_VERSION.ljust(32, b'\\x00')))\n fopen.write(struct.pack('<16s', uid.bytes))\n fopen.seek(8, 1)\n<|end_body_1|>\n\n<|body_start_2|>\n json_start = fopen.tell()\n fopen.seek(52, 0)\n fopen.write(struct.pack('<Q', json_start))\n fopen.seek(json_start)\n<|end_body_2|>\n", "revision_id": "d35f04c8ab8f007384a7bea4d4997572daf38553", "skeleton": "<|skeleton|>\nclass OMFWriter:\n \"\"\"OMFWriter serializes a OMF project to a file .. code:: proj = omf.project() ... omf.OMFWriter(proj, 'outfile.omf') The output file starts with a 60 byte header: * 4 byte magic number: :code:`b'\\\\x81\\\\x82\\\\x83\\\\x84'` * 32 byte version string: :code:`'OMF-v0.9.0'` (other bytes empty) * 16 byte project uid (in little-endian bytes) * 8 byte unsigned long long (little-endian): JSON start location in file Following the header is a binary data blob. Following the binary is a UTF-8 encoded JSON dictionary containing all elements of the project keyed by UID string. Objects can reference each other by UID, and arrays and images contain pointers to their data in the binary blob.\"\"\"\n\n def __init__(self, project, fname):\n \"\"\"Project serialization is performed on OMFWriter init Binary data is written during project serialization\"\"\"\n <|body_0|>\n\n def initialize_header(fopen, uid):\n \"\"\"Write magic number, version string, project uid, and zero bytes Total header length = 60 bytes 4 (magic number) + 32 (version) + 16 (uid in bytes) + 8 (JSON start, written later)\"\"\"\n <|body_1|>\n\n def update_header(fopen):\n \"\"\"Return to header and write the correct JSON start location\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class OMFWriter:\n \"\"\"OMFWriter serializes a OMF project to a file .. code:: proj = omf.project() ... omf.OMFWriter(proj, 'outfile.omf') The output file starts with a 60 byte header: * 4 byte magic number: :code:`b'\\\\x81\\\\x82\\\\x83\\\\x84'` * 32 byte version string: :code:`'OMF-v0.9.0'` (other bytes empty) * 16 byte project uid (in little-endian bytes) * 8 byte unsigned long long (little-endian): JSON start location in file Following the header is a binary data blob. Following the binary is a UTF-8 encoded JSON dictionary containing all elements of the project keyed by UID string. 
Objects can reference each other by UID, and arrays and images contain pointers to their data in the binary blob.\"\"\"\n\n def __init__(self, project, fname):\n \"\"\"Project serialization is performed on OMFWriter init Binary data is written during project serialization\"\"\"\n if len(fname) < 4 or fname[-4:] != '.omf':\n fname = fname + '.omf'\n self.fname = fname\n with open(fname, 'wb') as fopen:\n self.initialize_header(fopen, project.uid)\n self.project_json = project.serialize(open_file=fopen)\n self.update_header(fopen)\n fopen.write(json.dumps(self.project_json).encode('utf-8'))\n\n def initialize_header(fopen, uid):\n \"\"\"Write magic number, version string, project uid, and zero bytes Total header length = 60 bytes 4 (magic number) + 32 (version) + 16 (uid in bytes) + 8 (JSON start, written later)\"\"\"\n fopen.seek(0, 0)\n fopen.write(b'\\x84\\x83\\x82\\x81')\n fopen.write(struct.pack('<32s', COMPATIBILITY_VERSION.ljust(32, b'\\x00')))\n fopen.write(struct.pack('<16s', uid.bytes))\n fopen.seek(8, 1)\n\n def update_header(fopen):\n \"\"\"Return to header and write the correct JSON start location\"\"\"\n json_start = fopen.tell()\n fopen.seek(52, 0)\n fopen.write(struct.pack('<Q', json_start))\n fopen.seek(json_start)\n", ...} {..., "bodies": [..., "assert os.path.exists(path)\nif build_dict:\n with open(path, 'r') as f:\n for line in f:\n words = line.split()\n for word in words:\n self.dictionary.add_word(word)\nif thd > 1:\n self.dictionary.rebuild_by_freq(thd)\nids_list = []\nwith open(path, 'r') as f:\n for line in f:\n words = line.split()\n ids = []\n for word in words:\n ids.append(self.dictionary[word])\n ids_list.append(ids)\nreturn ids_list"], "bodies_text": "<|body_start_0|>\n if not os.path.exists(path):\n os.mkdir(path)\n dict_file_name = os.path.join(path, 'dict.pkl')\n if os.path.exists(dict_file_name):\n print('Loading dictionary...')\n self.dictionary = pickle.load(open(dict_file_name, 'rb'))\n build_dict = False\n else:\n self.dictionary = Dictionary()\n build_dict = True\n train_path = os.path.join(path, 'train.txt')\n if not os.path.exists(train_path):\n wget.download(urls['train'], train_path)\n self.train = self.tokenize(train_path, build_dict=build_dict, thd=thd)\n valid_path = os.path.join(path, 'valid.txt')\n if not os.path.exists(valid_path):\n wget.download(urls['valid'], valid_path)\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\n test_path = os.path.join(path, 'test.txt')\n if not os.path.exists(test_path):\n wget.download(urls['test'], test_path)\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\n if build_dict:\n print('Saving dictionary...')\n dict_file_name = os.path.join(path, 'dict.pkl')\n pickle.dump(self.dictionary, open(dict_file_name, 'wb'))\n<|end_body_0|>\n\n<|body_start_1|>\n assert os.path.exists(path)\n if build_dict:\n with open(path, 'r') as f:\n for line in f:\n words = line.split()\n for word in words:\n self.dictionary.add_word(word)\n if thd > 1:\n self.dictionary.rebuild_by_freq(thd)\n ids_list = []\n with open(path, 'r') as f:\n for line in f:\n words = line.split()\n ids = []\n for word in words:\n ids.append(self.dictionary[word])\n ids_list.append(ids)\n return ids_list\n<|end_body_1|>\n", "class_docstring": "Word-level language model corpus.", "class_name": "Corpus", "detected_licenses": ["Apache-2.0", "CC-BY-4.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Corpus:\n \"\"\"Word-level language model corpus.\"\"\"\n\n def __init__(self, path, thd=0):\n \"\"\"Initialization. 
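The OMFWriter record above finishes update_header with struct.pack('<Q', json_start), the 8-byte little-endian offset its docstring names. As an aside, a self-contained round-trip of the 60-byte header layout; the in-memory buffer, random uid, and placeholder blob are stand-ins, not values from the record.

# Round-trip of the 60-byte OMF header described in the OMFWriter docstring:
# 4 (magic) + 32 (version) + 16 (uid) + 8 (little-endian uint64 JSON offset).
import io
import struct
import uuid

buf = io.BytesIO()
buf.write(b'\x84\x83\x82\x81')                                    # magic number
buf.write(struct.pack('<32s', b'OMF-v0.9.0'.ljust(32, b'\x00')))  # version string
buf.write(struct.pack('<16s', uuid.uuid4().bytes))                # project uid
buf.write(struct.pack('<Q', 0))                                   # offset placeholder
buf.write(b'...binary blob...')                                   # array/image data

json_start = buf.tell()       # the JSON dictionary begins right after the blob
buf.seek(52, 0)               # 4 + 32 + 16 = 52: the offset field lives here
buf.write(struct.pack('<Q', json_start))
buf.seek(json_start, 0)

assert struct.unpack_from('<Q', buf.getvalue(), 52)[0] == json_start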
Args: path: path to corpus location, the folder should include 'train.txt', 'valid.txt' and 'test.txt' thd: tokens that appears less then thd times in train.txt will be replaced by \"\"\"\n <|body_0|>\n\n def tokenize(self, path, build_dict=False, thd=0):\n \"\"\"Tokenizes a text file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not os.path.exists(path):\n os.mkdir(path)\n dict_file_name = os.path.join(path, 'dict.pkl')\n if os.path.exists(dict_file_name):\n print('Loading dictionary...')\n self.dictionary = pickle.load(open(dict_file_name, 'rb'))\n build_dict = False\n else:\n self.dictionary = Dictionary()\n build_dict = True\n train_path = os.path.join(path, 'train.txt')\n if not os.path.exists(train_path):\n wget.download(urls['train'], train_path)\n self.train = self.tokenize(train_path, build_dict=build_dict, thd=thd)\n valid_path = os.path.join(path, 'valid.txt')\n if not os.path.exists(valid_path):\n wget.download(urls['valid'], valid_path)\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\n test_path = os.path.join(path, 'test.txt')\n if not os.path.exists(test_path):\n wget.download(urls['test'], test_path)\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\n if build_dict:\n print('Saving dictionary...')\n dict_file_name = os.path.join(path, 'dict.pkl')\n pickle.dump(self.dictionary, open(dict_file_name, 'wb'))\n<|end_body_0|>\n\n<|body_start_1|>\n assert os.path.exists(path)\n if build_dict:\n with open(path, 'r') as f:\n for line in f:\n words = line.split()\n for word in words:\n self.dictionary.add_word(word)\n if thd > 1:\n self.dictionary.rebuild_by_freq(thd)\n ids_list = []\n with open(path, 'r') as f:\n for line in f:\n words = line.split()\n ids = []\n for word in words:\n ids.append(self.dictionary[word])\n ids_list.append(ids)\n return ids_list\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000326", "length_bytes": 4269, "license_type": "permissive", "methods": [{"docstring": "Initialization. Args: path: path to corpus location, the folder should include 'train.txt', 'valid.txt' and 'test.txt' thd: tokens that appears less then thd times in train.txt will be replaced by ", "name": "__init__", "signature": "def __init__(self, path, thd=0)"}, {"docstring": "Tokenizes a text file.", "name": "tokenize", "signature": "def tokenize(self, path, build_dict=False, thd=0)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004735", "prompt": "Implement the Python class `Corpus` described below.\n\nClass description:\nWord-level language model corpus.\n\nMethod signatures and docstrings:\n- def __init__(self, path, thd=0): Initialization. Args: path: path to corpus location, the folder should include 'train.txt', 'valid.txt' and 'test.txt' thd: tokens that appears less then thd times in train.txt will be replaced by \n- def tokenize(self, path, build_dict=False, thd=0): Tokenizes a text file.", "prompted_full_text": "Implement the Python class `Corpus` described below.\n\nClass description:\nWord-level language model corpus.\n\nMethod signatures and docstrings:\n- def __init__(self, path, thd=0): Initialization. Args: path: path to corpus location, the folder should include 'train.txt', 'valid.txt' and 'test.txt' thd: tokens that appears less then thd times in train.txt will be replaced by \n- def tokenize(self, path, build_dict=False, thd=0): Tokenizes a text file.\n\n<|skeleton|>\nclass Corpus:\n \"\"\"Word-level language model corpus.\"\"\"\n\n def __init__(self, path, thd=0):\n \"\"\"Initialization. 
Args: path: path to corpus location, the folder should include 'train.txt', 'valid.txt' and 'test.txt' thd: tokens that appears less then thd times in train.txt will be replaced by \"\"\"\n <|body_0|>\n\n def tokenize(self, path, build_dict=False, thd=0):\n \"\"\"Tokenizes a text file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not os.path.exists(path):\n os.mkdir(path)\n dict_file_name = os.path.join(path, 'dict.pkl')\n if os.path.exists(dict_file_name):\n print('Loading dictionary...')\n self.dictionary = pickle.load(open(dict_file_name, 'rb'))\n build_dict = False\n else:\n self.dictionary = Dictionary()\n build_dict = True\n train_path = os.path.join(path, 'train.txt')\n if not os.path.exists(train_path):\n wget.download(urls['train'], train_path)\n self.train = self.tokenize(train_path, build_dict=build_dict, thd=thd)\n valid_path = os.path.join(path, 'valid.txt')\n if not os.path.exists(valid_path):\n wget.download(urls['valid'], valid_path)\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\n test_path = os.path.join(path, 'test.txt')\n if not os.path.exists(test_path):\n wget.download(urls['test'], test_path)\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\n if build_dict:\n print('Saving dictionary...')\n dict_file_name = os.path.join(path, 'dict.pkl')\n pickle.dump(self.dictionary, open(dict_file_name, 'wb'))\n<|end_body_0|>\n\n<|body_start_1|>\n assert os.path.exists(path)\n if build_dict:\n with open(path, 'r') as f:\n for line in f:\n words = line.split()\n for word in words:\n self.dictionary.add_word(word)\n if thd > 1:\n self.dictionary.rebuild_by_freq(thd)\n ids_list = []\n with open(path, 'r') as f:\n for line in f:\n words = line.split()\n ids = []\n for word in words:\n ids.append(self.dictionary[word])\n ids_list.append(ids)\n return ids_list\n<|end_body_1|>\n", "revision_id": "5573d9c5822f4e866b6692769963ae819cb3f10d", "skeleton": "<|skeleton|>\nclass Corpus:\n \"\"\"Word-level language model corpus.\"\"\"\n\n def __init__(self, path, thd=0):\n \"\"\"Initialization. Args: path: path to corpus location, the folder should include 'train.txt', 'valid.txt' and 'test.txt' thd: tokens that appears less then thd times in train.txt will be replaced by \"\"\"\n <|body_0|>\n\n def tokenize(self, path, build_dict=False, thd=0):\n \"\"\"Tokenizes a text file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Corpus:\n \"\"\"Word-level language model corpus.\"\"\"\n\n def __init__(self, path, thd=0):\n \"\"\"Initialization. 
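The Corpus record above makes two passes over each file: one to grow a word-to-id Dictionary, one to map every line to a list of ids. A condensed sketch, with a plain dict standing in for the record's Dictionary class and an in-memory list standing in for the file.

# Two-pass word-to-id tokenization in the spirit of Corpus.tokenize().
# `lines` stands in for a text file; a plain dict replaces Dictionary.
lines = ['the cat sat', 'the dog sat']

word2id = {}
for line in lines:                 # pass 1: build the vocabulary
    for word in line.split():
        word2id.setdefault(word, len(word2id))

ids_list = [[word2id[w] for w in line.split()] for line in lines]  # pass 2
print(word2id)   # {'the': 0, 'cat': 1, 'sat': 2, 'dog': 3}
print(ids_list)  # [[0, 1, 2], [0, 3, 2]]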
Args: path: path to corpus location, the folder should include 'train.txt', 'valid.txt' and 'test.txt' thd: tokens that appears less then thd times in train.txt will be replaced by \"\"\"\n if not os.path.exists(path):\n os.mkdir(path)\n dict_file_name = os.path.join(path, 'dict.pkl')\n if os.path.exists(dict_file_name):\n print('Loading dictionary...')\n self.dictionary = pickle.load(open(dict_file_name, 'rb'))\n build_dict = False\n else:\n self.dictionary = Dictionary()\n build_dict = True\n train_path = os.path.join(path, 'train.txt')\n if not os.path.exists(train_path):\n wget.download(urls['train'], train_path)\n self.train = self.tokenize(train_path, build_dict=build_dict, thd=thd)\n valid_path = os.path.join(path, 'valid.txt')\n if not os.path.exists(valid_path):\n wget.download(urls['valid'], valid_path)\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\n test_path = os.path.join(path, 'test.txt')\n if not os.path.exists(test_path):\n wget.download(urls['test'], test_path)\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\n if build_dict:\n print('Saving dictionary...')\n dict_file_name = os.path.join(path, 'dict.pkl')\n pickle.dump(self.dictionary, open(dict_file_name, 'wb'))\n\n def tokenize(self, path, build_dict=False, thd=0):\n \"\"\"Tokenizes a text file.\"\"\"\n assert os.path.exists(path)\n if build_dict:\n with open(path, 'r') as f:\n for line in f:\n words = line.split()\n for word in words:\n self.dictionary.add_word(word)\n if thd > 1:\n self.dictionary.rebuild_by_freq(thd)\n ids_list = []\n with open(path, 'r') as f:\n for line in f:\n words = line.split()\n ids = []\n for word in words:\n ids.append(self.dictionary[word])\n ids_list.append(ids)\n return ids_list\n", "source": "the_stack_v2_python_sparse", "source_path": "structformer/data_penn.py", "source_repo": "Jimmy-INL/google-research", "split": "val", "star_events_count": 1} {"blob_id": "0e5802b0c29271bac19e6bef6d1ab89b247a6dd7", "bodies": ["if n == 1 or n == 0:\n return n\na, b = (1, 2)\nfor _ in range(2, n):\n tmp = a + b\n a = b\n b = tmp\nreturn b", "if n == 0 or n == 1:\n return n\na = [1, 2]\nfor i in range(2, n):\n a.append(a[i - 1] + a[i - 2])\nreturn a[-1]", "if n == 1 or n == 0:\n return n\nif n == 2:\n return 2\nreturn self.climbStairs(n - 1) + self.climbStairs(n - 2)"], "bodies_text": "<|body_start_0|>\n if n == 1 or n == 0:\n return n\n a, b = (1, 2)\n for _ in range(2, n):\n tmp = a + b\n a = b\n b = tmp\n return b\n<|end_body_0|>\n\n<|body_start_1|>\n if n == 0 or n == 1:\n return n\n a = [1, 2]\n for i in range(2, n):\n a.append(a[i - 1] + a[i - 2])\n return a[-1]\n<|end_body_1|>\n\n<|body_start_2|>\n if n == 1 or n == 0:\n return n\n if n == 2:\n return 2\n return self.climbStairs(n - 1) + self.climbStairs(n - 2)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def climbStairs(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def dynamic_method(self, n):\n \"\"\"和上面用斐波那契数列方法本质是一样的 :param n: :return:\"\"\"\n <|body_1|>\n\n def recursive_method(self, n):\n \"\"\"这个方法超时了 :param n: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if n == 1 or n == 0:\n return n\n a, b = (1, 2)\n for _ in range(2, n):\n tmp = a + b\n a = b\n b = tmp\n return b\n<|end_body_0|>\n\n<|body_start_1|>\n if n == 0 or n == 1:\n return n\n a = [1, 2]\n for i in range(2, n):\n a.append(a[i - 1] + a[i - 2])\n return 
a[-1]\n<|end_body_1|>\n\n<|body_start_2|>\n if n == 1 or n == 0:\n return n\n if n == 2:\n return 2\n return self.climbStairs(n - 1) + self.climbStairs(n - 2)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000327", "length_bytes": 1078, "license_type": "permissive", "methods": [{"docstring": ":type n: int :rtype: int", "name": "climbStairs", "signature": "def climbStairs(self, n)"}, {"docstring": "和上面用斐波那契数列方法本质是一样的 :param n: :return:", "name": "dynamic_method", "signature": "def dynamic_method(self, n)"}, {"docstring": "这个方法超时了 :param n: :return:", "name": "recursive_method", "signature": "def recursive_method(self, n)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004876", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def climbStairs(self, n): :type n: int :rtype: int\n- def dynamic_method(self, n): 和上面用斐波那契数列方法本质是一样的 :param n: :return:\n- def recursive_method(self, n): 这个方法超时了 :param n: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def climbStairs(self, n): :type n: int :rtype: int\n- def dynamic_method(self, n): 和上面用斐波那契数列方法本质是一样的 :param n: :return:\n- def recursive_method(self, n): 这个方法超时了 :param n: :return:\n\n<|skeleton|>\nclass Solution:\n\n def climbStairs(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def dynamic_method(self, n):\n \"\"\"和上面用斐波那契数列方法本质是一样的 :param n: :return:\"\"\"\n <|body_1|>\n\n def recursive_method(self, n):\n \"\"\"这个方法超时了 :param n: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if n == 1 or n == 0:\n return n\n a, b = (1, 2)\n for _ in range(2, n):\n tmp = a + b\n a = b\n b = tmp\n return b\n<|end_body_0|>\n\n<|body_start_1|>\n if n == 0 or n == 1:\n return n\n a = [1, 2]\n for i in range(2, n):\n a.append(a[i - 1] + a[i - 2])\n return a[-1]\n<|end_body_1|>\n\n<|body_start_2|>\n if n == 1 or n == 0:\n return n\n if n == 2:\n return 2\n return self.climbStairs(n - 1) + self.climbStairs(n - 2)\n<|end_body_2|>\n", "revision_id": "f71118e8e05d4bcdcfb2dfc42187c73961b8b926", "skeleton": "<|skeleton|>\nclass Solution:\n\n def climbStairs(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def dynamic_method(self, n):\n \"\"\"和上面用斐波那契数列方法本质是一样的 :param n: :return:\"\"\"\n <|body_1|>\n\n def recursive_method(self, n):\n \"\"\"这个方法超时了 :param n: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def climbStairs(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n if n == 1 or n == 0:\n return n\n a, b = (1, 2)\n for _ in range(2, n):\n tmp = a + b\n a = b\n b = tmp\n return b\n\n def dynamic_method(self, n):\n \"\"\"和上面用斐波那契数列方法本质是一样的 :param n: :return:\"\"\"\n if n == 0 or n == 1:\n return n\n a = [1, 2]\n for i in range(2, n):\n a.append(a[i - 1] + a[i - 2])\n return a[-1]\n\n def recursive_method(self, n):\n \"\"\"这个方法超时了 :param n: :return:\"\"\"\n if n == 1 or n == 0:\n return n\n if n == 2:\n return 2\n return self.climbStairs(n - 1) + self.climbStairs(n - 2)\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode-algorithms/070. 
Climbing Stairs/solution.py", "source_repo": "bbruceyuan/algorithms-and-oj", "split": "val", "star_events_count": 11} {"blob_id": "29d15c4c250ca4850556ce4ef32048387d4e4249", "bodies": ["e = data_types.WebTestExpectation('test*', ['tag1'], 'Failure')\nself.assertTrue(e._CompareWildcard('testing123'))\nself.assertTrue(e._CompareWildcard('virtual/some-identifier/testing123'))\nself.assertTrue(e._CompareWildcard('test'))\nself.assertTrue(e._CompareWildcard('virtual/some-identifier/test'))\nself.assertFalse(e._CompareWildcard('tes'))\nself.assertFalse(e._CompareWildcard('/virtual/some-identifier/test'))\nself.assertFalse(e._CompareWildcard('virtual/some/malformed/test'))", "e = data_types.WebTestExpectation('test', ['tag1'], 'Failure')\nself.assertTrue(e._CompareNonWildcard('test'))\nself.assertTrue(e._CompareNonWildcard('virtual/some-identifier/test'))\nself.assertFalse(e._CompareNonWildcard('tes'))\nself.assertFalse(e._CompareNonWildcard('/virtual/some-identifier/test'))\nself.assertFalse(e._CompareNonWildcard('virtual/some/malformed/test'))"], "bodies_text": "<|body_start_0|>\n e = data_types.WebTestExpectation('test*', ['tag1'], 'Failure')\n self.assertTrue(e._CompareWildcard('testing123'))\n self.assertTrue(e._CompareWildcard('virtual/some-identifier/testing123'))\n self.assertTrue(e._CompareWildcard('test'))\n self.assertTrue(e._CompareWildcard('virtual/some-identifier/test'))\n self.assertFalse(e._CompareWildcard('tes'))\n self.assertFalse(e._CompareWildcard('/virtual/some-identifier/test'))\n self.assertFalse(e._CompareWildcard('virtual/some/malformed/test'))\n<|end_body_0|>\n\n<|body_start_1|>\n e = data_types.WebTestExpectation('test', ['tag1'], 'Failure')\n self.assertTrue(e._CompareNonWildcard('test'))\n self.assertTrue(e._CompareNonWildcard('virtual/some-identifier/test'))\n self.assertFalse(e._CompareNonWildcard('tes'))\n self.assertFalse(e._CompareNonWildcard('/virtual/some-identifier/test'))\n self.assertFalse(e._CompareNonWildcard('virtual/some/malformed/test'))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "WebTestExpectationUnittest", "detected_licenses": ["LGPL-2.0-or-later", "LicenseRef-scancode-warranty-disclaimer", "LGPL-2.1-only", "GPL-1.0-or-later", "GPL-2.0-only", "LGPL-2.0-only", "BSD-2-Clause", "LicenseRef-scancode-other-copyleft", "BSD-3-Clause", "Apache-2.0", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WebTestExpectationUnittest:\n\n def testCompareWildcard(self):\n \"\"\"Tests that wildcard comparisons work as expected.\"\"\"\n <|body_0|>\n\n def testCompareNonWildcard(self):\n \"\"\"Tests that non-wildcard comparisons work as expected.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n e = data_types.WebTestExpectation('test*', ['tag1'], 'Failure')\n self.assertTrue(e._CompareWildcard('testing123'))\n self.assertTrue(e._CompareWildcard('virtual/some-identifier/testing123'))\n self.assertTrue(e._CompareWildcard('test'))\n self.assertTrue(e._CompareWildcard('virtual/some-identifier/test'))\n self.assertFalse(e._CompareWildcard('tes'))\n self.assertFalse(e._CompareWildcard('/virtual/some-identifier/test'))\n self.assertFalse(e._CompareWildcard('virtual/some/malformed/test'))\n<|end_body_0|>\n\n<|body_start_1|>\n e = data_types.WebTestExpectation('test', ['tag1'], 'Failure')\n self.assertTrue(e._CompareNonWildcard('test'))\n self.assertTrue(e._CompareNonWildcard('virtual/some-identifier/test'))\n self.assertFalse(e._CompareNonWildcard('tes'))\n 
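Back on the climbing-stairs record that closes just above: its recursive_method is flagged as timing out because the call tree is exponential. Memoizing the same recursion makes it linear in n — a sketch, with functools standing in for a hand-rolled cache.

# Memoized version of the record's exponential recursive_method.
# lru_cache trades O(2^n) calls for O(n) cached subproblems.
from functools import lru_cache

@lru_cache(maxsize=None)
def climb_stairs(n: int) -> int:
    if n <= 2:
        return n  # mirrors the record's base cases for n = 0, 1, and 2
    return climb_stairs(n - 1) + climb_stairs(n - 2)

assert climb_stairs(10) == 89  # agrees with the iterative Fibonacci variant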
self.assertFalse(e._CompareNonWildcard('/virtual/some-identifier/test'))\n self.assertFalse(e._CompareNonWildcard('virtual/some/malformed/test'))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000328", "length_bytes": 10707, "license_type": "permissive", "methods": [{"docstring": "Tests that wildcard comparisons work as expected.", "name": "testCompareWildcard", "signature": "def testCompareWildcard(self)"}, {"docstring": "Tests that non-wildcard comparisons work as expected.", "name": "testCompareNonWildcard", "signature": "def testCompareNonWildcard(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003684", "prompt": "Implement the Python class `WebTestExpectationUnittest` described below.\n\nClass description:\nImplement the WebTestExpectationUnittest class.\n\nMethod signatures and docstrings:\n- def testCompareWildcard(self): Tests that wildcard comparisons work as expected.\n- def testCompareNonWildcard(self): Tests that non-wildcard comparisons work as expected.", "prompted_full_text": "Implement the Python class `WebTestExpectationUnittest` described below.\n\nClass description:\nImplement the WebTestExpectationUnittest class.\n\nMethod signatures and docstrings:\n- def testCompareWildcard(self): Tests that wildcard comparisons work as expected.\n- def testCompareNonWildcard(self): Tests that non-wildcard comparisons work as expected.\n\n<|skeleton|>\nclass WebTestExpectationUnittest:\n\n def testCompareWildcard(self):\n \"\"\"Tests that wildcard comparisons work as expected.\"\"\"\n <|body_0|>\n\n def testCompareNonWildcard(self):\n \"\"\"Tests that non-wildcard comparisons work as expected.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n e = data_types.WebTestExpectation('test*', ['tag1'], 'Failure')\n self.assertTrue(e._CompareWildcard('testing123'))\n self.assertTrue(e._CompareWildcard('virtual/some-identifier/testing123'))\n self.assertTrue(e._CompareWildcard('test'))\n self.assertTrue(e._CompareWildcard('virtual/some-identifier/test'))\n self.assertFalse(e._CompareWildcard('tes'))\n self.assertFalse(e._CompareWildcard('/virtual/some-identifier/test'))\n self.assertFalse(e._CompareWildcard('virtual/some/malformed/test'))\n<|end_body_0|>\n\n<|body_start_1|>\n e = data_types.WebTestExpectation('test', ['tag1'], 'Failure')\n self.assertTrue(e._CompareNonWildcard('test'))\n self.assertTrue(e._CompareNonWildcard('virtual/some-identifier/test'))\n self.assertFalse(e._CompareNonWildcard('tes'))\n self.assertFalse(e._CompareNonWildcard('/virtual/some-identifier/test'))\n self.assertFalse(e._CompareNonWildcard('virtual/some/malformed/test'))\n<|end_body_1|>\n", "revision_id": "fd8a8914ca0183f0add65ae55f04e287543c7d4a", "skeleton": "<|skeleton|>\nclass WebTestExpectationUnittest:\n\n def testCompareWildcard(self):\n \"\"\"Tests that wildcard comparisons work as expected.\"\"\"\n <|body_0|>\n\n def testCompareNonWildcard(self):\n \"\"\"Tests that non-wildcard comparisons work as expected.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class WebTestExpectationUnittest:\n def testCompareWildcard(self):\n \"\"\"Tests that wildcard comparisons work as expected.\"\"\"\n e = data_types.WebTestExpectation('test*', ['tag1'], 'Failure')\n self.assertTrue(e._CompareWildcard('testing123'))\n self.assertTrue(e._CompareWildcard('virtual/some-identifier/testing123'))\n self.assertTrue(e._CompareWildcard('test'))\n 
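The assertions in the WebTestExpectation record above pin down the matching contract: a test name matches either directly or under exactly one virtual/<identifier>/ prefix, and 'test*' is a trailing wildcard. A small stand-in that satisfies the same assertions — an illustrative reimplementation, not Chromium's code.

# Illustrative matcher satisfying the record's assertions; names are made up.
import fnmatch
import re

def matches(pattern, name):
    m = re.match(r'virtual/[^/]+/(.*)$', name)  # strip one virtual/<id>/ prefix
    if m:
        name = m.group(1)
    elif name.startswith('/'):
        return False  # '/virtual/...' is malformed per the tests
    if pattern.endswith('*'):
        return fnmatch.fnmatch(name, pattern)   # wildcard comparison
    return name == pattern                      # non-wildcard comparison

assert matches('test*', 'virtual/some-identifier/testing123')
assert not matches('test*', 'virtual/some/malformed/test')
assert matches('test', 'virtual/some-identifier/test')
assert not matches('test', 'tes')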
self.assertTrue(e._CompareWildcard('virtual/some-identifier/test'))\n self.assertFalse(e._CompareWildcard('tes'))\n self.assertFalse(e._CompareWildcard('/virtual/some-identifier/test'))\n self.assertFalse(e._CompareWildcard('virtual/some/malformed/test'))\n\n def testCompareNonWildcard(self):\n \"\"\"Tests that non-wildcard comparisons work as expected.\"\"\"\n e = data_types.WebTestExpectation('test', ['tag1'], 'Failure')\n self.assertTrue(e._CompareNonWildcard('test'))\n self.assertTrue(e._CompareNonWildcard('virtual/some-identifier/test'))\n self.assertFalse(e._CompareNonWildcard('tes'))\n self.assertFalse(e._CompareNonWildcard('/virtual/some-identifier/test'))\n self.assertFalse(e._CompareNonWildcard('virtual/some/malformed/test'))\n", "source": "the_stack_v2_python_sparse", "source_path": "third_party/blink/tools/blinkpy/web_tests/stale_expectation_removal/data_types_unittest.py", "source_repo": "SREERAGI18/chromium", "split": "val", "star_events_count": 1} {"blob_id": "f4d8b32220926433d2d1a23a2e1371ff284c648b", "bodies": ["super(PatchMerging, self).__init__()\nself.normalization: nn.Module = nn.LayerNorm(normalized_shape=4 * in_channels)\nself.linear_mapping: nn.Module = nn.Linear(in_features=4 * in_channels, out_features=2 * in_channels, bias=False)", "batch_size, channels, height, width = input.shape\ninput: torch.Tensor = bchw_to_bhwc(input)\ninput: torch.Tensor = input.unfold(dimension=1, size=2, step=2).unfold(dimension=2, size=2, step=2)\ninput: torch.Tensor = input.reshape(batch_size, input.shape[1], input.shape[2], -1)\ninput: torch.Tensor = self.normalization(input)\noutput: torch.Tensor = bhwc_to_bchw(self.linear_mapping(input))\nreturn output"], "bodies_text": "<|body_start_0|>\n super(PatchMerging, self).__init__()\n self.normalization: nn.Module = nn.LayerNorm(normalized_shape=4 * in_channels)\n self.linear_mapping: nn.Module = nn.Linear(in_features=4 * in_channels, out_features=2 * in_channels, bias=False)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size, channels, height, width = input.shape\n input: torch.Tensor = bchw_to_bhwc(input)\n input: torch.Tensor = input.unfold(dimension=1, size=2, step=2).unfold(dimension=2, size=2, step=2)\n input: torch.Tensor = input.reshape(batch_size, input.shape[1], input.shape[2], -1)\n input: torch.Tensor = self.normalization(input)\n output: torch.Tensor = bhwc_to_bchw(self.linear_mapping(input))\n return output\n<|end_body_1|>\n", "class_docstring": "This class implements the patch merging approach which is essential a strided convolution with normalization before", "class_name": "PatchMerging", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PatchMerging:\n \"\"\"This class implements the patch merging approach which is essential a strided convolution with normalization before\"\"\"\n\n def __init__(self, in_channels: int) -> None:\n \"\"\"Constructor method :param in_channels: (int) Number of input channels\"\"\"\n <|body_0|>\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass :param input: (torch.Tensor) Input tensor of the shape [batch size, in channels, height, width] :return: (torch.Tensor) Output tensor of the shape [batch size, 2 * in channels, height // 2, width // 2]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(PatchMerging, self).__init__()\n self.normalization: nn.Module = nn.LayerNorm(normalized_shape=4 * in_channels)\n self.linear_mapping: nn.Module = nn.Linear(in_features=4 * in_channels, 
out_features=2 * in_channels, bias=False)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size, channels, height, width = input.shape\n input: torch.Tensor = bchw_to_bhwc(input)\n input: torch.Tensor = input.unfold(dimension=1, size=2, step=2).unfold(dimension=2, size=2, step=2)\n input: torch.Tensor = input.reshape(batch_size, input.shape[1], input.shape[2], -1)\n input: torch.Tensor = self.normalization(input)\n output: torch.Tensor = bhwc_to_bchw(self.linear_mapping(input))\n return output\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000329", "length_bytes": 41159, "license_type": "no_license", "methods": [{"docstring": "Constructor method :param in_channels: (int) Number of input channels", "name": "__init__", "signature": "def __init__(self, in_channels: int) -> None"}, {"docstring": "Forward pass :param input: (torch.Tensor) Input tensor of the shape [batch size, in channels, height, width] :return: (torch.Tensor) Output tensor of the shape [batch size, 2 * in channels, height // 2, width // 2]", "name": "forward", "signature": "def forward(self, input: torch.Tensor) -> torch.Tensor"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004890", "prompt": "Implement the Python class `PatchMerging` described below.\n\nClass description:\nThis class implements the patch merging approach which is essential a strided convolution with normalization before\n\nMethod signatures and docstrings:\n- def __init__(self, in_channels: int) -> None: Constructor method :param in_channels: (int) Number of input channels\n- def forward(self, input: torch.Tensor) -> torch.Tensor: Forward pass :param input: (torch.Tensor) Input tensor of the shape [batch size, in channels, height, width] :return: (torch.Tensor) Output tensor of the shape [batch size, 2 * in channels, height // 2, width // 2]", "prompted_full_text": "Implement the Python class `PatchMerging` described below.\n\nClass description:\nThis class implements the patch merging approach which is essential a strided convolution with normalization before\n\nMethod signatures and docstrings:\n- def __init__(self, in_channels: int) -> None: Constructor method :param in_channels: (int) Number of input channels\n- def forward(self, input: torch.Tensor) -> torch.Tensor: Forward pass :param input: (torch.Tensor) Input tensor of the shape [batch size, in channels, height, width] :return: (torch.Tensor) Output tensor of the shape [batch size, 2 * in channels, height // 2, width // 2]\n\n<|skeleton|>\nclass PatchMerging:\n \"\"\"This class implements the patch merging approach which is essential a strided convolution with normalization before\"\"\"\n\n def __init__(self, in_channels: int) -> None:\n \"\"\"Constructor method :param in_channels: (int) Number of input channels\"\"\"\n <|body_0|>\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass :param input: (torch.Tensor) Input tensor of the shape [batch size, in channels, height, width] :return: (torch.Tensor) Output tensor of the shape [batch size, 2 * in channels, height // 2, width // 2]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(PatchMerging, self).__init__()\n self.normalization: nn.Module = nn.LayerNorm(normalized_shape=4 * in_channels)\n self.linear_mapping: nn.Module = nn.Linear(in_features=4 * in_channels, out_features=2 * in_channels, bias=False)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size, channels, height, width = input.shape\n input: torch.Tensor = bchw_to_bhwc(input)\n input: torch.Tensor = 
input.unfold(dimension=1, size=2, step=2).unfold(dimension=2, size=2, step=2)\n input: torch.Tensor = input.reshape(batch_size, input.shape[1], input.shape[2], -1)\n input: torch.Tensor = self.normalization(input)\n output: torch.Tensor = bhwc_to_bchw(self.linear_mapping(input))\n return output\n<|end_body_1|>\n", "revision_id": "7e55a422588c1d1e00f35a3d3a3ff896cce59e18", "skeleton": "<|skeleton|>\nclass PatchMerging:\n \"\"\"This class implements the patch merging approach which is essential a strided convolution with normalization before\"\"\"\n\n def __init__(self, in_channels: int) -> None:\n \"\"\"Constructor method :param in_channels: (int) Number of input channels\"\"\"\n <|body_0|>\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass :param input: (torch.Tensor) Input tensor of the shape [batch size, in channels, height, width] :return: (torch.Tensor) Output tensor of the shape [batch size, 2 * in channels, height // 2, width // 2]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PatchMerging:\n \"\"\"This class implements the patch merging approach which is essential a strided convolution with normalization before\"\"\"\n\n def __init__(self, in_channels: int) -> None:\n \"\"\"Constructor method :param in_channels: (int) Number of input channels\"\"\"\n super(PatchMerging, self).__init__()\n self.normalization: nn.Module = nn.LayerNorm(normalized_shape=4 * in_channels)\n self.linear_mapping: nn.Module = nn.Linear(in_features=4 * in_channels, out_features=2 * in_channels, bias=False)\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass :param input: (torch.Tensor) Input tensor of the shape [batch size, in channels, height, width] :return: (torch.Tensor) Output tensor of the shape [batch size, 2 * in channels, height // 2, width // 2]\"\"\"\n batch_size, channels, height, width = input.shape\n input: torch.Tensor = bchw_to_bhwc(input)\n input: torch.Tensor = input.unfold(dimension=1, size=2, step=2).unfold(dimension=2, size=2, step=2)\n input: torch.Tensor = input.reshape(batch_size, input.shape[1], input.shape[2], -1)\n input: torch.Tensor = self.normalization(input)\n output: torch.Tensor = bhwc_to_bchw(self.linear_mapping(input))\n return output\n", "source": "the_stack_v2_python_sparse", "source_path": "generated/test_ChristophReich1996_Swin_Transformer_V2.py", "source_repo": "jansel/pytorch-jit-paritybench", "split": "val", "star_events_count": 35} {"blob_id": "a5d7de3a132d7119ee8682384c55f22ad4f90792", "bodies": ["size, res, path = (len(candidates), [], [])\ncandidates.sort()\n\ndef dfs(candidates, begin, target, res, path):\n if target == 0:\n res.append(path[:])\n return\n for i in range(begin, size):\n if candidates[i] > target:\n break\n if i > begin and candidates[i] == candidates[i - 1]:\n continue\n path.append(candidates[i])\n dfs(candidates, i + 1, target - candidates[i], res, path)\n path.pop()\ndfs(candidates, 0, target, res, path)\nreturn res", "size, count, res, path = (len(candidates), 0, [], [])\ncandidates.sort()\n\ndef dfs(candidates, begin, count, res, path):\n if count == target:\n res.append(path[:])\n return\n for i in range(begin, size):\n if candidates[i] > target:\n break\n if candidates[i] + count > target:\n continue\n if i > begin and candidates[i] == candidates[i - 1]:\n continue\n path.append(candidates[i])\n dfs(candidates, i + 1, sum(path), res, path)\n count -= 
candidates[i]\n path.pop()\ndfs(candidates, 0, count, res, path)\nreturn res"], "bodies_text": "<|body_start_0|>\n size, res, path = (len(candidates), [], [])\n candidates.sort()\n\n def dfs(candidates, begin, target, res, path):\n if target == 0:\n res.append(path[:])\n return\n for i in range(begin, size):\n if candidates[i] > target:\n break\n if i > begin and candidates[i] == candidates[i - 1]:\n continue\n path.append(candidates[i])\n dfs(candidates, i + 1, target - candidates[i], res, path)\n path.pop()\n dfs(candidates, 0, target, res, path)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n size, count, res, path = (len(candidates), 0, [], [])\n candidates.sort()\n\n def dfs(candidates, begin, count, res, path):\n if count == target:\n res.append(path[:])\n return\n for i in range(begin, size):\n if candidates[i] > target:\n break\n if candidates[i] + count > target:\n continue\n if i > begin and candidates[i] == candidates[i - 1]:\n continue\n path.append(candidates[i])\n dfs(candidates, i + 1, sum(path), res, path)\n count -= candidates[i]\n path.pop()\n dfs(candidates, 0, count, res, path)\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:\n \"\"\"减法思维\"\"\"\n <|body_0|>\n\n def combinationSum2_1(self, candidates: List[int], target: int) -> List[List[int]]:\n \"\"\"加法思维\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n size, res, path = (len(candidates), [], [])\n candidates.sort()\n\n def dfs(candidates, begin, target, res, path):\n if target == 0:\n res.append(path[:])\n return\n for i in range(begin, size):\n if candidates[i] > target:\n break\n if i > begin and candidates[i] == candidates[i - 1]:\n continue\n path.append(candidates[i])\n dfs(candidates, i + 1, target - candidates[i], res, path)\n path.pop()\n dfs(candidates, 0, target, res, path)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n size, count, res, path = (len(candidates), 0, [], [])\n candidates.sort()\n\n def dfs(candidates, begin, count, res, path):\n if count == target:\n res.append(path[:])\n return\n for i in range(begin, size):\n if candidates[i] > target:\n break\n if candidates[i] + count > target:\n continue\n if i > begin and candidates[i] == candidates[i - 1]:\n continue\n path.append(candidates[i])\n dfs(candidates, i + 1, sum(path), res, path)\n count -= candidates[i]\n path.pop()\n dfs(candidates, 0, count, res, path)\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000330", "length_bytes": 2215, "license_type": "no_license", "methods": [{"docstring": "减法思维", "name": "combinationSum2", "signature": "def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]"}, {"docstring": "加法思维", "name": "combinationSum2_1", "signature": "def combinationSum2_1(self, candidates: List[int], target: int) -> List[List[int]]"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]: 减法思维\n- def combinationSum2_1(self, candidates: List[int], target: int) -> List[List[int]]: 加法思维", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod 
signatures and docstrings:\n- def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]: 减法思维\n- def combinationSum2_1(self, candidates: List[int], target: int) -> List[List[int]]: 加法思维\n\n<|skeleton|>\nclass Solution:\n\n def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:\n \"\"\"减法思维\"\"\"\n <|body_0|>\n\n def combinationSum2_1(self, candidates: List[int], target: int) -> List[List[int]]:\n \"\"\"加法思维\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n size, res, path = (len(candidates), [], [])\n candidates.sort()\n\n def dfs(candidates, begin, target, res, path):\n if target == 0:\n res.append(path[:])\n return\n for i in range(begin, size):\n if candidates[i] > target:\n break\n if i > begin and candidates[i] == candidates[i - 1]:\n continue\n path.append(candidates[i])\n dfs(candidates, i + 1, target - candidates[i], res, path)\n path.pop()\n dfs(candidates, 0, target, res, path)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n size, count, res, path = (len(candidates), 0, [], [])\n candidates.sort()\n\n def dfs(candidates, begin, count, res, path):\n if count == target:\n res.append(path[:])\n return\n for i in range(begin, size):\n if candidates[i] > target:\n break\n if candidates[i] + count > target:\n continue\n if i > begin and candidates[i] == candidates[i - 1]:\n continue\n path.append(candidates[i])\n dfs(candidates, i + 1, sum(path), res, path)\n count -= candidates[i]\n path.pop()\n dfs(candidates, 0, count, res, path)\n return res\n<|end_body_1|>\n", "revision_id": "3508e1ce089131b19603c3206aab4cf43023bb19", "skeleton": "<|skeleton|>\nclass Solution:\n\n def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:\n \"\"\"减法思维\"\"\"\n <|body_0|>\n\n def combinationSum2_1(self, candidates: List[int], target: int) -> List[List[int]]:\n \"\"\"加法思维\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:\n \"\"\"减法思维\"\"\"\n size, res, path = (len(candidates), [], [])\n candidates.sort()\n\n def dfs(candidates, begin, target, res, path):\n if target == 0:\n res.append(path[:])\n return\n for i in range(begin, size):\n if candidates[i] > target:\n break\n if i > begin and candidates[i] == candidates[i - 1]:\n continue\n path.append(candidates[i])\n dfs(candidates, i + 1, target - candidates[i], res, path)\n path.pop()\n dfs(candidates, 0, target, res, path)\n return res\n\n def combinationSum2_1(self, candidates: List[int], target: int) -> List[List[int]]:\n \"\"\"加法思维\"\"\"\n size, count, res, path = (len(candidates), 0, [], [])\n candidates.sort()\n\n def dfs(candidates, begin, count, res, path):\n if count == target:\n res.append(path[:])\n return\n for i in range(begin, size):\n if candidates[i] > target:\n break\n if candidates[i] + count > target:\n continue\n if i > begin and candidates[i] == candidates[i - 1]:\n continue\n path.append(candidates[i])\n dfs(candidates, i + 1, sum(path), res, path)\n count -= candidates[i]\n path.pop()\n dfs(candidates, 0, count, res, path)\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "algorithm/leetcode/backtracking/07-组合总和Ⅱ.py", "source_repo": "lxconfig/UbuntuCode_bak", "split": "val", "star_events_count": 0} {"blob_id": "141af823bbc459f86c8522375ad450a9df1fa58c", "bodies": ["self.eggForms = forms\nself.FormCls = 
FormCls\nself.conceptIdToForm = {}\nself.languageWrapper = languageWrapper", "for eggForm in self.eggForms:\n if eggForm.conceptId not in self.conceptIdToForm:\n form = self.FormCls(text=eggForm.text, concept=concepts[eggForm.conceptId], language=self.languageWrapper.language)\n self.conceptIdToForm[eggForm.conceptId] = form\n server.db.session.add(form)", "for eggForm in self.eggForms:\n form = self.FormCls.query.filter_by(text=eggForm.text, language=self.languageWrapper.language).first()\n if form is not None:\n self.conceptIdToForm[eggForm.conceptId] = form\nreturn self.conceptIdToForm"], "bodies_text": "<|body_start_0|>\n self.eggForms = forms\n self.FormCls = FormCls\n self.conceptIdToForm = {}\n self.languageWrapper = languageWrapper\n<|end_body_0|>\n\n<|body_start_1|>\n for eggForm in self.eggForms:\n if eggForm.conceptId not in self.conceptIdToForm:\n form = self.FormCls(text=eggForm.text, concept=concepts[eggForm.conceptId], language=self.languageWrapper.language)\n self.conceptIdToForm[eggForm.conceptId] = form\n server.db.session.add(form)\n<|end_body_1|>\n\n<|body_start_2|>\n for eggForm in self.eggForms:\n form = self.FormCls.query.filter_by(text=eggForm.text, language=self.languageWrapper.language).first()\n if form is not None:\n self.conceptIdToForm[eggForm.conceptId] = form\n return self.conceptIdToForm\n<|end_body_2|>\n", "class_docstring": "Wrapper to handle properly loading the various concept forms", "class_name": "FormsWrapper", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FormsWrapper:\n \"\"\"Wrapper to handle properly loading the various concept forms\"\"\"\n\n def __init__(self, forms, FormCls, languageWrapper):\n \"\"\"Initialize the Forms Wrapper with the forms from the egg and the corresponding Model Class\"\"\"\n <|body_0|>\n\n def load(self, concepts):\n \"\"\"Load the forms\"\"\"\n <|body_1|>\n\n def find(self):\n \"\"\"Find the existing forms\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.eggForms = forms\n self.FormCls = FormCls\n self.conceptIdToForm = {}\n self.languageWrapper = languageWrapper\n<|end_body_0|>\n\n<|body_start_1|>\n for eggForm in self.eggForms:\n if eggForm.conceptId not in self.conceptIdToForm:\n form = self.FormCls(text=eggForm.text, concept=concepts[eggForm.conceptId], language=self.languageWrapper.language)\n self.conceptIdToForm[eggForm.conceptId] = form\n server.db.session.add(form)\n<|end_body_1|>\n\n<|body_start_2|>\n for eggForm in self.eggForms:\n form = self.FormCls.query.filter_by(text=eggForm.text, language=self.languageWrapper.language).first()\n if form is not None:\n self.conceptIdToForm[eggForm.conceptId] = form\n return self.conceptIdToForm\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000331", "length_bytes": 1219, "license_type": "no_license", "methods": [{"docstring": "Initialize the Forms Wrapper with the forms from the egg and the corresponding Model Class", "name": "__init__", "signature": "def __init__(self, forms, FormCls, languageWrapper)"}, {"docstring": "Load the forms", "name": "load", "signature": "def load(self, concepts)"}, {"docstring": "Find the existing forms", "name": "find", "signature": "def find(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004571", "prompt": "Implement the Python class `FormsWrapper` described below.\n\nClass description:\nWrapper to handle properly loading the various concept forms\n\nMethod signatures and docstrings:\n- def __init__(self, forms, 
FormCls, languageWrapper): Initialize the Forms Wrapper with the forms from the egg and the corresponding Model Class\n- def load(self, concepts): Load the forms\n- def find(self): Find the existing forms", "prompted_full_text": "Implement the Python class `FormsWrapper` described below.\n\nClass description:\nWrapper to handle properly loading the various concept forms\n\nMethod signatures and docstrings:\n- def __init__(self, forms, FormCls, languageWrapper): Initialize the Forms Wrapper with the forms from the egg and the corresponding Model Class\n- def load(self, concepts): Load the forms\n- def find(self): Find the existing forms\n\n<|skeleton|>\nclass FormsWrapper:\n \"\"\"Wrapper to handle properly loading the various concept forms\"\"\"\n\n def __init__(self, forms, FormCls, languageWrapper):\n \"\"\"Initialize the Forms Wrapper with the forms from the egg and the corresponding Model Class\"\"\"\n <|body_0|>\n\n def load(self, concepts):\n \"\"\"Load the forms\"\"\"\n <|body_1|>\n\n def find(self):\n \"\"\"Find the existing forms\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.eggForms = forms\n self.FormCls = FormCls\n self.conceptIdToForm = {}\n self.languageWrapper = languageWrapper\n<|end_body_0|>\n\n<|body_start_1|>\n for eggForm in self.eggForms:\n if eggForm.conceptId not in self.conceptIdToForm:\n form = self.FormCls(text=eggForm.text, concept=concepts[eggForm.conceptId], language=self.languageWrapper.language)\n self.conceptIdToForm[eggForm.conceptId] = form\n server.db.session.add(form)\n<|end_body_1|>\n\n<|body_start_2|>\n for eggForm in self.eggForms:\n form = self.FormCls.query.filter_by(text=eggForm.text, language=self.languageWrapper.language).first()\n if form is not None:\n self.conceptIdToForm[eggForm.conceptId] = form\n return self.conceptIdToForm\n<|end_body_2|>\n", "revision_id": "f08dc4465b7e4fb32235e1647c46edd4472f9093", "skeleton": "<|skeleton|>\nclass FormsWrapper:\n \"\"\"Wrapper to handle properly loading the various concept forms\"\"\"\n\n def __init__(self, forms, FormCls, languageWrapper):\n \"\"\"Initialize the Forms Wrapper with the forms from the egg and the corresponding Model Class\"\"\"\n <|body_0|>\n\n def load(self, concepts):\n \"\"\"Load the forms\"\"\"\n <|body_1|>\n\n def find(self):\n \"\"\"Find the existing forms\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FormsWrapper:\n \"\"\"Wrapper to handle properly loading the various concept forms\"\"\"\n\n def __init__(self, forms, FormCls, languageWrapper):\n \"\"\"Initialize the Forms Wrapper with the forms from the egg and the corresponding Model Class\"\"\"\n self.eggForms = forms\n self.FormCls = FormCls\n self.conceptIdToForm = {}\n self.languageWrapper = languageWrapper\n\n def load(self, concepts):\n \"\"\"Load the forms\"\"\"\n for eggForm in self.eggForms:\n if eggForm.conceptId not in self.conceptIdToForm:\n form = self.FormCls(text=eggForm.text, concept=concepts[eggForm.conceptId], language=self.languageWrapper.language)\n self.conceptIdToForm[eggForm.conceptId] = form\n server.db.session.add(form)\n\n def find(self):\n \"\"\"Find the existing forms\"\"\"\n for eggForm in self.eggForms:\n form = self.FormCls.query.filter_by(text=eggForm.text, language=self.languageWrapper.language).first()\n if form is not None:\n self.conceptIdToForm[eggForm.conceptId] = form\n return self.conceptIdToForm\n", "source": "the_stack_v2_python_sparse", 
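Reaching back to the PatchMerging record a few entries above: its unfold calls with size=2 and step=2 gather every 2x2 patch, turning [B, C, H, W] into [B, H/2, W/2, 4C] ahead of the LayerNorm and the 4C -> 2C linear map. A shape walk-through with made-up dimensions, assuming PyTorch; permute stands in for the record's bchw_to_bhwc / bhwc_to_bchw helpers.

# Shape walk-through of PatchMerging with toy sizes (B=2, C=8, H=W=16).
import torch

x = torch.randn(2, 8, 16, 16)             # [B, C, H, W]
x = x.permute(0, 2, 3, 1)                 # bchw -> bhwc: [2, 16, 16, 8]
x = x.unfold(1, 2, 2).unfold(2, 2, 2)     # gather 2x2 patches: [2, 8, 8, 8, 2, 2]
x = x.reshape(2, 8, 8, -1)                # flatten each patch: [2, 8, 8, 32] = 4*C
x = torch.nn.LayerNorm(32)(x)             # normalize over the merged channels
x = torch.nn.Linear(32, 16, bias=False)(x)    # linear map: 4*C -> 2*C
print(x.permute(0, 3, 1, 2).shape)        # torch.Size([2, 16, 8, 8]): 2C, H/2, W/2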
"source_path": "src/Import/forms_wrapper.py", "source_repo": "cloew/VocabTester", "split": "val", "star_events_count": 0} {"blob_id": "93fa287d8e673dd274278ef31673bb2cdb52b300", "bodies": ["base.Action.__init__(self, self.__loadAtlas)\nself.__overlayList = overlayList\nself.__displayCtx = displayCtx\nself.__frame = frame", "if len(atlases.listAtlases()) == 0:\n atlases.rescanAtlases()\nloadAtlas(self.__frame)"], "bodies_text": "<|body_start_0|>\n base.Action.__init__(self, self.__loadAtlas)\n self.__overlayList = overlayList\n self.__displayCtx = displayCtx\n self.__frame = frame\n<|end_body_0|>\n\n<|body_start_1|>\n if len(atlases.listAtlases()) == 0:\n atlases.rescanAtlases()\n loadAtlas(self.__frame)\n<|end_body_1|>\n", "class_docstring": "The ``LoadAtlasAction`` prompts the user to select a FSL atlas specification file. This file is then passed to the :func:`.fsl.data.atlases.addAtlas` function, to add the atlas to the :class:`.AtlasRegistry`.", "class_name": "LoadAtlasAction", "detected_licenses": ["BSD-3-Clause", "CC-BY-3.0", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LoadAtlasAction:\n \"\"\"The ``LoadAtlasAction`` prompts the user to select a FSL atlas specification file. This file is then passed to the :func:`.fsl.data.atlases.addAtlas` function, to add the atlas to the :class:`.AtlasRegistry`.\"\"\"\n\n def __init__(self, overlayList, displayCtx, frame):\n \"\"\"Create a ``LoadAtlasAction``. :arg overlayList: The :class:`.OverlayList`. :arg displayCtx: The :class:`.DisplayContext`. :arg frame: The :class:`.FSLeyesFrame`.\"\"\"\n <|body_0|>\n\n def __loadAtlas(self):\n \"\"\"Calls the :func:`loadAtlas` function.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n base.Action.__init__(self, self.__loadAtlas)\n self.__overlayList = overlayList\n self.__displayCtx = displayCtx\n self.__frame = frame\n<|end_body_0|>\n\n<|body_start_1|>\n if len(atlases.listAtlases()) == 0:\n atlases.rescanAtlases()\n loadAtlas(self.__frame)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000332", "length_bytes": 2121, "license_type": "permissive", "methods": [{"docstring": "Create a ``LoadAtlasAction``. :arg overlayList: The :class:`.OverlayList`. :arg displayCtx: The :class:`.DisplayContext`. :arg frame: The :class:`.FSLeyesFrame`.", "name": "__init__", "signature": "def __init__(self, overlayList, displayCtx, frame)"}, {"docstring": "Calls the :func:`loadAtlas` function.", "name": "__loadAtlas", "signature": "def __loadAtlas(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004038", "prompt": "Implement the Python class `LoadAtlasAction` described below.\n\nClass description:\nThe ``LoadAtlasAction`` prompts the user to select a FSL atlas specification file. This file is then passed to the :func:`.fsl.data.atlases.addAtlas` function, to add the atlas to the :class:`.AtlasRegistry`.\n\nMethod signatures and docstrings:\n- def __init__(self, overlayList, displayCtx, frame): Create a ``LoadAtlasAction``. :arg overlayList: The :class:`.OverlayList`. :arg displayCtx: The :class:`.DisplayContext`. :arg frame: The :class:`.FSLeyesFrame`.\n- def __loadAtlas(self): Calls the :func:`loadAtlas` function.", "prompted_full_text": "Implement the Python class `LoadAtlasAction` described below.\n\nClass description:\nThe ``LoadAtlasAction`` prompts the user to select a FSL atlas specification file. 
This file is then passed to the :func:`.fsl.data.atlases.addAtlas` function, to add the atlas to the :class:`.AtlasRegistry`.\n\nMethod signatures and docstrings:\n- def __init__(self, overlayList, displayCtx, frame): Create a ``LoadAtlasAction``. :arg overlayList: The :class:`.OverlayList`. :arg displayCtx: The :class:`.DisplayContext`. :arg frame: The :class:`.FSLeyesFrame`.\n- def __loadAtlas(self): Calls the :func:`loadAtlas` function.\n\n<|skeleton|>\nclass LoadAtlasAction:\n \"\"\"The ``LoadAtlasAction`` prompts the user to select a FSL atlas specification file. This file is then passed to the :func:`.fsl.data.atlases.addAtlas` function, to add the atlas to the :class:`.AtlasRegistry`.\"\"\"\n\n def __init__(self, overlayList, displayCtx, frame):\n \"\"\"Create a ``LoadAtlasAction``. :arg overlayList: The :class:`.OverlayList`. :arg displayCtx: The :class:`.DisplayContext`. :arg frame: The :class:`.FSLeyesFrame`.\"\"\"\n <|body_0|>\n\n def __loadAtlas(self):\n \"\"\"Calls the :func:`loadAtlas` function.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n base.Action.__init__(self, self.__loadAtlas)\n self.__overlayList = overlayList\n self.__displayCtx = displayCtx\n self.__frame = frame\n<|end_body_0|>\n\n<|body_start_1|>\n if len(atlases.listAtlases()) == 0:\n atlases.rescanAtlases()\n loadAtlas(self.__frame)\n<|end_body_1|>\n", "revision_id": "46ccb4fe2b2346eb57576247f49714032b61307a", "skeleton": "<|skeleton|>\nclass LoadAtlasAction:\n \"\"\"The ``LoadAtlasAction`` prompts the user to select a FSL atlas specification file. This file is then passed to the :func:`.fsl.data.atlases.addAtlas` function, to add the atlas to the :class:`.AtlasRegistry`.\"\"\"\n\n def __init__(self, overlayList, displayCtx, frame):\n \"\"\"Create a ``LoadAtlasAction``. :arg overlayList: The :class:`.OverlayList`. :arg displayCtx: The :class:`.DisplayContext`. :arg frame: The :class:`.FSLeyesFrame`.\"\"\"\n <|body_0|>\n\n def __loadAtlas(self):\n \"\"\"Calls the :func:`loadAtlas` function.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LoadAtlasAction:\n \"\"\"The ``LoadAtlasAction`` prompts the user to select a FSL atlas specification file. This file is then passed to the :func:`.fsl.data.atlases.addAtlas` function, to add the atlas to the :class:`.AtlasRegistry`.\"\"\"\n\n def __init__(self, overlayList, displayCtx, frame):\n \"\"\"Create a ``LoadAtlasAction``. :arg overlayList: The :class:`.OverlayList`. :arg displayCtx: The :class:`.DisplayContext`. 
:arg frame: The :class:`.FSLeyesFrame`.\"\"\"\n base.Action.__init__(self, self.__loadAtlas)\n self.__overlayList = overlayList\n self.__displayCtx = displayCtx\n self.__frame = frame\n\n def __loadAtlas(self):\n \"\"\"Calls the :func:`loadAtlas` function.\"\"\"\n if len(atlases.listAtlases()) == 0:\n atlases.rescanAtlases()\n loadAtlas(self.__frame)\n", "source": "the_stack_v2_python_sparse", "source_path": "fsleyes/actions/loadatlas.py", "source_repo": "sanjayankur31/fsleyes", "split": "val", "star_events_count": 1} {"blob_id": "412f56d37953828168a990f05e663165e334e00e", "bodies": ["current_user_reviews = request.user.reviews.all()\nserializer = self.get_serializer(instance=current_user_reviews, many=True)\nreturn Response(serializer.data, status=status.HTTP_200_OK)", "current_user_reviews = request.user.reviews_of.all()\nserializer = self.get_serializer(instance=current_user_reviews, many=True)\nreturn Response(serializer.data, status=status.HTTP_200_OK)"], "bodies_text": "<|body_start_0|>\n current_user_reviews = request.user.reviews.all()\n serializer = self.get_serializer(instance=current_user_reviews, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n current_user_reviews = request.user.reviews_of.all()\n serializer = self.get_serializer(instance=current_user_reviews, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n<|end_body_1|>\n", "class_docstring": "ViewSet for viewing reviews.", "class_name": "UserReviewViewSet", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UserReviewViewSet:\n \"\"\"ViewSet for viewing reviews.\"\"\"\n\n def from_me(self, request, *args, **kwargs):\n \"\"\"Return list of reviews from current user.\"\"\"\n <|body_0|>\n\n def to_me(self, request, *args, **kwargs):\n \"\"\"Return list of reviews about current user.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n current_user_reviews = request.user.reviews.all()\n serializer = self.get_serializer(instance=current_user_reviews, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n current_user_reviews = request.user.reviews_of.all()\n serializer = self.get_serializer(instance=current_user_reviews, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000333", "length_bytes": 3759, "license_type": "no_license", "methods": [{"docstring": "Return list of reviews from current user.", "name": "from_me", "signature": "def from_me(self, request, *args, **kwargs)"}, {"docstring": "Return list of reviews about current user.", "name": "to_me", "signature": "def to_me(self, request, *args, **kwargs)"}], "n_methods": 2, "prompt": "Implement the Python class `UserReviewViewSet` described below.\n\nClass description:\nViewSet for viewing reviews.\n\nMethod signatures and docstrings:\n- def from_me(self, request, *args, **kwargs): Return list of reviews from current user.\n- def to_me(self, request, *args, **kwargs): Return list of reviews about current user.", "prompted_full_text": "Implement the Python class `UserReviewViewSet` described below.\n\nClass description:\nViewSet for viewing reviews.\n\nMethod signatures and docstrings:\n- def from_me(self, request, *args, **kwargs): Return list of reviews from current user.\n- def to_me(self, request, *args, **kwargs): Return list of reviews about current user.\n\n<|skeleton|>\nclass 
UserReviewViewSet:\n \"\"\"ViewSet for viewing reviews.\"\"\"\n\n def from_me(self, request, *args, **kwargs):\n \"\"\"Return list of reviews from current user.\"\"\"\n <|body_0|>\n\n def to_me(self, request, *args, **kwargs):\n \"\"\"Return list of reviews about current user.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n current_user_reviews = request.user.reviews.all()\n serializer = self.get_serializer(instance=current_user_reviews, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n current_user_reviews = request.user.reviews_of.all()\n serializer = self.get_serializer(instance=current_user_reviews, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n<|end_body_1|>\n", "revision_id": "0879ade24685b628624dce06698f8a0afd042000", "skeleton": "<|skeleton|>\nclass UserReviewViewSet:\n \"\"\"ViewSet for viewing reviews.\"\"\"\n\n def from_me(self, request, *args, **kwargs):\n \"\"\"Return list of reviews from current user.\"\"\"\n <|body_0|>\n\n def to_me(self, request, *args, **kwargs):\n \"\"\"Return list of reviews about current user.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UserReviewViewSet:\n \"\"\"ViewSet for viewing reviews.\"\"\"\n\n def from_me(self, request, *args, **kwargs):\n \"\"\"Return list of reviews from current user.\"\"\"\n current_user_reviews = request.user.reviews.all()\n serializer = self.get_serializer(instance=current_user_reviews, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n def to_me(self, request, *args, **kwargs):\n \"\"\"Return list of reviews about current user.\"\"\"\n current_user_reviews = request.user.reviews_of.all()\n serializer = self.get_serializer(instance=current_user_reviews, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n", "source": "the_stack_v2_python_sparse", "source_path": "camp-python-2021-find-me-develop/apps/users/api/views.py", "source_repo": "rhanmar/oi_projects_summer_2021", "split": "val", "star_events_count": 0} {"blob_id": "c29fc0c8b35cdaf1c5cb90ab8cd95eacab858509", "bodies": ["super(ColumnDataChangedEvent, self).__init__(document, setter, callback_invoker)\nself.column_source = column_source\nself.cols = cols", "super(ColumnDataChangedEvent, self).dispatch(receiver)\nif hasattr(receiver, '_column_data_changed'):\n receiver._column_data_changed(self)", "from ..util.serialization import transform_column_source_data\ndata_dict = transform_column_source_data(self.column_source.data, buffers=buffers, cols=self.cols)\nreturn {'kind': 'ColumnDataChanged', 'column_source': self.column_source.ref, 'new': data_dict, 'cols': self.cols}"], "bodies_text": "<|body_start_0|>\n super(ColumnDataChangedEvent, self).__init__(document, setter, callback_invoker)\n self.column_source = column_source\n self.cols = cols\n<|end_body_0|>\n\n<|body_start_1|>\n super(ColumnDataChangedEvent, self).dispatch(receiver)\n if hasattr(receiver, '_column_data_changed'):\n receiver._column_data_changed(self)\n<|end_body_1|>\n\n<|body_start_2|>\n from ..util.serialization import transform_column_source_data\n data_dict = transform_column_source_data(self.column_source.data, buffers=buffers, cols=self.cols)\n return {'kind': 'ColumnDataChanged', 'column_source': self.column_source.ref, 'new': data_dict, 'cols': self.cols}\n<|end_body_2|>\n", "class_docstring": "A concrete event representing 
efficiently replacing *all* existing data for a :class:`~bokeh.models.sources.ColumnDataSource`", "class_name": "ColumnDataChangedEvent", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ColumnDataChangedEvent:\n \"\"\"A concrete event representing efficiently replacing *all* existing data for a :class:`~bokeh.models.sources.ColumnDataSource`\"\"\"\n\n def __init__(self, document, column_source, cols=None, setter=None, callback_invoker=None):\n \"\"\"Args: document (Document) : A Bokeh document that is to be updated. column_source (ColumnDataSource) : cols (list[str]) : optional explicit list of column names to update. If None, all columns will be updated (default: None) setter (ClientSession or ServerSession or None, optional) : This is used to prevent \"boomerang\" updates to Bokeh apps. (default: None) See :class:`~bokeh.document.events.DocumentChangedEvent` for more details. callback_invoker (callable, optional) : A callable that will invoke any Model callbacks that should be executed in response to the change that triggered this event. (default: None)\"\"\"\n <|body_0|>\n\n def dispatch(self, receiver):\n \"\"\"Dispatch handling of this event to a receiver. This method will invoke ``receiver._column_data_changed`` if it exists.\"\"\"\n <|body_1|>\n\n def generate(self, references, buffers):\n \"\"\"Create a JSON representation of this event suitable for sending to clients. .. code-block:: python { 'kind' : 'ColumnDataChanged' 'column_source' : 'new' : 'cols' : } Args: references (dict[str, Model]) : If the event requires references to certain models in order to function, they may be collected here. **This is an \"out\" parameter**. The values it contains will be modified in-place. buffers (set) : If the event needs to supply any additional Bokeh protocol buffers, they may be added to this set. **This is an \"out\" parameter**. The values it contains will be modified in-place.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ColumnDataChangedEvent, self).__init__(document, setter, callback_invoker)\n self.column_source = column_source\n self.cols = cols\n<|end_body_0|>\n\n<|body_start_1|>\n super(ColumnDataChangedEvent, self).dispatch(receiver)\n if hasattr(receiver, '_column_data_changed'):\n receiver._column_data_changed(self)\n<|end_body_1|>\n\n<|body_start_2|>\n from ..util.serialization import transform_column_source_data\n data_dict = transform_column_source_data(self.column_source.data, buffers=buffers, cols=self.cols)\n return {'kind': 'ColumnDataChanged', 'column_source': self.column_source.ref, 'new': data_dict, 'cols': self.cols}\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000334", "length_bytes": 28443, "license_type": "permissive", "methods": [{"docstring": "Args: document (Document) : A Bokeh document that is to be updated. column_source (ColumnDataSource) : cols (list[str]) : optional explicit list of column names to update. If None, all columns will be updated (default: None) setter (ClientSession or ServerSession or None, optional) : This is used to prevent \"boomerang\" updates to Bokeh apps. (default: None) See :class:`~bokeh.document.events.DocumentChangedEvent` for more details. callback_invoker (callable, optional) : A callable that will invoke any Model callbacks that should be executed in response to the change that triggered this event. 
(default: None)", "name": "__init__", "signature": "def __init__(self, document, column_source, cols=None, setter=None, callback_invoker=None)"}, {"docstring": "Dispatch handling of this event to a receiver. This method will invoke ``receiver._column_data_changed`` if it exists.", "name": "dispatch", "signature": "def dispatch(self, receiver)"}, {"docstring": "Create a JSON representation of this event suitable for sending to clients. .. code-block:: python { 'kind' : 'ColumnDataChanged' 'column_source' : 'new' : 'cols' : } Args: references (dict[str, Model]) : If the event requires references to certain models in order to function, they may be collected here. **This is an \"out\" parameter**. The values it contains will be modified in-place. buffers (set) : If the event needs to supply any additional Bokeh protocol buffers, they may be added to this set. **This is an \"out\" parameter**. The values it contains will be modified in-place.", "name": "generate", "signature": "def generate(self, references, buffers)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_000148", "prompt": "Implement the Python class `ColumnDataChangedEvent` described below.\n\nClass description:\nA concrete event representing efficiently replacing *all* existing data for a :class:`~bokeh.models.sources.ColumnDataSource`\n\nMethod signatures and docstrings:\n- def __init__(self, document, column_source, cols=None, setter=None, callback_invoker=None): Args: document (Document) : A Bokeh document that is to be updated. column_source (ColumnDataSource) : cols (list[str]) : optional explicit list of column names to update. If None, all columns will be updated (default: None) setter (ClientSession or ServerSession or None, optional) : This is used to prevent \"boomerang\" updates to Bokeh apps. (default: None) See :class:`~bokeh.document.events.DocumentChangedEvent` for more details. callback_invoker (callable, optional) : A callable that will invoke any Model callbacks that should be executed in response to the change that triggered this event. (default: None)\n- def dispatch(self, receiver): Dispatch handling of this event to a receiver. This method will invoke ``receiver._column_data_changed`` if it exists.\n- def generate(self, references, buffers): Create a JSON representation of this event suitable for sending to clients. .. code-block:: python { 'kind' : 'ColumnDataChanged' 'column_source' : 'new' : 'cols' : } Args: references (dict[str, Model]) : If the event requires references to certain models in order to function, they may be collected here. **This is an \"out\" parameter**. The values it contains will be modified in-place. buffers (set) : If the event needs to supply any additional Bokeh protocol buffers, they may be added to this set. **This is an \"out\" parameter**. The values it contains will be modified in-place.", "prompted_full_text": "Implement the Python class `ColumnDataChangedEvent` described below.\n\nClass description:\nA concrete event representing efficiently replacing *all* existing data for a :class:`~bokeh.models.sources.ColumnDataSource`\n\nMethod signatures and docstrings:\n- def __init__(self, document, column_source, cols=None, setter=None, callback_invoker=None): Args: document (Document) : A Bokeh document that is to be updated. column_source (ColumnDataSource) : cols (list[str]) : optional explicit list of column names to update. 
If None, all columns will be updated (default: None) setter (ClientSession or ServerSession or None, optional) : This is used to prevent \"boomerang\" updates to Bokeh apps. (default: None) See :class:`~bokeh.document.events.DocumentChangedEvent` for more details. callback_invoker (callable, optional) : A callable that will invoke any Model callbacks that should be executed in response to the change that triggered this event. (default: None)\n- def dispatch(self, receiver): Dispatch handling of this event to a receiver. This method will invoke ``receiver._column_data_changed`` if it exists.\n- def generate(self, references, buffers): Create a JSON representation of this event suitable for sending to clients. .. code-block:: python { 'kind' : 'ColumnDataChanged' 'column_source' : 'new' : 'cols' : } Args: references (dict[str, Model]) : If the event requires references to certain models in order to function, they may be collected here. **This is an \"out\" parameter**. The values it contains will be modified in-place. buffers (set) : If the event needs to supply any additional Bokeh protocol buffers, they may be added to this set. **This is an \"out\" parameter**. The values it contains will be modified in-place.\n\n<|skeleton|>\nclass ColumnDataChangedEvent:\n \"\"\"A concrete event representing efficiently replacing *all* existing data for a :class:`~bokeh.models.sources.ColumnDataSource`\"\"\"\n\n def __init__(self, document, column_source, cols=None, setter=None, callback_invoker=None):\n \"\"\"Args: document (Document) : A Bokeh document that is to be updated. column_source (ColumnDataSource) : cols (list[str]) : optional explicit list of column names to update. If None, all columns will be updated (default: None) setter (ClientSession or ServerSession or None, optional) : This is used to prevent \"boomerang\" updates to Bokeh apps. (default: None) See :class:`~bokeh.document.events.DocumentChangedEvent` for more details. callback_invoker (callable, optional) : A callable that will invoke any Model callbacks that should be executed in response to the change that triggered this event. (default: None)\"\"\"\n <|body_0|>\n\n def dispatch(self, receiver):\n \"\"\"Dispatch handling of this event to a receiver. This method will invoke ``receiver._column_data_changed`` if it exists.\"\"\"\n <|body_1|>\n\n def generate(self, references, buffers):\n \"\"\"Create a JSON representation of this event suitable for sending to clients. .. code-block:: python { 'kind' : 'ColumnDataChanged' 'column_source' : 'new' : 'cols' : } Args: references (dict[str, Model]) : If the event requires references to certain models in order to function, they may be collected here. **This is an \"out\" parameter**. The values it contains will be modified in-place. buffers (set) : If the event needs to supply any additional Bokeh protocol buffers, they may be added to this set. **This is an \"out\" parameter**. 
The values it contains will be modified in-place.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ColumnDataChangedEvent, self).__init__(document, setter, callback_invoker)\n self.column_source = column_source\n self.cols = cols\n<|end_body_0|>\n\n<|body_start_1|>\n super(ColumnDataChangedEvent, self).dispatch(receiver)\n if hasattr(receiver, '_column_data_changed'):\n receiver._column_data_changed(self)\n<|end_body_1|>\n\n<|body_start_2|>\n from ..util.serialization import transform_column_source_data\n data_dict = transform_column_source_data(self.column_source.data, buffers=buffers, cols=self.cols)\n return {'kind': 'ColumnDataChanged', 'column_source': self.column_source.ref, 'new': data_dict, 'cols': self.cols}\n<|end_body_2|>\n", "revision_id": "1ad7ec05fb1e3676ac879585296c513c3ee50ef9", "skeleton": "<|skeleton|>\nclass ColumnDataChangedEvent:\n \"\"\"A concrete event representing efficiently replacing *all* existing data for a :class:`~bokeh.models.sources.ColumnDataSource`\"\"\"\n\n def __init__(self, document, column_source, cols=None, setter=None, callback_invoker=None):\n \"\"\"Args: document (Document) : A Bokeh document that is to be updated. column_source (ColumnDataSource) : cols (list[str]) : optional explicit list of column names to update. If None, all columns will be updated (default: None) setter (ClientSession or ServerSession or None, optional) : This is used to prevent \"boomerang\" updates to Bokeh apps. (default: None) See :class:`~bokeh.document.events.DocumentChangedEvent` for more details. callback_invoker (callable, optional) : A callable that will invoke any Model callbacks that should be executed in response to the change that triggered this event. (default: None)\"\"\"\n <|body_0|>\n\n def dispatch(self, receiver):\n \"\"\"Dispatch handling of this event to a receiver. This method will invoke ``receiver._column_data_changed`` if it exists.\"\"\"\n <|body_1|>\n\n def generate(self, references, buffers):\n \"\"\"Create a JSON representation of this event suitable for sending to clients. .. code-block:: python { 'kind' : 'ColumnDataChanged' 'column_source' : 'new' : 'cols' : } Args: references (dict[str, Model]) : If the event requires references to certain models in order to function, they may be collected here. **This is an \"out\" parameter**. The values it contains will be modified in-place. buffers (set) : If the event needs to supply any additional Bokeh protocol buffers, they may be added to this set. **This is an \"out\" parameter**. The values it contains will be modified in-place.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ColumnDataChangedEvent:\n \"\"\"A concrete event representing efficiently replacing *all* existing data for a :class:`~bokeh.models.sources.ColumnDataSource`\"\"\"\n\n def __init__(self, document, column_source, cols=None, setter=None, callback_invoker=None):\n \"\"\"Args: document (Document) : A Bokeh document that is to be updated. column_source (ColumnDataSource) : cols (list[str]) : optional explicit list of column names to update. If None, all columns will be updated (default: None) setter (ClientSession or ServerSession or None, optional) : This is used to prevent \"boomerang\" updates to Bokeh apps. (default: None) See :class:`~bokeh.document.events.DocumentChangedEvent` for more details. 
callback_invoker (callable, optional) : A callable that will invoke any Model callbacks that should be executed in response to the change that triggered this event. (default: None)\"\"\"\n super(ColumnDataChangedEvent, self).__init__(document, setter, callback_invoker)\n self.column_source = column_source\n self.cols = cols\n\n def dispatch(self, receiver):\n \"\"\"Dispatch handling of this event to a receiver. This method will invoke ``receiver._column_data_changed`` if it exists.\"\"\"\n super(ColumnDataChangedEvent, self).dispatch(receiver)\n if hasattr(receiver, '_column_data_changed'):\n receiver._column_data_changed(self)\n\n def generate(self, references, buffers):\n \"\"\"Create a JSON representation of this event suitable for sending to clients. .. code-block:: python { 'kind' : 'ColumnDataChanged' 'column_source' : 'new' : 'cols' : } Args: references (dict[str, Model]) : If the event requires references to certain models in order to function, they may be collected here. **This is an \"out\" parameter**. The values it contains will be modified in-place. buffers (set) : If the event needs to supply any additional Bokeh protocol buffers, they may be added to this set. **This is an \"out\" parameter**. The values it contains will be modified in-place.\"\"\"\n from ..util.serialization import transform_column_source_data\n data_dict = transform_column_source_data(self.column_source.data, buffers=buffers, cols=self.cols)\n return {'kind': 'ColumnDataChanged', 'column_source': self.column_source.ref, 'new': data_dict, 'cols': self.cols}\n", "source": "the_stack_v2_python_sparse", "source_path": "Library/lib/python3.7/site-packages/bokeh-1.4.0-py3.7.egg/bokeh/document/events.py", "source_repo": "holzschu/Carnets", "split": "val", "star_events_count": 541} {"blob_id": "16921f7040deb60d11601a21e855a58ad6a4a188", "bodies": ["project = Project.query.filter_by(slug=project_slug).first_or_404()\nif not (project.public or current_user.is_authenticated()):\n flask_restful.abort(404)\nreturn project", "try:\n docker_repo_field(project_slug, 'slug')\nexcept ValueError as ex:\n raise WrappedValueError(ex)\nargs = PROJECT_NEW_PARSER.parse_args(strict=True)\nargs = clean_attrs(args)\nargs['slug'] = project_slug\nif 'gitlab_repo_id' in args:\n args['external_auth_token'] = current_user.oauth_token_for('gitlab')\nelif 'github_repo_id' in args:\n args['external_auth_token'] = current_user.oauth_token_for('github')\nif args['utility']:\n ensure_target_registry(True)\nset_target_registry(args)\nreturn self.handle_write(Project(), data=args)", "project = Project.query.filter_by(slug=project_slug).first_or_404()\nargs = PROJECT_EDIT_PARSER.parse_args(strict=True)\nargs = clean_attrs(args)\nif args.get('utility', project.utility):\n ensure_target_registry(False)\nset_target_registry(args)\nreturn self.handle_write(project, data=args)", "project = Project.query.filter_by(slug=project_slug).first_or_404()\nproject_name = project.name\nproject.purge()\nreturn {'message': '%s deleted' % project_name}"], "bodies_text": "<|body_start_0|>\n project = Project.query.filter_by(slug=project_slug).first_or_404()\n if not (project.public or current_user.is_authenticated()):\n flask_restful.abort(404)\n return project\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n docker_repo_field(project_slug, 'slug')\n except ValueError as ex:\n raise WrappedValueError(ex)\n args = PROJECT_NEW_PARSER.parse_args(strict=True)\n args = clean_attrs(args)\n args['slug'] = project_slug\n if 'gitlab_repo_id' in args:\n 
args['external_auth_token'] = current_user.oauth_token_for('gitlab')\n elif 'github_repo_id' in args:\n args['external_auth_token'] = current_user.oauth_token_for('github')\n if args['utility']:\n ensure_target_registry(True)\n set_target_registry(args)\n return self.handle_write(Project(), data=args)\n<|end_body_1|>\n\n<|body_start_2|>\n project = Project.query.filter_by(slug=project_slug).first_or_404()\n args = PROJECT_EDIT_PARSER.parse_args(strict=True)\n args = clean_attrs(args)\n if args.get('utility', project.utility):\n ensure_target_registry(False)\n set_target_registry(args)\n return self.handle_write(project, data=args)\n<|end_body_2|>\n\n<|body_start_3|>\n project = Project.query.filter_by(slug=project_slug).first_or_404()\n project_name = project.name\n project.purge()\n return {'message': '%s deleted' % project_name}\n<|end_body_3|>\n", "class_docstring": "API resource to handle getting project details, creating new projects, updating existing projects, and deleting projects", "class_name": "ProjectDetail", "detected_licenses": ["ISC"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProjectDetail:\n \"\"\"API resource to handle getting project details, creating new projects, updating existing projects, and deleting projects\"\"\"\n\n def get(self, project_slug):\n \"\"\"Get project details\"\"\"\n <|body_0|>\n\n def put(self, project_slug):\n \"\"\"Create a new project\"\"\"\n <|body_1|>\n\n def post(self, project_slug):\n \"\"\"Update an existing project\"\"\"\n <|body_2|>\n\n def delete(self, project_slug):\n \"\"\"Delete a project\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n project = Project.query.filter_by(slug=project_slug).first_or_404()\n if not (project.public or current_user.is_authenticated()):\n flask_restful.abort(404)\n return project\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n docker_repo_field(project_slug, 'slug')\n except ValueError as ex:\n raise WrappedValueError(ex)\n args = PROJECT_NEW_PARSER.parse_args(strict=True)\n args = clean_attrs(args)\n args['slug'] = project_slug\n if 'gitlab_repo_id' in args:\n args['external_auth_token'] = current_user.oauth_token_for('gitlab')\n elif 'github_repo_id' in args:\n args['external_auth_token'] = current_user.oauth_token_for('github')\n if args['utility']:\n ensure_target_registry(True)\n set_target_registry(args)\n return self.handle_write(Project(), data=args)\n<|end_body_1|>\n\n<|body_start_2|>\n project = Project.query.filter_by(slug=project_slug).first_or_404()\n args = PROJECT_EDIT_PARSER.parse_args(strict=True)\n args = clean_attrs(args)\n if args.get('utility', project.utility):\n ensure_target_registry(False)\n set_target_registry(args)\n return self.handle_write(project, data=args)\n<|end_body_2|>\n\n<|body_start_3|>\n project = Project.query.filter_by(slug=project_slug).first_or_404()\n project_name = project.name\n project.purge()\n return {'message': '%s deleted' % project_name}\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000335", "length_bytes": 11089, "license_type": "permissive", "methods": [{"docstring": "Get project details", "name": "get", "signature": "def get(self, project_slug)"}, {"docstring": "Create a new project", "name": "put", "signature": "def put(self, project_slug)"}, {"docstring": "Update an existing project", "name": "post", "signature": "def post(self, project_slug)"}, {"docstring": "Delete a project", "name": "delete", "signature": "def delete(self, project_slug)"}], "n_methods": 4, "original_id": 
"stack_v2_sparse_classes_30k_train_004331", "prompt": "Implement the Python class `ProjectDetail` described below.\n\nClass description:\nAPI resource to handle getting project details, creating new projects, updating existing projects, and deleting projects\n\nMethod signatures and docstrings:\n- def get(self, project_slug): Get project details\n- def put(self, project_slug): Create a new project\n- def post(self, project_slug): Update an existing project\n- def delete(self, project_slug): Delete a project", "prompted_full_text": "Implement the Python class `ProjectDetail` described below.\n\nClass description:\nAPI resource to handle getting project details, creating new projects, updating existing projects, and deleting projects\n\nMethod signatures and docstrings:\n- def get(self, project_slug): Get project details\n- def put(self, project_slug): Create a new project\n- def post(self, project_slug): Update an existing project\n- def delete(self, project_slug): Delete a project\n\n<|skeleton|>\nclass ProjectDetail:\n \"\"\"API resource to handle getting project details, creating new projects, updating existing projects, and deleting projects\"\"\"\n\n def get(self, project_slug):\n \"\"\"Get project details\"\"\"\n <|body_0|>\n\n def put(self, project_slug):\n \"\"\"Create a new project\"\"\"\n <|body_1|>\n\n def post(self, project_slug):\n \"\"\"Update an existing project\"\"\"\n <|body_2|>\n\n def delete(self, project_slug):\n \"\"\"Delete a project\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n project = Project.query.filter_by(slug=project_slug).first_or_404()\n if not (project.public or current_user.is_authenticated()):\n flask_restful.abort(404)\n return project\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n docker_repo_field(project_slug, 'slug')\n except ValueError as ex:\n raise WrappedValueError(ex)\n args = PROJECT_NEW_PARSER.parse_args(strict=True)\n args = clean_attrs(args)\n args['slug'] = project_slug\n if 'gitlab_repo_id' in args:\n args['external_auth_token'] = current_user.oauth_token_for('gitlab')\n elif 'github_repo_id' in args:\n args['external_auth_token'] = current_user.oauth_token_for('github')\n if args['utility']:\n ensure_target_registry(True)\n set_target_registry(args)\n return self.handle_write(Project(), data=args)\n<|end_body_1|>\n\n<|body_start_2|>\n project = Project.query.filter_by(slug=project_slug).first_or_404()\n args = PROJECT_EDIT_PARSER.parse_args(strict=True)\n args = clean_attrs(args)\n if args.get('utility', project.utility):\n ensure_target_registry(False)\n set_target_registry(args)\n return self.handle_write(project, data=args)\n<|end_body_2|>\n\n<|body_start_3|>\n project = Project.query.filter_by(slug=project_slug).first_or_404()\n project_name = project.name\n project.purge()\n return {'message': '%s deleted' % project_name}\n<|end_body_3|>\n", "revision_id": "a4cae55de15a829a3e1b72006f3baa1276e95f30", "skeleton": "<|skeleton|>\nclass ProjectDetail:\n \"\"\"API resource to handle getting project details, creating new projects, updating existing projects, and deleting projects\"\"\"\n\n def get(self, project_slug):\n \"\"\"Get project details\"\"\"\n <|body_0|>\n\n def put(self, project_slug):\n \"\"\"Create a new project\"\"\"\n <|body_1|>\n\n def post(self, project_slug):\n \"\"\"Update an existing project\"\"\"\n <|body_2|>\n\n def delete(self, project_slug):\n \"\"\"Delete a project\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": 
"data/stack_v2_sparse_classes_30k", "solution": "class ProjectDetail:\n \"\"\"API resource to handle getting project details, creating new projects, updating existing projects, and deleting projects\"\"\"\n\n def get(self, project_slug):\n \"\"\"Get project details\"\"\"\n project = Project.query.filter_by(slug=project_slug).first_or_404()\n if not (project.public or current_user.is_authenticated()):\n flask_restful.abort(404)\n return project\n\n def put(self, project_slug):\n \"\"\"Create a new project\"\"\"\n try:\n docker_repo_field(project_slug, 'slug')\n except ValueError as ex:\n raise WrappedValueError(ex)\n args = PROJECT_NEW_PARSER.parse_args(strict=True)\n args = clean_attrs(args)\n args['slug'] = project_slug\n if 'gitlab_repo_id' in args:\n args['external_auth_token'] = current_user.oauth_token_for('gitlab')\n elif 'github_repo_id' in args:\n args['external_auth_token'] = current_user.oauth_token_for('github')\n if args['utility']:\n ensure_target_registry(True)\n set_target_registry(args)\n return self.handle_write(Project(), data=args)\n\n def post(self, project_slug):\n \"\"\"Update an existing project\"\"\"\n project = Project.query.filter_by(slug=project_slug).first_or_404()\n args = PROJECT_EDIT_PARSER.parse_args(strict=True)\n args = clean_attrs(args)\n if args.get('utility', project.utility):\n ensure_target_registry(False)\n set_target_registry(args)\n return self.handle_write(project, data=args)\n\n def delete(self, project_slug):\n \"\"\"Delete a project\"\"\"\n project = Project.query.filter_by(slug=project_slug).first_or_404()\n project_name = project.name\n project.purge()\n return {'message': '%s deleted' % project_name}\n", "source": "the_stack_v2_python_sparse", "source_path": "dockci/api/project.py", "source_repo": "sprucedev/DockCI", "split": "val", "star_events_count": 1} {"blob_id": "43d55ab51133c0af8cccc5c42c97e2115b715a3a", "bodies": ["assert isinstance(root, Node), 'Invalid node %s' % root\nassert isinstance(invoker, Invoker), 'Invalid invoker %s' % invoker\nif invoker.method != GET:\n return False\nif isinstance(invoker.output, Iter):\n assert isinstance(invoker.output, Iter)\n output = invoker.output.itemType\n isList = True\nelse:\n output = invoker.output\n isList = False\nif isinstance(output, (TypeModel, TypeModelProperty)):\n model = output.container\nelse:\n log.info('Cannot extract model from output type %s', output)\n return False\nassert isinstance(model, Model)\nmandatory = [inp for inp in invoker.inputs[:invoker.mandatory] if isinstance(inp.type, TypeModelProperty)]\nextra = [inp for inp in invoker.inputs[invoker.mandatory:] if isinstance(inp.type, TypeModelProperty)]\nnodes = self.nodesFor(root, model, isList, mandatory, extra, invoker.hints)\nif not nodes:\n return False\nfor node in nodes:\n assert isinstance(node, Node)\n node.get = self.processInvokerHints(invoker, node.get)\n log.info('Resolved invoker %s as a get for node %s', invoker, node)\nreturn True", "assert isinstance(root, Node), 'Invalid node %s' % root\nassert isinstance(model, Model), 'Invalid model %s' % model\nassert isinstance(mandatory, list), 'Invalid mandatory list %s' % mandatory\nassert isinstance(optional, list), 'Invalid optional list %s' % optional\nnodes = []\nfor extra in chain(*(combinations(optional, k) for k in range(0, len(optional) + 1))):\n types = list(mandatory)\n types.extend(extra)\n if not self.isModelIn(model, types):\n types.append(model)\n node = self.obtainNode(root, self.processTypesHints(types, hints, isGroup))\n if node:\n 
nodes.append(node)\nreturn nodes"], "bodies_text": "<|body_start_0|>\n assert isinstance(root, Node), 'Invalid node %s' % root\n assert isinstance(invoker, Invoker), 'Invalid invoker %s' % invoker\n if invoker.method != GET:\n return False\n if isinstance(invoker.output, Iter):\n assert isinstance(invoker.output, Iter)\n output = invoker.output.itemType\n isList = True\n else:\n output = invoker.output\n isList = False\n if isinstance(output, (TypeModel, TypeModelProperty)):\n model = output.container\n else:\n log.info('Cannot extract model from output type %s', output)\n return False\n assert isinstance(model, Model)\n mandatory = [inp for inp in invoker.inputs[:invoker.mandatory] if isinstance(inp.type, TypeModelProperty)]\n extra = [inp for inp in invoker.inputs[invoker.mandatory:] if isinstance(inp.type, TypeModelProperty)]\n nodes = self.nodesFor(root, model, isList, mandatory, extra, invoker.hints)\n if not nodes:\n return False\n for node in nodes:\n assert isinstance(node, Node)\n node.get = self.processInvokerHints(invoker, node.get)\n log.info('Resolved invoker %s as a get for node %s', invoker, node)\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n assert isinstance(root, Node), 'Invalid node %s' % root\n assert isinstance(model, Model), 'Invalid model %s' % model\n assert isinstance(mandatory, list), 'Invalid mandatory list %s' % mandatory\n assert isinstance(optional, list), 'Invalid optional list %s' % optional\n nodes = []\n for extra in chain(*(combinations(optional, k) for k in range(0, len(optional) + 1))):\n types = list(mandatory)\n types.extend(extra)\n if not self.isModelIn(model, types):\n types.append(model)\n node = self.obtainNode(root, self.processTypesHints(types, hints, isGroup))\n if node:\n nodes.append(node)\n return nodes\n<|end_body_1|>\n", "class_docstring": "Resolving the GET method invokers. Method signature needs to be flagged with GET and look like: AnyEntity|AnyEntity.Property|Iter(AnyEntity)|Iter(AnyEntity.Id) % ([...AnyEntity.Property]) !!!Attention the order of the mandatory arguments is crucial since based on that the call is placed in the REST Node tree.", "class_name": "AssembleGet", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AssembleGet:\n \"\"\"Resolving the GET method invokers. Method signature needs to be flagged with GET and look like: AnyEntity|AnyEntity.Property|Iter(AnyEntity)|Iter(AnyEntity.Id) % ([...AnyEntity.Property]) !!!Attention the order of the mandatory arguments is crucial since based on that the call is placed in the REST Node tree.\"\"\"\n\n def assembleInvoker(self, root, invoker):\n \"\"\"@see: AssembleOneByOne.assembleInvoker\"\"\"\n <|body_0|>\n\n def nodesFor(self, root, model, isGroup, mandatory, optional, hints):\n \"\"\"Provides all the nodes for the provided model obtained by combining the mandatory and extra types. @param root: Node The root node to assemble to. @param model: Model The model to obtain the nodes for. @param isGroup: boolean Flag indicating that the model is actually provided as a collection. @param mandatory: list[Input] The mandatory inputs. @param optional: list[Input] The optional inputs. 
@param hints: dictionary{string, object} The hints for the invoker.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert isinstance(root, Node), 'Invalid node %s' % root\n assert isinstance(invoker, Invoker), 'Invalid invoker %s' % invoker\n if invoker.method != GET:\n return False\n if isinstance(invoker.output, Iter):\n assert isinstance(invoker.output, Iter)\n output = invoker.output.itemType\n isList = True\n else:\n output = invoker.output\n isList = False\n if isinstance(output, (TypeModel, TypeModelProperty)):\n model = output.container\n else:\n log.info('Cannot extract model from output type %s', output)\n return False\n assert isinstance(model, Model)\n mandatory = [inp for inp in invoker.inputs[:invoker.mandatory] if isinstance(inp.type, TypeModelProperty)]\n extra = [inp for inp in invoker.inputs[invoker.mandatory:] if isinstance(inp.type, TypeModelProperty)]\n nodes = self.nodesFor(root, model, isList, mandatory, extra, invoker.hints)\n if not nodes:\n return False\n for node in nodes:\n assert isinstance(node, Node)\n node.get = self.processInvokerHints(invoker, node.get)\n log.info('Resolved invoker %s as a get for node %s', invoker, node)\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n assert isinstance(root, Node), 'Invalid node %s' % root\n assert isinstance(model, Model), 'Invalid model %s' % model\n assert isinstance(mandatory, list), 'Invalid mandatory list %s' % mandatory\n assert isinstance(optional, list), 'Invalid optional list %s' % optional\n nodes = []\n for extra in chain(*(combinations(optional, k) for k in range(0, len(optional) + 1))):\n types = list(mandatory)\n types.extend(extra)\n if not self.isModelIn(model, types):\n types.append(model)\n node = self.obtainNode(root, self.processTypesHints(types, hints, isGroup))\n if node:\n nodes.append(node)\n return nodes\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000336", "length_bytes": 24625, "license_type": "no_license", "methods": [{"docstring": "@see: AssembleOneByOne.assembleInvoker", "name": "assembleInvoker", "signature": "def assembleInvoker(self, root, invoker)"}, {"docstring": "Provides all the nodes for the provided model obtained by combining the mandatory and extra types. @param root: Node The root node to assemble to. @param model: Model The model to obtain the nodes for. @param isGroup: boolean Flag indicating that the model is actually provided as a collection. @param mandatory: list[Input] The mandatory inputs. @param optional: list[Input] The optional inputs. @param hints: dictionary{string, object} The hints for the invoker.", "name": "nodesFor", "signature": "def nodesFor(self, root, model, isGroup, mandatory, optional, hints)"}], "n_methods": 2, "prompt": "Implement the Python class `AssembleGet` described below.\n\nClass description:\nResolving the GET method invokers. Method signature needs to be flagged with GET and look like: AnyEntity|AnyEntity.Property|Iter(AnyEntity)|Iter(AnyEntity.Id) % ([...AnyEntity.Property]) !!!Attention the order of the mandatory arguments is crucial since based on that the call is placed in the REST Node tree.\n\nMethod signatures and docstrings:\n- def assembleInvoker(self, root, invoker): @see: AssembleOneByOne.assembleInvoker\n- def nodesFor(self, root, model, isGroup, mandatory, optional, hints): Provides all the nodes for the provided model obtained by combining the mandatory and extra types. @param root: Node The root node to assemble to. @param model: Model The model to obtain the nodes for. 
@param isGroup: boolean Flag indicating that the model is actually provided as a collection. @param mandatory: list[Input] The mandatory inputs. @param optional: list[Input] The optional inputs. @param hints: dictionary{string, object} The hints for the invoker.", "prompted_full_text": "Implement the Python class `AssembleGet` described below.\n\nClass description:\nResolving the GET method invokers. Method signature needs to be flagged with GET and look like: AnyEntity|AnyEntity.Property|Iter(AnyEntity)|Iter(AnyEntity.Id) % ([...AnyEntity.Property]) !!!Attention the order of the mandatory arguments is crucial since based on that the call is placed in the REST Node tree.\n\nMethod signatures and docstrings:\n- def assembleInvoker(self, root, invoker): @see: AssembleOneByOne.assembleInvoker\n- def nodesFor(self, root, model, isGroup, mandatory, optional, hints): Provides all the nodes for the provided model obtained by combining the mandatory and extra types. @param root: Node The root node to assemble to. @param model: Model The model to obtain the nodes for. @param isGroup: boolean Flag indicating that the model is actually provided as a collection. @param mandatory: list[Input] The mandatory inputs. @param optional: list[Input] The optional inputs. @param hints: dictionary{string, object} The hints for the invoker.\n\n<|skeleton|>\nclass AssembleGet:\n \"\"\"Resolving the GET method invokers. Method signature needs to be flagged with GET and look like: AnyEntity|AnyEntity.Property|Iter(AnyEntity)|Iter(AnyEntity.Id) % ([...AnyEntity.Property]) !!!Attention the order of the mandatory arguments is crucial since based on that the call is placed in the REST Node tree.\"\"\"\n\n def assembleInvoker(self, root, invoker):\n \"\"\"@see: AssembleOneByOne.assembleInvoker\"\"\"\n <|body_0|>\n\n def nodesFor(self, root, model, isGroup, mandatory, optional, hints):\n \"\"\"Provides all the nodes for the provided model obtained by combining the mandatory and extra types. @param root: Node The root node to assemble to. @param model: Model The model to obtain the nodes for. @param isGroup: boolean Flag indicating that the model is actually provided as a collection. @param mandatory: list[Input] The mandatory inputs. @param optional: list[Input] The optional inputs. 
@param hints: dictionary{string, object} The hints for the invoker.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert isinstance(root, Node), 'Invalid node %s' % root\n assert isinstance(invoker, Invoker), 'Invalid invoker %s' % invoker\n if invoker.method != GET:\n return False\n if isinstance(invoker.output, Iter):\n assert isinstance(invoker.output, Iter)\n output = invoker.output.itemType\n isList = True\n else:\n output = invoker.output\n isList = False\n if isinstance(output, (TypeModel, TypeModelProperty)):\n model = output.container\n else:\n log.info('Cannot extract model from output type %s', output)\n return False\n assert isinstance(model, Model)\n mandatory = [inp for inp in invoker.inputs[:invoker.mandatory] if isinstance(inp.type, TypeModelProperty)]\n extra = [inp for inp in invoker.inputs[invoker.mandatory:] if isinstance(inp.type, TypeModelProperty)]\n nodes = self.nodesFor(root, model, isList, mandatory, extra, invoker.hints)\n if not nodes:\n return False\n for node in nodes:\n assert isinstance(node, Node)\n node.get = self.processInvokerHints(invoker, node.get)\n log.info('Resolved invoker %s as a get for node %s', invoker, node)\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n assert isinstance(root, Node), 'Invalid node %s' % root\n assert isinstance(model, Model), 'Invalid model %s' % model\n assert isinstance(mandatory, list), 'Invalid mandatory list %s' % mandatory\n assert isinstance(optional, list), 'Invalid optional list %s' % optional\n nodes = []\n for extra in chain(*(combinations(optional, k) for k in range(0, len(optional) + 1))):\n types = list(mandatory)\n types.extend(extra)\n if not self.isModelIn(model, types):\n types.append(model)\n node = self.obtainNode(root, self.processTypesHints(types, hints, isGroup))\n if node:\n nodes.append(node)\n return nodes\n<|end_body_1|>\n", "revision_id": "a10cb774c8cbc5010950eed9342413846734fea7", "skeleton": "<|skeleton|>\nclass AssembleGet:\n \"\"\"Resolving the GET method invokers. Method signature needs to be flagged with GET and look like: AnyEntity|AnyEntity.Property|Iter(AnyEntity)|Iter(AnyEntity.Id) % ([...AnyEntity.Property]) !!!Attention the order of the mandatory arguments is crucial since based on that the call is placed in the REST Node tree.\"\"\"\n\n def assembleInvoker(self, root, invoker):\n \"\"\"@see: AssembleOneByOne.assembleInvoker\"\"\"\n <|body_0|>\n\n def nodesFor(self, root, model, isGroup, mandatory, optional, hints):\n \"\"\"Provides all the nodes for the provided model obtained by combining the mandatory and extra types. @param root: Node The root node to assemble to. @param model: Model The model to obtain the nodes for. @param isGroup: boolean Flag indicating that the model is actually provided as a collection. @param mandatory: list[Input] The mandatory inputs. @param optional: list[Input] The optional inputs. @param hints: dictionary{string, object} The hints for the invoker.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AssembleGet:\n \"\"\"Resolving the GET method invokers. 
Method signature needs to be flagged with GET and look like: AnyEntity|AnyEntity.Property|Iter(AnyEntity)|Iter(AnyEntity.Id) % ([...AnyEntity.Property]) !!!Attention the order of the mandatory arguments is crucial since based on that the call is placed in the REST Node tree.\"\"\"\n\n def assembleInvoker(self, root, invoker):\n \"\"\"@see: AssembleOneByOne.assembleInvoker\"\"\"\n assert isinstance(root, Node), 'Invalid node %s' % root\n assert isinstance(invoker, Invoker), 'Invalid invoker %s' % invoker\n if invoker.method != GET:\n return False\n if isinstance(invoker.output, Iter):\n assert isinstance(invoker.output, Iter)\n output = invoker.output.itemType\n isList = True\n else:\n output = invoker.output\n isList = False\n if isinstance(output, (TypeModel, TypeModelProperty)):\n model = output.container\n else:\n log.info('Cannot extract model from output type %s', output)\n return False\n assert isinstance(model, Model)\n mandatory = [inp for inp in invoker.inputs[:invoker.mandatory] if isinstance(inp.type, TypeModelProperty)]\n extra = [inp for inp in invoker.inputs[invoker.mandatory:] if isinstance(inp.type, TypeModelProperty)]\n nodes = self.nodesFor(root, model, isList, mandatory, extra, invoker.hints)\n if not nodes:\n return False\n for node in nodes:\n assert isinstance(node, Node)\n node.get = self.processInvokerHints(invoker, node.get)\n log.info('Resolved invoker %s as a get for node %s', invoker, node)\n return True\n\n def nodesFor(self, root, model, isGroup, mandatory, optional, hints):\n \"\"\"Provides all the nodes for the provided model obtained by combining the mandatory and extra types. @param root: Node The root node to assemble to. @param model: Model The model to obtain the nodes for. @param isGroup: boolean Flag indicating that the model is actually provided as a collection. @param mandatory: list[Input] The mandatory inputs. @param optional: list[Input] The optional inputs. 
@param hints: dictionary{string, object} The hints for the invoker.\"\"\"\n assert isinstance(root, Node), 'Invalid node %s' % root\n assert isinstance(model, Model), 'Invalid model %s' % model\n assert isinstance(mandatory, list), 'Invalid mandatory list %s' % mandatory\n assert isinstance(optional, list), 'Invalid optional list %s' % optional\n nodes = []\n for extra in chain(*(combinations(optional, k) for k in range(0, len(optional) + 1))):\n types = list(mandatory)\n types.extend(extra)\n if not self.isModelIn(model, types):\n types.append(model)\n node = self.obtainNode(root, self.processTypesHints(types, hints, isGroup))\n if node:\n nodes.append(node)\n return nodes\n", "source": "the_stack_v2_python_sparse", "source_path": "components/ally-core/ally/core/impl/assembler.py", "source_repo": "bonomali/Ally-Py", "split": "val", "star_events_count": 0} {"blob_id": "dc16e769196f79512709d210cb1f5274f9cf53da", "bodies": ["result = []\ni, j, carry = (len(num1) - 1, len(num2) - 1, 0)\nwhile i >= 0 or j >= 0 or carry:\n if i >= 0:\n carry += ord(num1[i]) - ord('0')\n i -= 1\n if j >= 0:\n carry += ord(num2[j]) - ord('0')\n j -= 1\n result.append(str(carry % 10))\n carry /= 10\nresult.reverse()\nreturn ''.join(result)", "length = max(len(num1), len(num2))\nnum1 = num1.zfill(length)[::-1]\nnum2 = num2.zfill(length)[::-1]\nres, plus = ('', 0)\nfor index, num in enumerate(num1):\n tmp = str(int(num) + int(num2[index]) + plus)\n res += tmp[-1]\n if int(tmp) > 9:\n plus = 1\n else:\n plus = 0\nif plus:\n res += '1'\nreturn res[::-1]"], "bodies_text": "<|body_start_0|>\n result = []\n i, j, carry = (len(num1) - 1, len(num2) - 1, 0)\n while i >= 0 or j >= 0 or carry:\n if i >= 0:\n carry += ord(num1[i]) - ord('0')\n i -= 1\n if j >= 0:\n carry += ord(num2[j]) - ord('0')\n j -= 1\n result.append(str(carry % 10))\n carry /= 10\n result.reverse()\n return ''.join(result)\n<|end_body_0|>\n\n<|body_start_1|>\n length = max(len(num1), len(num2))\n num1 = num1.zfill(length)[::-1]\n num2 = num2.zfill(length)[::-1]\n res, plus = ('', 0)\n for index, num in enumerate(num1):\n tmp = str(int(num) + int(num2[index]) + plus)\n res += tmp[-1]\n if int(tmp) > 9:\n plus = 1\n else:\n plus = 0\n if plus:\n res += '1'\n return res[::-1]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def addStrings(self, num1, num2):\n \"\"\":type num1: str :type num2: str :rtype: str\"\"\"\n <|body_0|>\n\n def addStrings2(self, num1, num2):\n \"\"\":type num1: str :type num2: str :rtype: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = []\n i, j, carry = (len(num1) - 1, len(num2) - 1, 0)\n while i >= 0 or j >= 0 or carry:\n if i >= 0:\n carry += ord(num1[i]) - ord('0')\n i -= 1\n if j >= 0:\n carry += ord(num2[j]) - ord('0')\n j -= 1\n result.append(str(carry % 10))\n carry /= 10\n result.reverse()\n return ''.join(result)\n<|end_body_0|>\n\n<|body_start_1|>\n length = max(len(num1), len(num2))\n num1 = num1.zfill(length)[::-1]\n num2 = num2.zfill(length)[::-1]\n res, plus = ('', 0)\n for index, num in enumerate(num1):\n tmp = str(int(num) + int(num2[index]) + plus)\n res += tmp[-1]\n if int(tmp) > 9:\n plus = 1\n else:\n plus = 0\n if plus:\n res += '1'\n return res[::-1]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000337", "length_bytes": 2315, "license_type": "permissive", "methods": [{"docstring": ":type num1: str :type num2: str 
:rtype: str", "name": "addStrings", "signature": "def addStrings(self, num1, num2)"}, {"docstring": ":type num1: str :type num2: str :rtype: str", "name": "addStrings2", "signature": "def addStrings2(self, num1, num2)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def addStrings(self, num1, num2): :type num1: str :type num2: str :rtype: str\n- def addStrings2(self, num1, num2): :type num1: str :type num2: str :rtype: str", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def addStrings(self, num1, num2): :type num1: str :type num2: str :rtype: str\n- def addStrings2(self, num1, num2): :type num1: str :type num2: str :rtype: str\n\n<|skeleton|>\nclass Solution:\n\n def addStrings(self, num1, num2):\n \"\"\":type num1: str :type num2: str :rtype: str\"\"\"\n <|body_0|>\n\n def addStrings2(self, num1, num2):\n \"\"\":type num1: str :type num2: str :rtype: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = []\n i, j, carry = (len(num1) - 1, len(num2) - 1, 0)\n while i >= 0 or j >= 0 or carry:\n if i >= 0:\n carry += ord(num1[i]) - ord('0')\n i -= 1\n if j >= 0:\n carry += ord(num2[j]) - ord('0')\n j -= 1\n result.append(str(carry % 10))\n carry /= 10\n result.reverse()\n return ''.join(result)\n<|end_body_0|>\n\n<|body_start_1|>\n length = max(len(num1), len(num2))\n num1 = num1.zfill(length)[::-1]\n num2 = num2.zfill(length)[::-1]\n res, plus = ('', 0)\n for index, num in enumerate(num1):\n tmp = str(int(num) + int(num2[index]) + plus)\n res += tmp[-1]\n if int(tmp) > 9:\n plus = 1\n else:\n plus = 0\n if plus:\n res += '1'\n return res[::-1]\n<|end_body_1|>\n", "revision_id": "0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c", "skeleton": "<|skeleton|>\nclass Solution:\n\n def addStrings(self, num1, num2):\n \"\"\":type num1: str :type num2: str :rtype: str\"\"\"\n <|body_0|>\n\n def addStrings2(self, num1, num2):\n \"\"\":type num1: str :type num2: str :rtype: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def addStrings(self, num1, num2):\n \"\"\":type num1: str :type num2: str :rtype: str\"\"\"\n result = []\n i, j, carry = (len(num1) - 1, len(num2) - 1, 0)\n while i >= 0 or j >= 0 or carry:\n if i >= 0:\n carry += ord(num1[i]) - ord('0')\n i -= 1\n if j >= 0:\n carry += ord(num2[j]) - ord('0')\n j -= 1\n result.append(str(carry % 10))\n carry /= 10\n result.reverse()\n return ''.join(result)\n\n def addStrings2(self, num1, num2):\n \"\"\":type num1: str :type num2: str :rtype: str\"\"\"\n length = max(len(num1), len(num2))\n num1 = num1.zfill(length)[::-1]\n num2 = num2.zfill(length)[::-1]\n res, plus = ('', 0)\n for index, num in enumerate(num1):\n tmp = str(int(num) + int(num2[index]) + plus)\n res += tmp[-1]\n if int(tmp) > 9:\n plus = 1\n else:\n plus = 0\n if plus:\n res += '1'\n return res[::-1]\n", "source": "the_stack_v2_python_sparse", "source_path": "cs15211/AddStrings.py", "source_repo": "JulyKikuAkita/PythonPrac", "split": "val", "star_events_count": 1} {"blob_id": "748fd9fbff8d2bffaa672eeb189d44c734e754ef", "bodies": ["if self.tenancy_id.is_landlord_rent:\n account_jrnl_obj = self.env['account.journal'].search([('type', '=', 'purchase')], limit=1)\n inv_lines_values = {'name': 
'Rent Cost for' + self.tenancy_id.name, 'quantity': 1, 'price_unit': self.amount or 0.0, 'account_id': self.tenancy_id.property_id.account_depreciation_expense_id.id or False, 'analytic_account_id': self.tenancy_id.id or False}\n owner_rec = self.tenancy_id.property_owner_id\n invo_values = {'partner_id': self.tenancy_id.property_owner_id.id or False, 'type': 'in_invoice', 'invoice_line_ids': [(0, 0, inv_lines_values)], 'property_id': self.tenancy_id.property_id.id or False, 'invoice_date': self.start_date or False, 'new_tenancy_id': self.tenancy_id.id, 'journal_id': account_jrnl_obj.id or False}\n acc_id = self.env['account.move'].with_context({'default_type': 'in_invoice'}).create(invo_values)\n self.write({'invc_id': acc_id.id, 'inv': True})\n wiz_form_id = self.env['ir.model.data'].get_object_reference('account', 'view_move_form')[1]\n return {'view_type': 'form', 'view_id': wiz_form_id, 'view_mode': 'form', 'res_model': 'account.move', 'res_id': self.invc_id.id, 'type': 'ir.actions.act_window', 'target': 'current', 'context': self._context}", "context = dict(self._context or {})\nwiz_form_id = self.env['ir.model.data'].get_object_reference('account', 'view_move_form')[1]\nreturn {'view_type': 'form', 'view_id': wiz_form_id, 'view_mode': 'form', 'res_model': 'account.move', 'res_id': self.invc_id.id, 'type': 'ir.actions.act_window', 'target': 'current', 'context': context}"], "bodies_text": "<|body_start_0|>\n if self.tenancy_id.is_landlord_rent:\n account_jrnl_obj = self.env['account.journal'].search([('type', '=', 'purchase')], limit=1)\n inv_lines_values = {'name': 'Rent Cost for' + self.tenancy_id.name, 'quantity': 1, 'price_unit': self.amount or 0.0, 'account_id': self.tenancy_id.property_id.account_depreciation_expense_id.id or False, 'analytic_account_id': self.tenancy_id.id or False}\n owner_rec = self.tenancy_id.property_owner_id\n invo_values = {'partner_id': self.tenancy_id.property_owner_id.id or False, 'type': 'in_invoice', 'invoice_line_ids': [(0, 0, inv_lines_values)], 'property_id': self.tenancy_id.property_id.id or False, 'invoice_date': self.start_date or False, 'new_tenancy_id': self.tenancy_id.id, 'journal_id': account_jrnl_obj.id or False}\n acc_id = self.env['account.move'].with_context({'default_type': 'in_invoice'}).create(invo_values)\n self.write({'invc_id': acc_id.id, 'inv': True})\n wiz_form_id = self.env['ir.model.data'].get_object_reference('account', 'view_move_form')[1]\n return {'view_type': 'form', 'view_id': wiz_form_id, 'view_mode': 'form', 'res_model': 'account.move', 'res_id': self.invc_id.id, 'type': 'ir.actions.act_window', 'target': 'current', 'context': self._context}\n<|end_body_0|>\n\n<|body_start_1|>\n context = dict(self._context or {})\n wiz_form_id = self.env['ir.model.data'].get_object_reference('account', 'view_move_form')[1]\n return {'view_type': 'form', 'view_id': wiz_form_id, 'view_mode': 'form', 'res_model': 'account.move', 'res_id': self.invc_id.id, 'type': 'ir.actions.act_window', 'target': 'current', 'context': context}\n<|end_body_1|>\n", "class_docstring": "", "class_name": "TenancyRentSchedule", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TenancyRentSchedule:\n\n def create_landlord_invoice(self):\n \"\"\"Create invoice for Rent Schedule. ------------------------------------ @param self: The object pointer\"\"\"\n <|body_0|>\n\n def open_landlord_invoice(self):\n \"\"\"Description: This method is used to open Invoice which is created. 
Decorators: api.multi\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.tenancy_id.is_landlord_rent:\n account_jrnl_obj = self.env['account.journal'].search([('type', '=', 'purchase')], limit=1)\n inv_lines_values = {'name': 'Rent Cost for' + self.tenancy_id.name, 'quantity': 1, 'price_unit': self.amount or 0.0, 'account_id': self.tenancy_id.property_id.account_depreciation_expense_id.id or False, 'analytic_account_id': self.tenancy_id.id or False}\n owner_rec = self.tenancy_id.property_owner_id\n invo_values = {'partner_id': self.tenancy_id.property_owner_id.id or False, 'type': 'in_invoice', 'invoice_line_ids': [(0, 0, inv_lines_values)], 'property_id': self.tenancy_id.property_id.id or False, 'invoice_date': self.start_date or False, 'new_tenancy_id': self.tenancy_id.id, 'journal_id': account_jrnl_obj.id or False}\n acc_id = self.env['account.move'].with_context({'default_type': 'in_invoice'}).create(invo_values)\n self.write({'invc_id': acc_id.id, 'inv': True})\n wiz_form_id = self.env['ir.model.data'].get_object_reference('account', 'view_move_form')[1]\n return {'view_type': 'form', 'view_id': wiz_form_id, 'view_mode': 'form', 'res_model': 'account.move', 'res_id': self.invc_id.id, 'type': 'ir.actions.act_window', 'target': 'current', 'context': self._context}\n<|end_body_0|>\n\n<|body_start_1|>\n context = dict(self._context or {})\n wiz_form_id = self.env['ir.model.data'].get_object_reference('account', 'view_move_form')[1]\n return {'view_type': 'form', 'view_id': wiz_form_id, 'view_mode': 'form', 'res_model': 'account.move', 'res_id': self.invc_id.id, 'type': 'ir.actions.act_window', 'target': 'current', 'context': context}\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000338", "length_bytes": 18494, "license_type": "no_license", "methods": [{"docstring": "Create invoice for Rent Schedule. ------------------------------------ @param self: The object pointer", "name": "create_landlord_invoice", "signature": "def create_landlord_invoice(self)"}, {"docstring": "Description: This method is used to open Invoice which is created. Decorators: api.multi", "name": "open_landlord_invoice", "signature": "def open_landlord_invoice(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005608", "prompt": "Implement the Python class `TenancyRentSchedule` described below.\n\nClass description:\nImplement the TenancyRentSchedule class.\n\nMethod signatures and docstrings:\n- def create_landlord_invoice(self): Create invoice for Rent Schedule. ------------------------------------ @param self: The object pointer\n- def open_landlord_invoice(self): Description: This method is used to open Invoice which is created. Decorators: api.multi", "prompted_full_text": "Implement the Python class `TenancyRentSchedule` described below.\n\nClass description:\nImplement the TenancyRentSchedule class.\n\nMethod signatures and docstrings:\n- def create_landlord_invoice(self): Create invoice for Rent Schedule. ------------------------------------ @param self: The object pointer\n- def open_landlord_invoice(self): Description: This method is used to open Invoice which is created. Decorators: api.multi\n\n<|skeleton|>\nclass TenancyRentSchedule:\n\n def create_landlord_invoice(self):\n \"\"\"Create invoice for Rent Schedule. ------------------------------------ @param self: The object pointer\"\"\"\n <|body_0|>\n\n def open_landlord_invoice(self):\n \"\"\"Description: This method is used to open Invoice which is created. 
Decorators: api.multi\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.tenancy_id.is_landlord_rent:\n account_jrnl_obj = self.env['account.journal'].search([('type', '=', 'purchase')], limit=1)\n inv_lines_values = {'name': 'Rent Cost for' + self.tenancy_id.name, 'quantity': 1, 'price_unit': self.amount or 0.0, 'account_id': self.tenancy_id.property_id.account_depreciation_expense_id.id or False, 'analytic_account_id': self.tenancy_id.id or False}\n owner_rec = self.tenancy_id.property_owner_id\n invo_values = {'partner_id': self.tenancy_id.property_owner_id.id or False, 'type': 'in_invoice', 'invoice_line_ids': [(0, 0, inv_lines_values)], 'property_id': self.tenancy_id.property_id.id or False, 'invoice_date': self.start_date or False, 'new_tenancy_id': self.tenancy_id.id, 'journal_id': account_jrnl_obj.id or False}\n acc_id = self.env['account.move'].with_context({'default_type': 'in_invoice'}).create(invo_values)\n self.write({'invc_id': acc_id.id, 'inv': True})\n wiz_form_id = self.env['ir.model.data'].get_object_reference('account', 'view_move_form')[1]\n return {'view_type': 'form', 'view_id': wiz_form_id, 'view_mode': 'form', 'res_model': 'account.move', 'res_id': self.invc_id.id, 'type': 'ir.actions.act_window', 'target': 'current', 'context': self._context}\n<|end_body_0|>\n\n<|body_start_1|>\n context = dict(self._context or {})\n wiz_form_id = self.env['ir.model.data'].get_object_reference('account', 'view_move_form')[1]\n return {'view_type': 'form', 'view_id': wiz_form_id, 'view_mode': 'form', 'res_model': 'account.move', 'res_id': self.invc_id.id, 'type': 'ir.actions.act_window', 'target': 'current', 'context': context}\n<|end_body_1|>\n", "revision_id": "163136f382faa8607db8fb6cda42a5ba07c4076b", "skeleton": "<|skeleton|>\nclass TenancyRentSchedule:\n\n def create_landlord_invoice(self):\n \"\"\"Create invoice for Rent Schedule. ------------------------------------ @param self: The object pointer\"\"\"\n <|body_0|>\n\n def open_landlord_invoice(self):\n \"\"\"Description: This method is used to open Invoice which is created. Decorators: api.multi\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TenancyRentSchedule:\n def create_landlord_invoice(self):\n \"\"\"Create invoice for Rent Schedule. 
------------------------------------ @param self: The object pointer\"\"\"\n if self.tenancy_id.is_landlord_rent:\n account_jrnl_obj = self.env['account.journal'].search([('type', '=', 'purchase')], limit=1)\n inv_lines_values = {'name': 'Rent Cost for' + self.tenancy_id.name, 'quantity': 1, 'price_unit': self.amount or 0.0, 'account_id': self.tenancy_id.property_id.account_depreciation_expense_id.id or False, 'analytic_account_id': self.tenancy_id.id or False}\n owner_rec = self.tenancy_id.property_owner_id\n invo_values = {'partner_id': self.tenancy_id.property_owner_id.id or False, 'type': 'in_invoice', 'invoice_line_ids': [(0, 0, inv_lines_values)], 'property_id': self.tenancy_id.property_id.id or False, 'invoice_date': self.start_date or False, 'new_tenancy_id': self.tenancy_id.id, 'journal_id': account_jrnl_obj.id or False}\n acc_id = self.env['account.move'].with_context({'default_type': 'in_invoice'}).create(invo_values)\n self.write({'invc_id': acc_id.id, 'inv': True})\n wiz_form_id = self.env['ir.model.data'].get_object_reference('account', 'view_move_form')[1]\n return {'view_type': 'form', 'view_id': wiz_form_id, 'view_mode': 'form', 'res_model': 'account.move', 'res_id': self.invc_id.id, 'type': 'ir.actions.act_window', 'target': 'current', 'context': self._context}\n\n def open_landlord_invoice(self):\n \"\"\"Description: This method is used to open Invoice which is created. Decorators: api.multi\"\"\"\n context = dict(self._context or {})\n wiz_form_id = self.env['ir.model.data'].get_object_reference('account', 'view_move_form')[1]\n return {'view_type': 'form', 'view_id': wiz_form_id, 'view_mode': 'form', 'res_model': 'account.move', 'res_id': self.invc_id.id, 'type': 'ir.actions.act_window', 'target': 'current', 'context': context}\n", "source": "the_stack_v2_python_sparse", "source_path": "property_landlord_management_ee/models/account_analytic.py", "source_repo": "maarejsys/Roya", "split": "val", "star_events_count": 0} {"blob_id": "ca09b011e46cc3e2540532ef0366740189e496e3", "bodies": ["linear.drop_data()\nself.assertEqual(linear.show_available_products(), {})\nwith self.assertRaises(FileNotFoundError):\n result = linear.import_data('data2', 'p.csv', 'c.csv', 'r.csv')\nresult = linear.import_data('data', 'products.csv', 'customers.csv', 'rentals.csv')\nself.assertEqual(result[0][0], 1000)\nself.assertEqual(result[0][1], 0)\nself.assertEqual(result[0][2], 1000)\nself.assertEqual(result[1][0], 1000)\nself.assertEqual(result[1][1], 0)\nself.assertEqual(result[1][2], 1000)\nlinear_available = linear.show_available_products()\nparallel.drop_data()\nself.assertEqual(parallel.show_available_products(), {})\nwith self.assertRaises(FileNotFoundError):\n result = linear.import_data('data2', 'p.csv', 'c.csv', 'r.csv')\nresult = parallel.import_data('data', 'products.csv', 'customers.csv', 'rentals.csv')\nself.assertEqual(result[0][0], 1000)\nself.assertEqual(result[0][1], 0)\nself.assertEqual(result[0][2], 1000)\nself.assertEqual(result[1][0], 1000)\nself.assertEqual(result[1][1], 0)\nself.assertEqual(result[1][2], 1000)\nself.assertEqual(linear_available, parallel.show_available_products())", "cust_1 = {'name': 'George Washington', 'address': '4 Bowling Green', 'phone_number': '2125555555', 'email': 'george@governmenthouse.com'}\ncust_2 = {'name': 'John Adams', 'address': '524-30 Market St', 'phone_number': '2675551212', 'email': 'john@presidentshouse.com'}\ncust_3 = {'name': 'Thomas Jefferson', 'address': '1600 Pennsylvania Ave', 'phone_number': '2029999999', 'email': 
'thomas@whitehouse.gov'}\nresult = linear.show_rentals('prod_1')\nself.assertEqual(result['cust_1'], cust_1)\nself.assertEqual(result['cust_3'], cust_3)\nresult = linear.show_rentals('prod_3')\nself.assertEqual(result['cust_1'], cust_1)\nself.assertEqual(result['cust_2'], cust_2)\nself.assertEqual(result['cust_3'], cust_3)\nresult = linear.show_rentals('prod_4')\nself.assertEqual(result['cust_2'], cust_2)\nself.assertEqual(result['cust_3'], cust_3)\nresult = linear.show_rentals('prod_5')\nself.assertEqual(result['cust_3'], cust_3)\nresult = linear.show_rentals('prod_0')\nself.assertEqual(result, {})\nself.assertEqual(linear.show_rentals('prod_1'), parallel.show_rentals('prod_1'))"], "bodies_text": "<|body_start_0|>\n linear.drop_data()\n self.assertEqual(linear.show_available_products(), {})\n with self.assertRaises(FileNotFoundError):\n result = linear.import_data('data2', 'p.csv', 'c.csv', 'r.csv')\n result = linear.import_data('data', 'products.csv', 'customers.csv', 'rentals.csv')\n self.assertEqual(result[0][0], 1000)\n self.assertEqual(result[0][1], 0)\n self.assertEqual(result[0][2], 1000)\n self.assertEqual(result[1][0], 1000)\n self.assertEqual(result[1][1], 0)\n self.assertEqual(result[1][2], 1000)\n linear_available = linear.show_available_products()\n parallel.drop_data()\n self.assertEqual(parallel.show_available_products(), {})\n with self.assertRaises(FileNotFoundError):\n result = linear.import_data('data2', 'p.csv', 'c.csv', 'r.csv')\n result = parallel.import_data('data', 'products.csv', 'customers.csv', 'rentals.csv')\n self.assertEqual(result[0][0], 1000)\n self.assertEqual(result[0][1], 0)\n self.assertEqual(result[0][2], 1000)\n self.assertEqual(result[1][0], 1000)\n self.assertEqual(result[1][1], 0)\n self.assertEqual(result[1][2], 1000)\n self.assertEqual(linear_available, parallel.show_available_products())\n<|end_body_0|>\n\n<|body_start_1|>\n cust_1 = {'name': 'George Washington', 'address': '4 Bowling Green', 'phone_number': '2125555555', 'email': 'george@governmenthouse.com'}\n cust_2 = {'name': 'John Adams', 'address': '524-30 Market St', 'phone_number': '2675551212', 'email': 'john@presidentshouse.com'}\n cust_3 = {'name': 'Thomas Jefferson', 'address': '1600 Pennsylvania Ave', 'phone_number': '2029999999', 'email': 'thomas@whitehouse.gov'}\n result = linear.show_rentals('prod_1')\n self.assertEqual(result['cust_1'], cust_1)\n self.assertEqual(result['cust_3'], cust_3)\n result = linear.show_rentals('prod_3')\n self.assertEqual(result['cust_1'], cust_1)\n self.assertEqual(result['cust_2'], cust_2)\n self.assertEqual(result['cust_3'], cust_3)\n result = linear.show_rentals('prod_4')\n self.assertEqual(result['cust_2'], cust_2)\n self.assertEqual(result['cust_3'], cust_3)\n result = linear.show_rentals('prod_5')\n self.assertEqual(result['cust_3'], cust_3)\n result = linear.show_rentals('prod_0')\n self.assertEqual(result, {})\n self.assertEqual(linear.show_rentals('prod_1'), parallel.show_rentals('prod_1'))\n<|end_body_1|>\n", "class_docstring": "Tests for population and data integrity of database.", "class_name": "RentalDbTest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RentalDbTest:\n \"\"\"Tests for population and data integrity of database.\"\"\"\n\n def test_1_import(self):\n \"\"\"Test that the records are successfully imported.\"\"\"\n <|body_0|>\n\n def test_2_show_rentals(self):\n \"\"\"Test the integrity of the returned dictionary of active rentals.\"\"\"\n 
<|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n linear.drop_data()\n self.assertEqual(linear.show_available_products(), {})\n with self.assertRaises(FileNotFoundError):\n result = linear.import_data('data2', 'p.csv', 'c.csv', 'r.csv')\n result = linear.import_data('data', 'products.csv', 'customers.csv', 'rentals.csv')\n self.assertEqual(result[0][0], 1000)\n self.assertEqual(result[0][1], 0)\n self.assertEqual(result[0][2], 1000)\n self.assertEqual(result[1][0], 1000)\n self.assertEqual(result[1][1], 0)\n self.assertEqual(result[1][2], 1000)\n linear_available = linear.show_available_products()\n parallel.drop_data()\n self.assertEqual(parallel.show_available_products(), {})\n with self.assertRaises(FileNotFoundError):\n result = linear.import_data('data2', 'p.csv', 'c.csv', 'r.csv')\n result = parallel.import_data('data', 'products.csv', 'customers.csv', 'rentals.csv')\n self.assertEqual(result[0][0], 1000)\n self.assertEqual(result[0][1], 0)\n self.assertEqual(result[0][2], 1000)\n self.assertEqual(result[1][0], 1000)\n self.assertEqual(result[1][1], 0)\n self.assertEqual(result[1][2], 1000)\n self.assertEqual(linear_available, parallel.show_available_products())\n<|end_body_0|>\n\n<|body_start_1|>\n cust_1 = {'name': 'George Washington', 'address': '4 Bowling Green', 'phone_number': '2125555555', 'email': 'george@governmenthouse.com'}\n cust_2 = {'name': 'John Adams', 'address': '524-30 Market St', 'phone_number': '2675551212', 'email': 'john@presidentshouse.com'}\n cust_3 = {'name': 'Thomas Jefferson', 'address': '1600 Pennsylvania Ave', 'phone_number': '2029999999', 'email': 'thomas@whitehouse.gov'}\n result = linear.show_rentals('prod_1')\n self.assertEqual(result['cust_1'], cust_1)\n self.assertEqual(result['cust_3'], cust_3)\n result = linear.show_rentals('prod_3')\n self.assertEqual(result['cust_1'], cust_1)\n self.assertEqual(result['cust_2'], cust_2)\n self.assertEqual(result['cust_3'], cust_3)\n result = linear.show_rentals('prod_4')\n self.assertEqual(result['cust_2'], cust_2)\n self.assertEqual(result['cust_3'], cust_3)\n result = linear.show_rentals('prod_5')\n self.assertEqual(result['cust_3'], cust_3)\n result = linear.show_rentals('prod_0')\n self.assertEqual(result, {})\n self.assertEqual(linear.show_rentals('prod_1'), parallel.show_rentals('prod_1'))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000339", "length_bytes": 3511, "license_type": "no_license", "methods": [{"docstring": "Test that the records are successfully imported.", "name": "test_1_import", "signature": "def test_1_import(self)"}, {"docstring": "Test the integrity of the returned dictionary of active rentals.", "name": "test_2_show_rentals", "signature": "def test_2_show_rentals(self)"}], "n_methods": 2, "prompt": "Implement the Python class `RentalDbTest` described below.\n\nClass description:\nTests for population and data integrity of database.\n\nMethod signatures and docstrings:\n- def test_1_import(self): Test that the records are successfully imported.\n- def test_2_show_rentals(self): Test the integrity of the returned dictionary of active rentals.", "prompted_full_text": "Implement the Python class `RentalDbTest` described below.\n\nClass description:\nTests for population and data integrity of database.\n\nMethod signatures and docstrings:\n- def test_1_import(self): Test that the records are successfully imported.\n- def test_2_show_rentals(self): Test the integrity of the returned dictionary of active rentals.\n\n<|skeleton|>\nclass RentalDbTest:\n \"\"\"Tests for 
population and data integrity of database.\"\"\"\n\n def test_1_import(self):\n \"\"\"Test that the records are successfully imported.\"\"\"\n <|body_0|>\n\n def test_2_show_rentals(self):\n \"\"\"Test the integrity of the returned dictionary of active rentals.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n linear.drop_data()\n self.assertEqual(linear.show_available_products(), {})\n with self.assertRaises(FileNotFoundError):\n result = linear.import_data('data2', 'p.csv', 'c.csv', 'r.csv')\n result = linear.import_data('data', 'products.csv', 'customers.csv', 'rentals.csv')\n self.assertEqual(result[0][0], 1000)\n self.assertEqual(result[0][1], 0)\n self.assertEqual(result[0][2], 1000)\n self.assertEqual(result[1][0], 1000)\n self.assertEqual(result[1][1], 0)\n self.assertEqual(result[1][2], 1000)\n linear_available = linear.show_available_products()\n parallel.drop_data()\n self.assertEqual(parallel.show_available_products(), {})\n with self.assertRaises(FileNotFoundError):\n result = linear.import_data('data2', 'p.csv', 'c.csv', 'r.csv')\n result = parallel.import_data('data', 'products.csv', 'customers.csv', 'rentals.csv')\n self.assertEqual(result[0][0], 1000)\n self.assertEqual(result[0][1], 0)\n self.assertEqual(result[0][2], 1000)\n self.assertEqual(result[1][0], 1000)\n self.assertEqual(result[1][1], 0)\n self.assertEqual(result[1][2], 1000)\n self.assertEqual(linear_available, parallel.show_available_products())\n<|end_body_0|>\n\n<|body_start_1|>\n cust_1 = {'name': 'George Washington', 'address': '4 Bowling Green', 'phone_number': '2125555555', 'email': 'george@governmenthouse.com'}\n cust_2 = {'name': 'John Adams', 'address': '524-30 Market St', 'phone_number': '2675551212', 'email': 'john@presidentshouse.com'}\n cust_3 = {'name': 'Thomas Jefferson', 'address': '1600 Pennsylvania Ave', 'phone_number': '2029999999', 'email': 'thomas@whitehouse.gov'}\n result = linear.show_rentals('prod_1')\n self.assertEqual(result['cust_1'], cust_1)\n self.assertEqual(result['cust_3'], cust_3)\n result = linear.show_rentals('prod_3')\n self.assertEqual(result['cust_1'], cust_1)\n self.assertEqual(result['cust_2'], cust_2)\n self.assertEqual(result['cust_3'], cust_3)\n result = linear.show_rentals('prod_4')\n self.assertEqual(result['cust_2'], cust_2)\n self.assertEqual(result['cust_3'], cust_3)\n result = linear.show_rentals('prod_5')\n self.assertEqual(result['cust_3'], cust_3)\n result = linear.show_rentals('prod_0')\n self.assertEqual(result, {})\n self.assertEqual(linear.show_rentals('prod_1'), parallel.show_rentals('prod_1'))\n<|end_body_1|>\n", "revision_id": "5dac60f39e3909ff05b26721d602ed20f14d6be3", "skeleton": "<|skeleton|>\nclass RentalDbTest:\n \"\"\"Tests for population and data integrity of database.\"\"\"\n\n def test_1_import(self):\n \"\"\"Test that the records are successfully imported.\"\"\"\n <|body_0|>\n\n def test_2_show_rentals(self):\n \"\"\"Test the integrity of the returned dictionary of active rentals.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RentalDbTest:\n \"\"\"Tests for population and data integrity of database.\"\"\"\n\n def test_1_import(self):\n \"\"\"Test that the records are successfully imported.\"\"\"\n linear.drop_data()\n self.assertEqual(linear.show_available_products(), {})\n with self.assertRaises(FileNotFoundError):\n result = linear.import_data('data2', 'p.csv', 'c.csv', 'r.csv')\n result = 
linear.import_data('data', 'products.csv', 'customers.csv', 'rentals.csv')\n self.assertEqual(result[0][0], 1000)\n self.assertEqual(result[0][1], 0)\n self.assertEqual(result[0][2], 1000)\n self.assertEqual(result[1][0], 1000)\n self.assertEqual(result[1][1], 0)\n self.assertEqual(result[1][2], 1000)\n linear_available = linear.show_available_products()\n parallel.drop_data()\n self.assertEqual(parallel.show_available_products(), {})\n with self.assertRaises(FileNotFoundError):\n result = linear.import_data('data2', 'p.csv', 'c.csv', 'r.csv')\n result = parallel.import_data('data', 'products.csv', 'customers.csv', 'rentals.csv')\n self.assertEqual(result[0][0], 1000)\n self.assertEqual(result[0][1], 0)\n self.assertEqual(result[0][2], 1000)\n self.assertEqual(result[1][0], 1000)\n self.assertEqual(result[1][1], 0)\n self.assertEqual(result[1][2], 1000)\n self.assertEqual(linear_available, parallel.show_available_products())\n\n def test_2_show_rentals(self):\n \"\"\"Test the integrity of the returned dictionary of active rentals.\"\"\"\n cust_1 = {'name': 'George Washington', 'address': '4 Bowling Green', 'phone_number': '2125555555', 'email': 'george@governmenthouse.com'}\n cust_2 = {'name': 'John Adams', 'address': '524-30 Market St', 'phone_number': '2675551212', 'email': 'john@presidentshouse.com'}\n cust_3 = {'name': 'Thomas Jefferson', 'address': '1600 Pennsylvania Ave', 'phone_number': '2029999999', 'email': 'thomas@whitehouse.gov'}\n result = linear.show_rentals('prod_1')\n self.assertEqual(result['cust_1'], cust_1)\n self.assertEqual(result['cust_3'], cust_3)\n result = linear.show_rentals('prod_3')\n self.assertEqual(result['cust_1'], cust_1)\n self.assertEqual(result['cust_2'], cust_2)\n self.assertEqual(result['cust_3'], cust_3)\n result = linear.show_rentals('prod_4')\n self.assertEqual(result['cust_2'], cust_2)\n self.assertEqual(result['cust_3'], cust_3)\n result = linear.show_rentals('prod_5')\n self.assertEqual(result['cust_3'], cust_3)\n result = linear.show_rentals('prod_0')\n self.assertEqual(result, {})\n self.assertEqual(linear.show_rentals('prod_1'), parallel.show_rentals('prod_1'))\n", "source": "the_stack_v2_python_sparse", "source_path": "students/shodges/lesson07/test_database.py", "source_repo": "JavaRod/SP_Python220B_2019", "split": "val", "star_events_count": 1} {"blob_id": "c4f182e9f89e6aa86212bd5968deaf039337d427", "bodies": ["res = []\n\ndef preorder(root):\n if root == None:\n return\n res.append(str(root.val))\n if root.children == []:\n res.append('None')\n res.append('None')\n for child in root.children:\n preorder(child)\npreorder(root)\nprint(','.join(res))\nreturn ','.join(res)", "self.data = collections.deque(data.split(','))\n\ndef dePreorder():\n print(self.data)\n if self.data[0] == 'None':\n self.data.popleft()\n self.data.popleft()\n return\n root = Node(int(self.data.popleft()), [])\n print(root.val)\n while len(self.data) > 0 and self.data[0] != 'None':\n root.children.append(dePreorder())\n if len(self.data) > 0 and self.data[0] == 'None':\n self.data.popleft()\n self.data.popleft()\n return root\nres = dePreorder()\nreturn res"], "bodies_text": "<|body_start_0|>\n res = []\n\n def preorder(root):\n if root == None:\n return\n res.append(str(root.val))\n if root.children == []:\n res.append('None')\n res.append('None')\n for child in root.children:\n preorder(child)\n preorder(root)\n print(','.join(res))\n return ','.join(res)\n<|end_body_0|>\n\n<|body_start_1|>\n self.data = collections.deque(data.split(','))\n\n def dePreorder():\n 
print(self.data)\n if self.data[0] == 'None':\n self.data.popleft()\n self.data.popleft()\n return\n root = Node(int(self.data.popleft()), [])\n print(root.val)\n while len(self.data) > 0 and self.data[0] != 'None':\n root.children.append(dePreorder())\n if len(self.data) > 0 and self.data[0] == 'None':\n self.data.popleft()\n self.data.popleft()\n return root\n res = dePreorder()\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = []\n\n def preorder(root):\n if root == None:\n return\n res.append(str(root.val))\n if root.children == []:\n res.append('None')\n res.append('None')\n for child in root.children:\n preorder(child)\n preorder(root)\n print(','.join(res))\n return ','.join(res)\n<|end_body_0|>\n\n<|body_start_1|>\n self.data = collections.deque(data.split(','))\n\n def dePreorder():\n print(self.data)\n if self.data[0] == 'None':\n self.data.popleft()\n self.data.popleft()\n return\n root = Node(int(self.data.popleft()), [])\n print(root.val)\n while len(self.data) > 0 and self.data[0] != 'None':\n root.children.append(dePreorder())\n if len(self.data) > 0 and self.data[0] == 'None':\n self.data.popleft()\n self.data.popleft()\n return root\n res = dePreorder()\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000340", "length_bytes": 2063, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: Node :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: Node", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000157", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: Node", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: Node\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = []\n\n def preorder(root):\n if root == None:\n return\n res.append(str(root.val))\n if root.children == []:\n res.append('None')\n res.append('None')\n for child in root.children:\n preorder(child)\n preorder(root)\n print(','.join(res))\n return ','.join(res)\n<|end_body_0|>\n\n<|body_start_1|>\n self.data = collections.deque(data.split(','))\n\n def dePreorder():\n print(self.data)\n if self.data[0] == 'None':\n self.data.popleft()\n self.data.popleft()\n return\n root = Node(int(self.data.popleft()), [])\n print(root.val)\n while len(self.data) > 0 and self.data[0] != 'None':\n root.children.append(dePreorder())\n if len(self.data) > 0 and self.data[0] == 'None':\n self.data.popleft()\n self.data.popleft()\n return root\n res = dePreorder()\n return res\n<|end_body_1|>\n", "revision_id": "f6df35359b223cdd1635c287455032ae1463906f", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n res = []\n\n def preorder(root):\n if root == None:\n return\n res.append(str(root.val))\n if root.children == []:\n res.append('None')\n res.append('None')\n for child in root.children:\n preorder(child)\n preorder(root)\n print(','.join(res))\n return ','.join(res)\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: Node\"\"\"\n self.data = collections.deque(data.split(','))\n\n def dePreorder():\n print(self.data)\n if self.data[0] == 'None':\n self.data.popleft()\n self.data.popleft()\n return\n root = Node(int(self.data.popleft()), [])\n print(root.val)\n while len(self.data) > 0 and self.data[0] != 'None':\n root.children.append(dePreorder())\n if len(self.data) > 0 and self.data[0] == 'None':\n self.data.popleft()\n self.data.popleft()\n return root\n res = dePreorder()\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode/src/SerializeandDeserializeN-aryTree.py", "source_repo": "jinwei15/java-PythonSyntax-Leetcode", "split": "val", "star_events_count": 0} {"blob_id": "af03ec9ac10d6e4388997034ccd9215a7e18a820", "bodies": ["self.language = language\nself.sum_eng = 0\nself.sum_fre = 0\nself.sum_ger = 0\nself.conn_eng = 0\nself.conn_fre = 0\nself.conn_ger = 0\nself.english_lang_percentage = None\nself.french_lang_percentage = None\nself.german_lang_percentage = None\nself.foreign_percentage = None", "total = self.sum_eng + self.sum_fre + self.sum_ger\nself.english_lang_percentage = self.sum_eng / total\nself.french_lang_percentage = self.sum_fre / total\nself.german_lang_percentage = self.sum_ger / total", "total = self.sum_eng + self.sum_fre + self.sum_ger\nif self.language == 'english':\n self.foreign_percentage = (self.sum_fre + self.sum_ger) / total\nelif self.language == 'french':\n self.foreign_percentage = (self.sum_eng + self.sum_ger) / total\nelif self.language == 'german':\n self.foreign_percentage = (self.sum_eng + self.sum_fre) / total\nelse:\n raise Exception('Unknown language')", "if not self.english_lang_percentage:\n self.calculate_language_percentages()\nif not self.foreign_percentage:\n self.calculate_foreign_percentage()\nreturn_string = self.language + ',' + str(self.sum_eng) + ',' + str(self.sum_fre) + ',' + str(self.sum_ger) + ',' + str(self.conn_eng) + ',' + str(self.conn_fre) + ',' + str(self.conn_ger) + ',' + str(self.english_lang_percentage) + ',' + str(self.french_lang_percentage) + ',' + str(self.german_lang_percentage) + ',' + str(self.foreign_percentage) + '\\n'\nreturn return_string"], "bodies_text": "<|body_start_0|>\n self.language = language\n self.sum_eng = 0\n self.sum_fre = 0\n self.sum_ger = 0\n self.conn_eng = 0\n self.conn_fre = 0\n self.conn_ger = 0\n self.english_lang_percentage = None\n self.french_lang_percentage = None\n self.german_lang_percentage = None\n self.foreign_percentage = None\n<|end_body_0|>\n\n<|body_start_1|>\n total = self.sum_eng + self.sum_fre + self.sum_ger\n self.english_lang_percentage = self.sum_eng / total\n self.french_lang_percentage = self.sum_fre / total\n self.german_lang_percentage = self.sum_ger / total\n<|end_body_1|>\n\n<|body_start_2|>\n total = self.sum_eng + self.sum_fre + self.sum_ger\n if self.language == 'english':\n self.foreign_percentage = (self.sum_fre + self.sum_ger) / total\n elif self.language == 'french':\n self.foreign_percentage = (self.sum_eng + self.sum_ger) / total\n elif self.language == 'german':\n self.foreign_percentage = (self.sum_eng + self.sum_fre) / total\n else:\n raise Exception('Unknown language')\n<|end_body_2|>\n\n<|body_start_3|>\n if not self.english_lang_percentage:\n self.calculate_language_percentages()\n if not self.foreign_percentage:\n self.calculate_foreign_percentage()\n return_string = self.language + ',' + str(self.sum_eng) + ',' + str(self.sum_fre) + ',' + str(self.sum_ger) + ',' + str(self.conn_eng) + ',' + str(self.conn_fre) + 
',' + str(self.conn_ger) + ',' + str(self.english_lang_percentage) + ',' + str(self.french_lang_percentage) + ',' + str(self.german_lang_percentage) + ',' + str(self.foreign_percentage) + '\\n'\n return return_string\n<|end_body_3|>\n", "class_docstring": "LanguageSum class", "class_name": "LanguageSum", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LanguageSum:\n \"\"\"LanguageSum class\"\"\"\n\n def __init__(self, language):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def calculate_language_percentages(self):\n \"\"\"Calculate percentage for each language\"\"\"\n <|body_1|>\n\n def calculate_foreign_percentage(self):\n \"\"\"Calculate percentage of hits which are foreign to that particular author\"\"\"\n <|body_2|>\n\n def __str__(self):\n \"\"\"return LanguageSum as a string\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.language = language\n self.sum_eng = 0\n self.sum_fre = 0\n self.sum_ger = 0\n self.conn_eng = 0\n self.conn_fre = 0\n self.conn_ger = 0\n self.english_lang_percentage = None\n self.french_lang_percentage = None\n self.german_lang_percentage = None\n self.foreign_percentage = None\n<|end_body_0|>\n\n<|body_start_1|>\n total = self.sum_eng + self.sum_fre + self.sum_ger\n self.english_lang_percentage = self.sum_eng / total\n self.french_lang_percentage = self.sum_fre / total\n self.german_lang_percentage = self.sum_ger / total\n<|end_body_1|>\n\n<|body_start_2|>\n total = self.sum_eng + self.sum_fre + self.sum_ger\n if self.language == 'english':\n self.foreign_percentage = (self.sum_fre + self.sum_ger) / total\n elif self.language == 'french':\n self.foreign_percentage = (self.sum_eng + self.sum_ger) / total\n elif self.language == 'german':\n self.foreign_percentage = (self.sum_eng + self.sum_fre) / total\n else:\n raise Exception('Unknown language')\n<|end_body_2|>\n\n<|body_start_3|>\n if not self.english_lang_percentage:\n self.calculate_language_percentages()\n if not self.foreign_percentage:\n self.calculate_foreign_percentage()\n return_string = self.language + ',' + str(self.sum_eng) + ',' + str(self.sum_fre) + ',' + str(self.sum_ger) + ',' + str(self.conn_eng) + ',' + str(self.conn_fre) + ',' + str(self.conn_ger) + ',' + str(self.english_lang_percentage) + ',' + str(self.french_lang_percentage) + ',' + str(self.german_lang_percentage) + ',' + str(self.foreign_percentage) + '\\n'\n return return_string\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000341", "length_bytes": 4643, "license_type": "no_license", "methods": [{"docstring": "Constructor", "name": "__init__", "signature": "def __init__(self, language)"}, {"docstring": "Calculate percentage for each language", "name": "calculate_language_percentages", "signature": "def calculate_language_percentages(self)"}, {"docstring": "Calculate percentage of hits which are foreign to that particular author", "name": "calculate_foreign_percentage", "signature": "def calculate_foreign_percentage(self)"}, {"docstring": "return LanguageSum as a string", "name": "__str__", "signature": "def __str__(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_002837", "prompt": "Implement the Python class `LanguageSum` described below.\n\nClass description:\nLanguageSum class\n\nMethod signatures and docstrings:\n- def __init__(self, language): Constructor\n- def calculate_language_percentages(self): Calculate percentage for each language\n- def calculate_foreign_percentage(self): Calculate percentage of hits 
which are foreign to that particular author\n- def __str__(self): return LanguageSum as a string", "prompted_full_text": "Implement the Python class `LanguageSum` described below.\n\nClass description:\nLanguageSum class\n\nMethod signatures and docstrings:\n- def __init__(self, language): Constructor\n- def calculate_language_percentages(self): Calculate percentage for each language\n- def calculate_foreign_percentage(self): Calculate percentage of hits which are foreign to that particular author\n- def __str__(self): return LanguageSum as a string\n\n<|skeleton|>\nclass LanguageSum:\n \"\"\"LanguageSum class\"\"\"\n\n def __init__(self, language):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def calculate_language_percentages(self):\n \"\"\"Calculate percentage for each language\"\"\"\n <|body_1|>\n\n def calculate_foreign_percentage(self):\n \"\"\"Calculate percentage of hits which are foreign to that particular author\"\"\"\n <|body_2|>\n\n def __str__(self):\n \"\"\"return LanguageSum as a string\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.language = language\n self.sum_eng = 0\n self.sum_fre = 0\n self.sum_ger = 0\n self.conn_eng = 0\n self.conn_fre = 0\n self.conn_ger = 0\n self.english_lang_percentage = None\n self.french_lang_percentage = None\n self.german_lang_percentage = None\n self.foreign_percentage = None\n<|end_body_0|>\n\n<|body_start_1|>\n total = self.sum_eng + self.sum_fre + self.sum_ger\n self.english_lang_percentage = self.sum_eng / total\n self.french_lang_percentage = self.sum_fre / total\n self.german_lang_percentage = self.sum_ger / total\n<|end_body_1|>\n\n<|body_start_2|>\n total = self.sum_eng + self.sum_fre + self.sum_ger\n if self.language == 'english':\n self.foreign_percentage = (self.sum_fre + self.sum_ger) / total\n elif self.language == 'french':\n self.foreign_percentage = (self.sum_eng + self.sum_ger) / total\n elif self.language == 'german':\n self.foreign_percentage = (self.sum_eng + self.sum_fre) / total\n else:\n raise Exception('Unknown language')\n<|end_body_2|>\n\n<|body_start_3|>\n if not self.english_lang_percentage:\n self.calculate_language_percentages()\n if not self.foreign_percentage:\n self.calculate_foreign_percentage()\n return_string = self.language + ',' + str(self.sum_eng) + ',' + str(self.sum_fre) + ',' + str(self.sum_ger) + ',' + str(self.conn_eng) + ',' + str(self.conn_fre) + ',' + str(self.conn_ger) + ',' + str(self.english_lang_percentage) + ',' + str(self.french_lang_percentage) + ',' + str(self.german_lang_percentage) + ',' + str(self.foreign_percentage) + '\\n'\n return return_string\n<|end_body_3|>\n", "revision_id": "a717e6c0fa8d0b3a5787d1536a57c5875ffb5799", "skeleton": "<|skeleton|>\nclass LanguageSum:\n \"\"\"LanguageSum class\"\"\"\n\n def __init__(self, language):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def calculate_language_percentages(self):\n \"\"\"Calculate percentage for each language\"\"\"\n <|body_1|>\n\n def calculate_foreign_percentage(self):\n \"\"\"Calculate percentage of hits which are foreign to that particular author\"\"\"\n <|body_2|>\n\n def __str__(self):\n \"\"\"return LanguageSum as a string\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LanguageSum:\n \"\"\"LanguageSum class\"\"\"\n\n def __init__(self, language):\n \"\"\"Constructor\"\"\"\n self.language = language\n self.sum_eng = 0\n self.sum_fre = 0\n self.sum_ger = 0\n self.conn_eng = 0\n 
self.conn_fre = 0\n self.conn_ger = 0\n self.english_lang_percentage = None\n self.french_lang_percentage = None\n self.german_lang_percentage = None\n self.foreign_percentage = None\n\n def calculate_language_percentages(self):\n \"\"\"Calculate percentage for each language\"\"\"\n total = self.sum_eng + self.sum_fre + self.sum_ger\n self.english_lang_percentage = self.sum_eng / total\n self.french_lang_percentage = self.sum_fre / total\n self.german_lang_percentage = self.sum_ger / total\n\n def calculate_foreign_percentage(self):\n \"\"\"Calculate percentage of hits which are foreign to that particular author\"\"\"\n total = self.sum_eng + self.sum_fre + self.sum_ger\n if self.language == 'english':\n self.foreign_percentage = (self.sum_fre + self.sum_ger) / total\n elif self.language == 'french':\n self.foreign_percentage = (self.sum_eng + self.sum_ger) / total\n elif self.language == 'german':\n self.foreign_percentage = (self.sum_eng + self.sum_fre) / total\n else:\n raise Exception('Unknown language')\n\n def __str__(self):\n \"\"\"return LanguageSum as a string\"\"\"\n if not self.english_lang_percentage:\n self.calculate_language_percentages()\n if not self.foreign_percentage:\n self.calculate_foreign_percentage()\n return_string = self.language + ',' + str(self.sum_eng) + ',' + str(self.sum_fre) + ',' + str(self.sum_ger) + ',' + str(self.conn_eng) + ',' + str(self.conn_fre) + ',' + str(self.conn_ger) + ',' + str(self.english_lang_percentage) + ',' + str(self.french_lang_percentage) + ',' + str(self.german_lang_percentage) + ',' + str(self.foreign_percentage) + '\\n'\n return return_string\n", "source": "the_stack_v2_python_sparse", "source_path": "src/measures/cooccurrence/sum_languages.py", "source_repo": "niutyut/author-topic-modeling-project", "split": "val", "star_events_count": 0} {"blob_id": "0d4f7d61f4a35c62f973ef175267e9b3999931d0", "bodies": ["self.caffe = Caffe.objects.create(name='kafo', city='Gliwice', street='Wieczorka', house_number='14', postal_code='44-100')\nself.filtry = Caffe.objects.create(name='filtry', city='Warszawa', street='Filry', house_number='14', postal_code='44-100')\nself.bakery = Company.objects.create(name='bakery', caffe=self.caffe)\nself.bakery_f = Company.objects.create(name='bakery', caffe=self.filtry)\nself.cakes = Expense.objects.create(name='cakes', company=self.bakery, caffe=self.caffe)", "self.assertEqual(self.bakery.name, 'bakery')\nself.assertEqual(self.bakery.caffe, self.caffe)\nwith self.assertRaises(Exception):\n Expense.objects.create(name='cakes', company=self.bakery, caffe=self.caffe)\nExpense.objects.create(name='cakes', company=self.bakery_f, caffe=self.filtry)", "with self.assertRaises(Exception):\n Expense.objects.create(name='cakes', company=self.bakery_f, caffe=self.caffe)\nwith self.assertRaises(Exception):\n Expense.objects.create(name='cakes', company=self.bakery, caffe=self.filtry)"], "bodies_text": "<|body_start_0|>\n self.caffe = Caffe.objects.create(name='kafo', city='Gliwice', street='Wieczorka', house_number='14', postal_code='44-100')\n self.filtry = Caffe.objects.create(name='filtry', city='Warszawa', street='Filry', house_number='14', postal_code='44-100')\n self.bakery = Company.objects.create(name='bakery', caffe=self.caffe)\n self.bakery_f = Company.objects.create(name='bakery', caffe=self.filtry)\n self.cakes = Expense.objects.create(name='cakes', company=self.bakery, caffe=self.caffe)\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertEqual(self.bakery.name, 'bakery')\n self.assertEqual(self.bakery.caffe, 
self.caffe)\n with self.assertRaises(Exception):\n Expense.objects.create(name='cakes', company=self.bakery, caffe=self.caffe)\n Expense.objects.create(name='cakes', company=self.bakery_f, caffe=self.filtry)\n<|end_body_1|>\n\n<|body_start_2|>\n with self.assertRaises(Exception):\n Expense.objects.create(name='cakes', company=self.bakery_f, caffe=self.caffe)\n with self.assertRaises(Exception):\n Expense.objects.create(name='cakes', company=self.bakery, caffe=self.filtry)\n<|end_body_2|>\n", "class_docstring": "Expense model tests.", "class_name": "ExpenseModelTest", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExpenseModelTest:\n \"\"\"Expense model tests.\"\"\"\n\n def setUp(self):\n \"\"\"Test data setup.\"\"\"\n <|body_0|>\n\n def test_name(self):\n \"\"\"Check if name is unique across one caffe.\"\"\"\n <|body_1|>\n\n def test_expense_validation(self):\n \"\"\"Check expense validation.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.caffe = Caffe.objects.create(name='kafo', city='Gliwice', street='Wieczorka', house_number='14', postal_code='44-100')\n self.filtry = Caffe.objects.create(name='filtry', city='Warszawa', street='Filry', house_number='14', postal_code='44-100')\n self.bakery = Company.objects.create(name='bakery', caffe=self.caffe)\n self.bakery_f = Company.objects.create(name='bakery', caffe=self.filtry)\n self.cakes = Expense.objects.create(name='cakes', company=self.bakery, caffe=self.caffe)\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertEqual(self.bakery.name, 'bakery')\n self.assertEqual(self.bakery.caffe, self.caffe)\n with self.assertRaises(Exception):\n Expense.objects.create(name='cakes', company=self.bakery, caffe=self.caffe)\n Expense.objects.create(name='cakes', company=self.bakery_f, caffe=self.filtry)\n<|end_body_1|>\n\n<|body_start_2|>\n with self.assertRaises(Exception):\n Expense.objects.create(name='cakes', company=self.bakery_f, caffe=self.caffe)\n with self.assertRaises(Exception):\n Expense.objects.create(name='cakes', company=self.bakery, caffe=self.filtry)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000342", "length_bytes": 8665, "license_type": "permissive", "methods": [{"docstring": "Test data setup.", "name": "setUp", "signature": "def setUp(self)"}, {"docstring": "Check if name is unique across one caffe.", "name": "test_name", "signature": "def test_name(self)"}, {"docstring": "Check expense validation.", "name": "test_expense_validation", "signature": "def test_expense_validation(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005140", "prompt": "Implement the Python class `ExpenseModelTest` described below.\n\nClass description:\nExpense model tests.\n\nMethod signatures and docstrings:\n- def setUp(self): Test data setup.\n- def test_name(self): Check if name is unique across one caffe.\n- def test_expense_validation(self): Check expense validation.", "prompted_full_text": "Implement the Python class `ExpenseModelTest` described below.\n\nClass description:\nExpense model tests.\n\nMethod signatures and docstrings:\n- def setUp(self): Test data setup.\n- def test_name(self): Check if name is unique across one caffe.\n- def test_expense_validation(self): Check expense validation.\n\n<|skeleton|>\nclass ExpenseModelTest:\n \"\"\"Expense model tests.\"\"\"\n\n def setUp(self):\n \"\"\"Test data setup.\"\"\"\n <|body_0|>\n\n def test_name(self):\n \"\"\"Check if name is unique across one caffe.\"\"\"\n 
<|body_1|>\n\n def test_expense_validation(self):\n \"\"\"Check expense validation.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.caffe = Caffe.objects.create(name='kafo', city='Gliwice', street='Wieczorka', house_number='14', postal_code='44-100')\n self.filtry = Caffe.objects.create(name='filtry', city='Warszawa', street='Filry', house_number='14', postal_code='44-100')\n self.bakery = Company.objects.create(name='bakery', caffe=self.caffe)\n self.bakery_f = Company.objects.create(name='bakery', caffe=self.filtry)\n self.cakes = Expense.objects.create(name='cakes', company=self.bakery, caffe=self.caffe)\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertEqual(self.bakery.name, 'bakery')\n self.assertEqual(self.bakery.caffe, self.caffe)\n with self.assertRaises(Exception):\n Expense.objects.create(name='cakes', company=self.bakery, caffe=self.caffe)\n Expense.objects.create(name='cakes', company=self.bakery_f, caffe=self.filtry)\n<|end_body_1|>\n\n<|body_start_2|>\n with self.assertRaises(Exception):\n Expense.objects.create(name='cakes', company=self.bakery_f, caffe=self.caffe)\n with self.assertRaises(Exception):\n Expense.objects.create(name='cakes', company=self.bakery, caffe=self.filtry)\n<|end_body_2|>\n", "revision_id": "cdb7f5edb29255c7e874eaa6231621063210a8b0", "skeleton": "<|skeleton|>\nclass ExpenseModelTest:\n \"\"\"Expense model tests.\"\"\"\n\n def setUp(self):\n \"\"\"Test data setup.\"\"\"\n <|body_0|>\n\n def test_name(self):\n \"\"\"Check if name is unique across one caffe.\"\"\"\n <|body_1|>\n\n def test_expense_validation(self):\n \"\"\"Check expense validation.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ExpenseModelTest:\n \"\"\"Expense model tests.\"\"\"\n\n def setUp(self):\n \"\"\"Test data setup.\"\"\"\n self.caffe = Caffe.objects.create(name='kafo', city='Gliwice', street='Wieczorka', house_number='14', postal_code='44-100')\n self.filtry = Caffe.objects.create(name='filtry', city='Warszawa', street='Filry', house_number='14', postal_code='44-100')\n self.bakery = Company.objects.create(name='bakery', caffe=self.caffe)\n self.bakery_f = Company.objects.create(name='bakery', caffe=self.filtry)\n self.cakes = Expense.objects.create(name='cakes', company=self.bakery, caffe=self.caffe)\n\n def test_name(self):\n \"\"\"Check if name is unique across one caffe.\"\"\"\n self.assertEqual(self.bakery.name, 'bakery')\n self.assertEqual(self.bakery.caffe, self.caffe)\n with self.assertRaises(Exception):\n Expense.objects.create(name='cakes', company=self.bakery, caffe=self.caffe)\n Expense.objects.create(name='cakes', company=self.bakery_f, caffe=self.filtry)\n\n def test_expense_validation(self):\n \"\"\"Check expense validation.\"\"\"\n with self.assertRaises(Exception):\n Expense.objects.create(name='cakes', company=self.bakery_f, caffe=self.caffe)\n with self.assertRaises(Exception):\n Expense.objects.create(name='cakes', company=self.bakery, caffe=self.filtry)\n", "source": "the_stack_v2_python_sparse", "source_path": "caffe/cash/test_models.py", "source_repo": "VirrageS/io-kawiarnie", "split": "val", "star_events_count": 3} {"blob_id": "4806eadc7770d7b6c0a4479286b27987541156dc", "bodies": ["left, right, width, res = (0, len(height) - 1, len(height) - 1, 0)\nfor w in range(width, 0, -1):\n if height[left] < height[right]:\n res, left = (max(res, height[left] * w), left + 1)\n else:\n res, right = (max(res, height[right] * w), 
right - 1)\nreturn res", "left = 0\nright = len(height) - 1\nwater = 0\nwhile left < right:\n water = max(water, (right - left) * min(height[left], height[right]))\n if height[left] < height[right]:\n left += 1\n else:\n right -= 1\nreturn water", "left = 0\nright = len(height) - 1\nwater = 0\nwhile left < right:\n h = min(height[left], height[right])\n water = max(water, h * (right - left))\n while height[left] <= h and left < right:\n left += 1\n while height[right] <= h and left < right:\n right -= 1\nreturn water"], "bodies_text": "<|body_start_0|>\n left, right, width, res = (0, len(height) - 1, len(height) - 1, 0)\n for w in range(width, 0, -1):\n if height[left] < height[right]:\n res, left = (max(res, height[left] * w), left + 1)\n else:\n res, right = (max(res, height[right] * w), right - 1)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n left = 0\n right = len(height) - 1\n water = 0\n while left < right:\n water = max(water, (right - left) * min(height[left], height[right]))\n if height[left] < height[right]:\n left += 1\n else:\n right -= 1\n return water\n<|end_body_1|>\n\n<|body_start_2|>\n left = 0\n right = len(height) - 1\n water = 0\n while left < right:\n h = min(height[left], height[right])\n water = max(water, h * (right - left))\n while height[left] <= h and left < right:\n left += 1\n while height[right] <= h and left < right:\n right -= 1\n return water\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maxArea(self, height):\n \"\"\":type height: List[int] :rtype: int the largest width is from start to end later heights can compete only with larger heights, so move smaller between left and right heights beats 97.39%\"\"\"\n <|body_0|>\n\n def maxArea1(self, height):\n \"\"\":param height: :return: https://discuss.leetcode.com/topic/14940/simple-and-clear-proof-explanation the largest width is from start to end later heights can compete only with larger heights, so move smaller between left and right heights beats 46.59%\"\"\"\n <|body_1|>\n\n def maxArea2(self, height):\n \"\"\":param height: :return: https://discuss.leetcode.com/topic/16754/simple-and-fast-c-c-with-explanation the largest width is from start to end later heights can compete only with larger heights, so move left and right heights until meet larger ones beats 81.42%\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n left, right, width, res = (0, len(height) - 1, len(height) - 1, 0)\n for w in range(width, 0, -1):\n if height[left] < height[right]:\n res, left = (max(res, height[left] * w), left + 1)\n else:\n res, right = (max(res, height[right] * w), right - 1)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n left = 0\n right = len(height) - 1\n water = 0\n while left < right:\n water = max(water, (right - left) * min(height[left], height[right]))\n if height[left] < height[right]:\n left += 1\n else:\n right -= 1\n return water\n<|end_body_1|>\n\n<|body_start_2|>\n left = 0\n right = len(height) - 1\n water = 0\n while left < right:\n h = min(height[left], height[right])\n water = max(water, h * (right - left))\n while height[left] <= h and left < right:\n left += 1\n while height[right] <= h and left < right:\n right -= 1\n return water\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000343", "length_bytes": 2053, "license_type": "no_license", "methods": [{"docstring": ":type height: List[int] :rtype: int the largest width 
is from start to end later heights can compete only with larger heights, so move smaller between left and right heights beats 97.39%", "name": "maxArea", "signature": "def maxArea(self, height)"}, {"docstring": ":param height: :return: https://discuss.leetcode.com/topic/14940/simple-and-clear-proof-explanation the largest width is from start to end later heights can compete only with larger heights, so move smaller between left and right heights beats 46.59%", "name": "maxArea1", "signature": "def maxArea1(self, height)"}, {"docstring": ":param height: :return: https://discuss.leetcode.com/topic/16754/simple-and-fast-c-c-with-explanation the largest width is from start to end later heights can compete only with larger heights, so move left and right heights until meet larger ones beats 81.42%", "name": "maxArea2", "signature": "def maxArea2(self, height)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006173", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxArea(self, height): :type height: List[int] :rtype: int the largest width is from start to end later heights can compete only with larger heights, so move smaller between left and right heights beats 97.39%\n- def maxArea1(self, height): :param height: :return: https://discuss.leetcode.com/topic/14940/simple-and-clear-proof-explanation the largest width is from start to end later heights can compete only with larger heights, so move smaller between left and right heights beats 46.59%\n- def maxArea2(self, height): :param height: :return: https://discuss.leetcode.com/topic/16754/simple-and-fast-c-c-with-explanation the largest width is from start to end later heights can compete only with larger heights, so move left and right heights until meet larger ones beats 81.42%", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxArea(self, height): :type height: List[int] :rtype: int the largest width is from start to end later heights can compete only with larger heights, so move smaller between left and right heights beats 97.39%\n- def maxArea1(self, height): :param height: :return: https://discuss.leetcode.com/topic/14940/simple-and-clear-proof-explanation the largest width is from start to end later heights can compete only with larger heights, so move smaller between left and right heights beats 46.59%\n- def maxArea2(self, height): :param height: :return: https://discuss.leetcode.com/topic/16754/simple-and-fast-c-c-with-explanation the largest width is from start to end later heights can compete only with larger heights, so move left and right heights until meet larger ones beats 81.42%\n\n<|skeleton|>\nclass Solution:\n\n def maxArea(self, height):\n \"\"\":type height: List[int] :rtype: int the largest width is from start to end later heights can compete only with larger heights, so move smaller between left and right heights beats 97.39%\"\"\"\n <|body_0|>\n\n def maxArea1(self, height):\n \"\"\":param height: :return: https://discuss.leetcode.com/topic/14940/simple-and-clear-proof-explanation the largest width is from start to end later heights can compete only with larger heights, so move smaller between left and right heights beats 46.59%\"\"\"\n <|body_1|>\n\n def maxArea2(self, height):\n \"\"\":param height: :return: 
https://discuss.leetcode.com/topic/16754/simple-and-fast-c-c-with-explanation the largest width is from start to end later heights can compete only with larger heights, so move left and right heights until meet larger ones beats 81.42%\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n left, right, width, res = (0, len(height) - 1, len(height) - 1, 0)\n for w in range(width, 0, -1):\n if height[left] < height[right]:\n res, left = (max(res, height[left] * w), left + 1)\n else:\n res, right = (max(res, height[right] * w), right - 1)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n left = 0\n right = len(height) - 1\n water = 0\n while left < right:\n water = max(water, (right - left) * min(height[left], height[right]))\n if height[left] < height[right]:\n left += 1\n else:\n right -= 1\n return water\n<|end_body_1|>\n\n<|body_start_2|>\n left = 0\n right = len(height) - 1\n water = 0\n while left < right:\n h = min(height[left], height[right])\n water = max(water, h * (right - left))\n while height[left] <= h and left < right:\n left += 1\n while height[right] <= h and left < right:\n right -= 1\n return water\n<|end_body_2|>\n", "revision_id": "7e0e917c15d3e35f49da3a00ef395bd5ff180d79", "skeleton": "<|skeleton|>\nclass Solution:\n\n def maxArea(self, height):\n \"\"\":type height: List[int] :rtype: int the largest width is from start to end later heights can compete only with larger heights, so move smaller between left and right heights beats 97.39%\"\"\"\n <|body_0|>\n\n def maxArea1(self, height):\n \"\"\":param height: :return: https://discuss.leetcode.com/topic/14940/simple-and-clear-proof-explanation the largest width is from start to end later heights can compete only with larger heights, so move smaller between left and right heights beats 46.59%\"\"\"\n <|body_1|>\n\n def maxArea2(self, height):\n \"\"\":param height: :return: https://discuss.leetcode.com/topic/16754/simple-and-fast-c-c-with-explanation the largest width is from start to end later heights can compete only with larger heights, so move left and right heights until meet larger ones beats 81.42%\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def maxArea(self, height):\n \"\"\":type height: List[int] :rtype: int the largest width is from start to end later heights can compete only with larger heights, so move smaller between left and right heights beats 97.39%\"\"\"\n left, right, width, res = (0, len(height) - 1, len(height) - 1, 0)\n for w in range(width, 0, -1):\n if height[left] < height[right]:\n res, left = (max(res, height[left] * w), left + 1)\n else:\n res, right = (max(res, height[right] * w), right - 1)\n return res\n\n def maxArea1(self, height):\n \"\"\":param height: :return: https://discuss.leetcode.com/topic/14940/simple-and-clear-proof-explanation the largest width is from start to end later heights can compete only with larger heights, so move smaller between left and right heights beats 46.59%\"\"\"\n left = 0\n right = len(height) - 1\n water = 0\n while left < right:\n water = max(water, (right - left) * min(height[left], height[right]))\n if height[left] < height[right]:\n left += 1\n else:\n right -= 1\n return water\n\n def maxArea2(self, height):\n \"\"\":param height: :return: https://discuss.leetcode.com/topic/16754/simple-and-fast-c-c-with-explanation the largest width is from start to end later heights can compete only with larger heights, so move 
left and right heights until meet larger ones beats 81.42%\"\"\"\n left = 0\n right = len(height) - 1\n water = 0\n while left < right:\n h = min(height[left], height[right])\n water = max(water, h * (right - left))\n while height[left] <= h and left < right:\n left += 1\n while height[right] <= h and left < right:\n right -= 1\n return water\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode/011_container_with_most_water.py", "source_repo": "yao23/Machine_Learning_Playground", "split": "val", "star_events_count": 12} {"blob_id": "79ea8c9f796521822e835e9fde4c46269a4d2098", "bodies": ["self.allow_api_based_fetch = allow_api_based_fetch\nself.cluster_destroy_hmac_key = cluster_destroy_hmac_key\nself.cluster_name = cluster_name\nself.enable_cluster_destroy = enable_cluster_destroy\nself.encryption_config = encryption_config\nself.ip_preference = ip_preference\nself.ipmi_config = ipmi_config\nself.metadata_fault_tolerance = metadata_fault_tolerance\nself.network_config = network_config\nself.node_configs = node_configs\nself.trust_domain = trust_domain", "if dictionary is None:\n return None\nallow_api_based_fetch = dictionary.get('allowApiBasedFetch')\ncluster_destroy_hmac_key = dictionary.get('clusterDestroyHmacKey')\ncluster_name = dictionary.get('clusterName')\nenable_cluster_destroy = dictionary.get('enableClusterDestroy')\nencryption_config = cohesity_management_sdk.models.encryption_configuration.EncryptionConfiguration.from_dictionary(dictionary.get('encryptionConfig')) if dictionary.get('encryptionConfig') else None\nip_preference = dictionary.get('ipPreference')\nipmi_config = cohesity_management_sdk.models.ipmi_configuration.IpmiConfiguration.from_dictionary(dictionary.get('ipmiConfig')) if dictionary.get('ipmiConfig') else None\nmetadata_fault_tolerance = dictionary.get('metadataFaultTolerance')\nnetwork_config = cohesity_management_sdk.models.network_configuration.NetworkConfiguration.from_dictionary(dictionary.get('networkConfig')) if dictionary.get('networkConfig') else None\nnode_configs = None\nif dictionary.get('nodeConfigs') != None:\n node_configs = list()\n for structure in dictionary.get('nodeConfigs'):\n node_configs.append(cohesity_management_sdk.models.physical_node_configuration.PhysicalNodeConfiguration.from_dictionary(structure))\ntrust_domain = dictionary.get('trustDomain')\nreturn cls(allow_api_based_fetch, cluster_destroy_hmac_key, cluster_name, enable_cluster_destroy, encryption_config, ip_preference, ipmi_config, metadata_fault_tolerance, network_config, node_configs, trust_domain)"], "bodies_text": "<|body_start_0|>\n self.allow_api_based_fetch = allow_api_based_fetch\n self.cluster_destroy_hmac_key = cluster_destroy_hmac_key\n self.cluster_name = cluster_name\n self.enable_cluster_destroy = enable_cluster_destroy\n self.encryption_config = encryption_config\n self.ip_preference = ip_preference\n self.ipmi_config = ipmi_config\n self.metadata_fault_tolerance = metadata_fault_tolerance\n self.network_config = network_config\n self.node_configs = node_configs\n self.trust_domain = trust_domain\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n allow_api_based_fetch = dictionary.get('allowApiBasedFetch')\n cluster_destroy_hmac_key = dictionary.get('clusterDestroyHmacKey')\n cluster_name = dictionary.get('clusterName')\n enable_cluster_destroy = dictionary.get('enableClusterDestroy')\n encryption_config = 
cohesity_management_sdk.models.encryption_configuration.EncryptionConfiguration.from_dictionary(dictionary.get('encryptionConfig')) if dictionary.get('encryptionConfig') else None\n ip_preference = dictionary.get('ipPreference')\n ipmi_config = cohesity_management_sdk.models.ipmi_configuration.IpmiConfiguration.from_dictionary(dictionary.get('ipmiConfig')) if dictionary.get('ipmiConfig') else None\n metadata_fault_tolerance = dictionary.get('metadataFaultTolerance')\n network_config = cohesity_management_sdk.models.network_configuration.NetworkConfiguration.from_dictionary(dictionary.get('networkConfig')) if dictionary.get('networkConfig') else None\n node_configs = None\n if dictionary.get('nodeConfigs') != None:\n node_configs = list()\n for structure in dictionary.get('nodeConfigs'):\n node_configs.append(cohesity_management_sdk.models.physical_node_configuration.PhysicalNodeConfiguration.from_dictionary(structure))\n trust_domain = dictionary.get('trustDomain')\n return cls(allow_api_based_fetch, cluster_destroy_hmac_key, cluster_name, enable_cluster_destroy, encryption_config, ip_preference, ipmi_config, metadata_fault_tolerance, network_config, node_configs, trust_domain)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'CreatePhysicalClusterParameters' model. Specifies the parameters needed for creation of a new Cluster. Attributes: allow_api_based_fetch (bool): Specifies if API based GET should be enabled for cluster destroy params. cluster_destroy_hmac_key (string): Specifies HMAC secret key that will be used to validate OTP used for destroy request. This is b32 format of the HMAC key. This should only be set/modified during cluster creation. cluster_name (string, required): Specifies the name of the new Cluster. enable_cluster_destroy (bool): Specifies if cluster destroy op is enabled on this cluster. This should only be set/modified during cluster creation. encryption_config (Encr", "class_name": "CreatePhysicalClusterParameters", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CreatePhysicalClusterParameters:\n \"\"\"Implementation of the 'CreatePhysicalClusterParameters' model. Specifies the parameters needed for creation of a new Cluster. Attributes: allow_api_based_fetch (bool): Specifies if API based GET should be enabled for cluster destroy params. cluster_destroy_hmac_key (string): Specifies HMAC secret key that will be used to validate OTP used for destroy request. This is b32 format of the HMAC key. This should only be set/modified during cluster creation. cluster_name (string, required): Specifies the name of the new Cluster. enable_cluster_destroy (bool): Specifies if cluster destroy op is enabled on this cluster. This should only be set/modified during cluster creation. encryption_config (Encr\"\"\"\n\n def __init__(self, allow_api_based_fetch=None, cluster_destroy_hmac_key=None, cluster_name=None, enable_cluster_destroy=None, encryption_config=None, ip_preference=None, ipmi_config=None, metadata_fault_tolerance=None, network_config=None, node_configs=None, trust_domain=None):\n \"\"\"Constructor for the CreatePhysicalClusterParameters class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.allow_api_based_fetch = allow_api_based_fetch\n self.cluster_destroy_hmac_key = cluster_destroy_hmac_key\n self.cluster_name = cluster_name\n self.enable_cluster_destroy = enable_cluster_destroy\n self.encryption_config = encryption_config\n self.ip_preference = ip_preference\n self.ipmi_config = ipmi_config\n self.metadata_fault_tolerance = metadata_fault_tolerance\n self.network_config = network_config\n self.node_configs = node_configs\n self.trust_domain = trust_domain\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n allow_api_based_fetch = dictionary.get('allowApiBasedFetch')\n cluster_destroy_hmac_key = dictionary.get('clusterDestroyHmacKey')\n cluster_name = dictionary.get('clusterName')\n enable_cluster_destroy = dictionary.get('enableClusterDestroy')\n encryption_config = cohesity_management_sdk.models.encryption_configuration.EncryptionConfiguration.from_dictionary(dictionary.get('encryptionConfig')) if dictionary.get('encryptionConfig') else None\n ip_preference = dictionary.get('ipPreference')\n ipmi_config = cohesity_management_sdk.models.ipmi_configuration.IpmiConfiguration.from_dictionary(dictionary.get('ipmiConfig')) if dictionary.get('ipmiConfig') else None\n metadata_fault_tolerance = dictionary.get('metadataFaultTolerance')\n network_config = cohesity_management_sdk.models.network_configuration.NetworkConfiguration.from_dictionary(dictionary.get('networkConfig')) if dictionary.get('networkConfig') else None\n node_configs = None\n if dictionary.get('nodeConfigs') != None:\n node_configs = list()\n for structure in dictionary.get('nodeConfigs'):\n node_configs.append(cohesity_management_sdk.models.physical_node_configuration.PhysicalNodeConfiguration.from_dictionary(structure))\n trust_domain = dictionary.get('trustDomain')\n return cls(allow_api_based_fetch, cluster_destroy_hmac_key, cluster_name, enable_cluster_destroy, encryption_config, ip_preference, ipmi_config, metadata_fault_tolerance, network_config, node_configs, trust_domain)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000344", "length_bytes": 5957, "license_type": "permissive", "methods": [{"docstring": "Constructor for the CreatePhysicalClusterParameters class", "name": "__init__", "signature": "def __init__(self, allow_api_based_fetch=None, cluster_destroy_hmac_key=None, cluster_name=None, enable_cluster_destroy=None, encryption_config=None, ip_preference=None, ipmi_config=None, metadata_fault_tolerance=None, network_config=None, node_configs=None, trust_domain=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `CreatePhysicalClusterParameters` described below.\n\nClass description:\nImplementation of the 'CreatePhysicalClusterParameters' model. Specifies the parameters needed for creation of a new Cluster. Attributes: allow_api_based_fetch (bool): Specifies if API based GET should be enabled for cluster destroy params. cluster_destroy_hmac_key (string): Specifies HMAC secret key that will be used to validate OTP used for destroy request. 
This is b32 format of the HMAC key. This should only be set/modified during cluster creation. cluster_name (string, required): Specifies the name of the new Cluster. enable_cluster_destroy (bool): Specifies if cluster destroy op is enabled on this cluster. This should only be set/modified during cluster creation. encryption_config (Encr\n\nMethod signatures and docstrings:\n- def __init__(self, allow_api_based_fetch=None, cluster_destroy_hmac_key=None, cluster_name=None, enable_cluster_destroy=None, encryption_config=None, ip_preference=None, ipmi_config=None, metadata_fault_tolerance=None, network_config=None, node_configs=None, trust_domain=None): Constructor for the CreatePhysicalClusterParameters class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `CreatePhysicalClusterParameters` described below.\n\nClass description:\nImplementation of the 'CreatePhysicalClusterParameters' model. Specifies the parameters needed for creation of a new Cluster. Attributes: allow_api_based_fetch (bool): Specifies if API based GET should be enabled for cluster destroy params. cluster_destroy_hmac_key (string): Specifies HMAC secret key that will be used to validate OTP used for destroy request. This is b32 format of the HMAC key. This should only be set/modified during cluster creation. cluster_name (string, required): Specifies the name of the new Cluster. enable_cluster_destroy (bool): Specifies if cluster destroy op is enabled on this cluster. This should only be set/modified during cluster creation. encryption_config (Encr\n\nMethod signatures and docstrings:\n- def __init__(self, allow_api_based_fetch=None, cluster_destroy_hmac_key=None, cluster_name=None, enable_cluster_destroy=None, encryption_config=None, ip_preference=None, ipmi_config=None, metadata_fault_tolerance=None, network_config=None, node_configs=None, trust_domain=None): Constructor for the CreatePhysicalClusterParameters class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass CreatePhysicalClusterParameters:\n \"\"\"Implementation of the 'CreatePhysicalClusterParameters' model. Specifies the parameters needed for creation of a new Cluster. Attributes: allow_api_based_fetch (bool): Specifies if API based GET should be enabled for cluster destroy params. cluster_destroy_hmac_key (string): Specifies HMAC secret key that will be used to validate OTP used for destroy request. This is b32 format of the HMAC key. This should only be set/modified during cluster creation. cluster_name (string, required): Specifies the name of the new Cluster. enable_cluster_destroy (bool): Specifies if cluster destroy op is enabled on this cluster. This should only be set/modified during cluster creation. 
encryption_config (Encr\"\"\"\n\n def __init__(self, allow_api_based_fetch=None, cluster_destroy_hmac_key=None, cluster_name=None, enable_cluster_destroy=None, encryption_config=None, ip_preference=None, ipmi_config=None, metadata_fault_tolerance=None, network_config=None, node_configs=None, trust_domain=None):\n \"\"\"Constructor for the CreatePhysicalClusterParameters class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.allow_api_based_fetch = allow_api_based_fetch\n self.cluster_destroy_hmac_key = cluster_destroy_hmac_key\n self.cluster_name = cluster_name\n self.enable_cluster_destroy = enable_cluster_destroy\n self.encryption_config = encryption_config\n self.ip_preference = ip_preference\n self.ipmi_config = ipmi_config\n self.metadata_fault_tolerance = metadata_fault_tolerance\n self.network_config = network_config\n self.node_configs = node_configs\n self.trust_domain = trust_domain\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n allow_api_based_fetch = dictionary.get('allowApiBasedFetch')\n cluster_destroy_hmac_key = dictionary.get('clusterDestroyHmacKey')\n cluster_name = dictionary.get('clusterName')\n enable_cluster_destroy = dictionary.get('enableClusterDestroy')\n encryption_config = cohesity_management_sdk.models.encryption_configuration.EncryptionConfiguration.from_dictionary(dictionary.get('encryptionConfig')) if dictionary.get('encryptionConfig') else None\n ip_preference = dictionary.get('ipPreference')\n ipmi_config = cohesity_management_sdk.models.ipmi_configuration.IpmiConfiguration.from_dictionary(dictionary.get('ipmiConfig')) if dictionary.get('ipmiConfig') else None\n metadata_fault_tolerance = dictionary.get('metadataFaultTolerance')\n network_config = cohesity_management_sdk.models.network_configuration.NetworkConfiguration.from_dictionary(dictionary.get('networkConfig')) if dictionary.get('networkConfig') else None\n node_configs = None\n if dictionary.get('nodeConfigs') != None:\n node_configs = list()\n for structure in dictionary.get('nodeConfigs'):\n node_configs.append(cohesity_management_sdk.models.physical_node_configuration.PhysicalNodeConfiguration.from_dictionary(structure))\n trust_domain = dictionary.get('trustDomain')\n return cls(allow_api_based_fetch, cluster_destroy_hmac_key, cluster_name, enable_cluster_destroy, encryption_config, ip_preference, ipmi_config, metadata_fault_tolerance, network_config, node_configs, trust_domain)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass CreatePhysicalClusterParameters:\n \"\"\"Implementation of the 'CreatePhysicalClusterParameters' model. Specifies the parameters needed for creation of a new Cluster. Attributes: allow_api_based_fetch (bool): Specifies if API based GET should be enabled for cluster destroy params. cluster_destroy_hmac_key (string): Specifies HMAC secret key that will be used to validate OTP used for destroy request. This is b32 format of the HMAC key. This should only be set/modified during cluster creation. cluster_name (string, required): Specifies the name of the new Cluster. 
enable_cluster_destroy (bool): Specifies if cluster destroy op is enabled on this cluster. This should only be set/modified during cluster creation. encryption_config (Encr\"\"\"\n\n def __init__(self, allow_api_based_fetch=None, cluster_destroy_hmac_key=None, cluster_name=None, enable_cluster_destroy=None, encryption_config=None, ip_preference=None, ipmi_config=None, metadata_fault_tolerance=None, network_config=None, node_configs=None, trust_domain=None):\n \"\"\"Constructor for the CreatePhysicalClusterParameters class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CreatePhysicalClusterParameters:\n \"\"\"Implementation of the 'CreatePhysicalClusterParameters' model. Specifies the parameters needed for creation of a new Cluster. Attributes: allow_api_based_fetch (bool): Specifies if API based GET should be enabled for cluster destroy params. cluster_destroy_hmac_key (string): Specifies HMAC secret key that will be used to validate OTP used for destroy request. This is b32 format of the HMAC key. This should only be set/modified during cluster creation. cluster_name (string, required): Specifies the name of the new Cluster. enable_cluster_destroy (bool): Specifies if cluster destroy op is enabled on this cluster. This should only be set/modified during cluster creation. encryption_config (Encr\"\"\"\n\n def __init__(self, allow_api_based_fetch=None, cluster_destroy_hmac_key=None, cluster_name=None, enable_cluster_destroy=None, encryption_config=None, ip_preference=None, ipmi_config=None, metadata_fault_tolerance=None, network_config=None, node_configs=None, trust_domain=None):\n \"\"\"Constructor for the CreatePhysicalClusterParameters class\"\"\"\n self.allow_api_based_fetch = allow_api_based_fetch\n self.cluster_destroy_hmac_key = cluster_destroy_hmac_key\n self.cluster_name = cluster_name\n self.enable_cluster_destroy = enable_cluster_destroy\n self.encryption_config = encryption_config\n self.ip_preference = ip_preference\n self.ipmi_config = ipmi_config\n self.metadata_fault_tolerance = metadata_fault_tolerance\n self.network_config = network_config\n self.node_configs = node_configs\n self.trust_domain = trust_domain\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n allow_api_based_fetch = dictionary.get('allowApiBasedFetch')\n cluster_destroy_hmac_key = dictionary.get('clusterDestroyHmacKey')\n cluster_name = dictionary.get('clusterName')\n enable_cluster_destroy = dictionary.get('enableClusterDestroy')\n encryption_config = cohesity_management_sdk.models.encryption_configuration.EncryptionConfiguration.from_dictionary(dictionary.get('encryptionConfig')) if dictionary.get('encryptionConfig') else None\n ip_preference = dictionary.get('ipPreference')\n ipmi_config = cohesity_management_sdk.models.ipmi_configuration.IpmiConfiguration.from_dictionary(dictionary.get('ipmiConfig')) if dictionary.get('ipmiConfig') else None\n metadata_fault_tolerance = dictionary.get('metadataFaultTolerance')\n network_config = cohesity_management_sdk.models.network_configuration.NetworkConfiguration.from_dictionary(dictionary.get('networkConfig')) if dictionary.get('networkConfig') else None\n node_configs = None\n if dictionary.get('nodeConfigs') != None:\n node_configs = list()\n for structure in dictionary.get('nodeConfigs'):\n node_configs.append(cohesity_management_sdk.models.physical_node_configuration.PhysicalNodeConfiguration.from_dictionary(structure))\n trust_domain = dictionary.get('trustDomain')\n return cls(allow_api_based_fetch, cluster_destroy_hmac_key, cluster_name, enable_cluster_destroy, encryption_config, ip_preference, ipmi_config, metadata_fault_tolerance, network_config, node_configs, trust_domain)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/create_physical_cluster_parameters.py", "source_repo": "cohesity/management-sdk-python", "split": "val", "star_events_count": 24} {"blob_id": "6dab96c3aa659f167a4e9ac7ed0a706235fab183", "bodies": ["Parametre.__init__(self, 'liste', 'list')\nself.aide_courte = 'liste les bannisements'\nself.aide_longue = 'Cette commande liste les bannissements actuels, temporaires ou prolongés, de joueurs, comptes ou adresses.'", "msg = 'Bannissements actuels :\\n'\ntemporaires = []\nmaintenant = datetime.now()\nfor joueur, date in importeur.connex.bannissements_temporaires.items():\n msg_temp = joueur.nom + ' ('\n dans = (date - maintenant).total_seconds()\n mesure = 's'\n if maintenant > date or dans < 0:\n dans = 0\n elif dans >= 86400:\n dans //= 86400\n mesure = 'j'\n elif dans >= 3600:\n dans //= 3600\n mesure = 'h'\n elif dans >= 60:\n dans //= 60\n mesure = 'm'\n msg_temp += str(int(dans)) + mesure + ')'\n temporaires.append(msg_temp)\ntemporaires = ', '.join(temporaires)\nif not temporaires:\n temporaires = '|att|aucun|ff|'\njoueurs = ', '.join((j.nom for j in importeur.connex.joueurs_bannis))\nif not joueurs:\n joueurs = '|att|aucun|ff|'\nmsg += '\\n Bannissements temporaires : ' + temporaires\nmsg += '\\n Bannissements de joueurs : ' + joueurs\npersonnage << msg"], "bodies_text": "<|body_start_0|>\n Parametre.__init__(self, 'liste', 'list')\n self.aide_courte = 'liste les bannisements'\n self.aide_longue = 'Cette commande liste les bannissements actuels, temporaires ou prolongés, de joueurs, comptes ou adresses.'\n<|end_body_0|>\n\n<|body_start_1|>\n msg = 'Bannissements actuels :\\n'\n temporaires = []\n maintenant = datetime.now()\n for joueur, date in importeur.connex.bannissements_temporaires.items():\n msg_temp = joueur.nom + ' ('\n dans = (date - maintenant).total_seconds()\n mesure = 's'\n if maintenant > date or dans < 0:\n dans = 0\n elif dans >= 86400:\n dans //= 
86400\n mesure = 'j'\n elif dans >= 3600:\n dans //= 3600\n mesure = 'h'\n elif dans >= 60:\n dans //= 60\n mesure = 'm'\n msg_temp += str(int(dans)) + mesure + ')'\n temporaires.append(msg_temp)\n temporaires = ', '.join(temporaires)\n if not temporaires:\n temporaires = '|att|aucun|ff|'\n joueurs = ', '.join((j.nom for j in importeur.connex.joueurs_bannis))\n if not joueurs:\n joueurs = '|att|aucun|ff|'\n msg += '\\n Bannissements temporaires : ' + temporaires\n msg += '\\n Bannissements de joueurs : ' + joueurs\n personnage << msg\n<|end_body_1|>\n", "class_docstring": "Commande 'bannir liste'.", "class_name": "PrmListe", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PrmListe:\n \"\"\"Commande 'bannir liste'.\"\"\"\n\n def __init__(self):\n \"\"\"Constructeur du paramètre\"\"\"\n <|body_0|>\n\n def interpreter(self, personnage, dic_masques):\n \"\"\"Interprétation du paramètre\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Parametre.__init__(self, 'liste', 'list')\n self.aide_courte = 'liste les bannisements'\n self.aide_longue = 'Cette commande liste les bannissements actuels, temporaires ou prolongés, de joueurs, comptes ou adresses.'\n<|end_body_0|>\n\n<|body_start_1|>\n msg = 'Bannissements actuels :\\n'\n temporaires = []\n maintenant = datetime.now()\n for joueur, date in importeur.connex.bannissements_temporaires.items():\n msg_temp = joueur.nom + ' ('\n dans = (date - maintenant).total_seconds()\n mesure = 's'\n if maintenant > date or dans < 0:\n dans = 0\n elif dans >= 86400:\n dans //= 86400\n mesure = 'j'\n elif dans >= 3600:\n dans //= 3600\n mesure = 'h'\n elif dans >= 60:\n dans //= 60\n mesure = 'm'\n msg_temp += str(int(dans)) + mesure + ')'\n temporaires.append(msg_temp)\n temporaires = ', '.join(temporaires)\n if not temporaires:\n temporaires = '|att|aucun|ff|'\n joueurs = ', '.join((j.nom for j in importeur.connex.joueurs_bannis))\n if not joueurs:\n joueurs = '|att|aucun|ff|'\n msg += '\\n Bannissements temporaires : ' + temporaires\n msg += '\\n Bannissements de joueurs : ' + joueurs\n personnage << msg\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000345", "length_bytes": 3416, "license_type": "permissive", "methods": [{"docstring": "Constructeur du paramètre", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Interprétation du paramètre", "name": "interpreter", "signature": "def interpreter(self, personnage, dic_masques)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000053", "prompt": "Implement the Python class `PrmListe` described below.\n\nClass description:\nCommande 'bannir liste'.\n\nMethod signatures and docstrings:\n- def __init__(self): Constructeur du paramètre\n- def interpreter(self, personnage, dic_masques): Interprétation du paramètre", "prompted_full_text": "Implement the Python class `PrmListe` described below.\n\nClass description:\nCommande 'bannir liste'.\n\nMethod signatures and docstrings:\n- def __init__(self): Constructeur du paramètre\n- def interpreter(self, personnage, dic_masques): Interprétation du paramètre\n\n<|skeleton|>\nclass PrmListe:\n \"\"\"Commande 'bannir liste'.\"\"\"\n\n def __init__(self):\n \"\"\"Constructeur du paramètre\"\"\"\n <|body_0|>\n\n def interpreter(self, personnage, dic_masques):\n \"\"\"Interprétation du paramètre\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Parametre.__init__(self, 'liste', 'list')\n self.aide_courte = 'liste les 
bannisements'\n self.aide_longue = 'Cette commande liste les bannissements actuels, temporaires ou prolongés, de joueurs, comptes ou adresses.'\n<|end_body_0|>\n\n<|body_start_1|>\n msg = 'Bannissements actuels :\\n'\n temporaires = []\n maintenant = datetime.now()\n for joueur, date in importeur.connex.bannissements_temporaires.items():\n msg_temp = joueur.nom + ' ('\n dans = (date - maintenant).total_seconds()\n mesure = 's'\n if maintenant > date or dans < 0:\n dans = 0\n elif dans >= 86400:\n dans //= 86400\n mesure = 'j'\n elif dans >= 3600:\n dans //= 3600\n mesure = 'h'\n elif dans >= 60:\n dans //= 60\n mesure = 'm'\n msg_temp += str(int(dans)) + mesure + ')'\n temporaires.append(msg_temp)\n temporaires = ', '.join(temporaires)\n if not temporaires:\n temporaires = '|att|aucun|ff|'\n joueurs = ', '.join((j.nom for j in importeur.connex.joueurs_bannis))\n if not joueurs:\n joueurs = '|att|aucun|ff|'\n msg += '\\n Bannissements temporaires : ' + temporaires\n msg += '\\n Bannissements de joueurs : ' + joueurs\n personnage << msg\n<|end_body_1|>\n", "revision_id": "7e93bff08cdf891352efba587e89c40f3b4a2301", "skeleton": "<|skeleton|>\nclass PrmListe:\n \"\"\"Commande 'bannir liste'.\"\"\"\n\n def __init__(self):\n \"\"\"Constructeur du paramètre\"\"\"\n <|body_0|>\n\n def interpreter(self, personnage, dic_masques):\n \"\"\"Interprétation du paramètre\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PrmListe:\n \"\"\"Commande 'bannir liste'.\"\"\"\n\n def __init__(self):\n \"\"\"Constructeur du paramètre\"\"\"\n Parametre.__init__(self, 'liste', 'list')\n self.aide_courte = 'liste les bannisements'\n self.aide_longue = 'Cette commande liste les bannissements actuels, temporaires ou prolongés, de joueurs, comptes ou adresses.'\n\n def interpreter(self, personnage, dic_masques):\n \"\"\"Interprétation du paramètre\"\"\"\n msg = 'Bannissements actuels :\\n'\n temporaires = []\n maintenant = datetime.now()\n for joueur, date in importeur.connex.bannissements_temporaires.items():\n msg_temp = joueur.nom + ' ('\n dans = (date - maintenant).total_seconds()\n mesure = 's'\n if maintenant > date or dans < 0:\n dans = 0\n elif dans >= 86400:\n dans //= 86400\n mesure = 'j'\n elif dans >= 3600:\n dans //= 3600\n mesure = 'h'\n elif dans >= 60:\n dans //= 60\n mesure = 'm'\n msg_temp += str(int(dans)) + mesure + ')'\n temporaires.append(msg_temp)\n temporaires = ', '.join(temporaires)\n if not temporaires:\n temporaires = '|att|aucun|ff|'\n joueurs = ', '.join((j.nom for j in importeur.connex.joueurs_bannis))\n if not joueurs:\n joueurs = '|att|aucun|ff|'\n msg += '\\n Bannissements temporaires : ' + temporaires\n msg += '\\n Bannissements de joueurs : ' + joueurs\n personnage << msg\n", "source": "the_stack_v2_python_sparse", "source_path": "src/primaires/joueur/commandes/bannir/liste.py", "source_repo": "vincent-lg/tsunami", "split": "val", "star_events_count": 5} {"blob_id": "d4d0081531fe0da503738abd16ff13fff8f9bc23", "bodies": ["profile_result = get_profile_data(kwargs)\nif not profile_result.ok:\n return WebsiteErrorView.website_error(request, WebsiteError.PROFILE_NOT_FOUND, {'profile_oid': profile_result.oid_org})\nroot_oid = get_root_oid(request)\nprofile_model = profile_result.model\nchannel_model = ChannelManager.get_channel_oid(profile_model.channel_oid)\npermissions = ProfileManager.get_user_permissions(channel_model.id, root_oid)\nprofile_ctrl = 
ProfileHelper.get_user_profile_controls(channel_model, profile_model.id, root_oid, permissions)\nreturn render_template(request, _('Profile Info - {}').format(profile_model.name), 'info/profile.html', {'profile_data': profile_model, 'profile_controls': profile_ctrl, 'perm_cats': list(ProfilePermission), 'is_default': profile_model.id == channel_model.config.default_profile_oid}, nav_param=kwargs)", "sender_oid = get_root_oid(request)\nprofile_result = get_profile_data(kwargs)\nif not profile_result.ok:\n return HttpResponse(status=404)\nchannel_model = ChannelManager.get_channel_oid(profile_result.model.channel_oid)\naction = InfoPageActionControl.parse(request.POST.get('action'))\ntarget_uid = safe_cast(request.POST.get('uid'), ObjectId)\nif not action.is_argument_valid(target_uid):\n return HttpResponse(status=400)\npermissions = ProfileManager.get_user_permissions(channel_model.id, sender_oid)\nprofile_oid = profile_result.model.id\nif action == InfoPageActionControl.DETACH:\n return InfoPageActionControl.action_detach(request, channel_model.id, sender_oid, target_uid, permissions, profile_oid)\nif action == InfoPageActionControl.DELETE:\n return InfoPageActionControl.action_delete(request, channel_model, profile_oid)\nreturn HttpResponse(status=501)"], "bodies_text": "<|body_start_0|>\n profile_result = get_profile_data(kwargs)\n if not profile_result.ok:\n return WebsiteErrorView.website_error(request, WebsiteError.PROFILE_NOT_FOUND, {'profile_oid': profile_result.oid_org})\n root_oid = get_root_oid(request)\n profile_model = profile_result.model\n channel_model = ChannelManager.get_channel_oid(profile_model.channel_oid)\n permissions = ProfileManager.get_user_permissions(channel_model.id, root_oid)\n profile_ctrl = ProfileHelper.get_user_profile_controls(channel_model, profile_model.id, root_oid, permissions)\n return render_template(request, _('Profile Info - {}').format(profile_model.name), 'info/profile.html', {'profile_data': profile_model, 'profile_controls': profile_ctrl, 'perm_cats': list(ProfilePermission), 'is_default': profile_model.id == channel_model.config.default_profile_oid}, nav_param=kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n sender_oid = get_root_oid(request)\n profile_result = get_profile_data(kwargs)\n if not profile_result.ok:\n return HttpResponse(status=404)\n channel_model = ChannelManager.get_channel_oid(profile_result.model.channel_oid)\n action = InfoPageActionControl.parse(request.POST.get('action'))\n target_uid = safe_cast(request.POST.get('uid'), ObjectId)\n if not action.is_argument_valid(target_uid):\n return HttpResponse(status=400)\n permissions = ProfileManager.get_user_permissions(channel_model.id, sender_oid)\n profile_oid = profile_result.model.id\n if action == InfoPageActionControl.DETACH:\n return InfoPageActionControl.action_detach(request, channel_model.id, sender_oid, target_uid, permissions, profile_oid)\n if action == InfoPageActionControl.DELETE:\n return InfoPageActionControl.action_delete(request, channel_model, profile_oid)\n return HttpResponse(status=501)\n<|end_body_1|>\n", "class_docstring": "View to see the profile info.", "class_name": "ProfileInfoView", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProfileInfoView:\n \"\"\"View to see the profile info.\"\"\"\n\n def get(self, request, **kwargs):\n \"\"\"Page to see the profile info.\"\"\"\n <|body_0|>\n\n def post(self, request, **kwargs):\n \"\"\"Handle the action request sent from the profile info 
page.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n profile_result = get_profile_data(kwargs)\n if not profile_result.ok:\n return WebsiteErrorView.website_error(request, WebsiteError.PROFILE_NOT_FOUND, {'profile_oid': profile_result.oid_org})\n root_oid = get_root_oid(request)\n profile_model = profile_result.model\n channel_model = ChannelManager.get_channel_oid(profile_model.channel_oid)\n permissions = ProfileManager.get_user_permissions(channel_model.id, root_oid)\n profile_ctrl = ProfileHelper.get_user_profile_controls(channel_model, profile_model.id, root_oid, permissions)\n return render_template(request, _('Profile Info - {}').format(profile_model.name), 'info/profile.html', {'profile_data': profile_model, 'profile_controls': profile_ctrl, 'perm_cats': list(ProfilePermission), 'is_default': profile_model.id == channel_model.config.default_profile_oid}, nav_param=kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n sender_oid = get_root_oid(request)\n profile_result = get_profile_data(kwargs)\n if not profile_result.ok:\n return HttpResponse(status=404)\n channel_model = ChannelManager.get_channel_oid(profile_result.model.channel_oid)\n action = InfoPageActionControl.parse(request.POST.get('action'))\n target_uid = safe_cast(request.POST.get('uid'), ObjectId)\n if not action.is_argument_valid(target_uid):\n return HttpResponse(status=400)\n permissions = ProfileManager.get_user_permissions(channel_model.id, sender_oid)\n profile_oid = profile_result.model.id\n if action == InfoPageActionControl.DETACH:\n return InfoPageActionControl.action_detach(request, channel_model.id, sender_oid, target_uid, permissions, profile_oid)\n if action == InfoPageActionControl.DELETE:\n return InfoPageActionControl.action_delete(request, channel_model, profile_oid)\n return HttpResponse(status=501)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000346", "length_bytes": 7451, "license_type": "permissive", "methods": [{"docstring": "Page to see the profile info.", "name": "get", "signature": "def get(self, request, **kwargs)"}, {"docstring": "Handle the action request sent from the profile info page.", "name": "post", "signature": "def post(self, request, **kwargs)"}], "n_methods": 2, "prompt": "Implement the Python class `ProfileInfoView` described below.\n\nClass description:\nView to see the profile info.\n\nMethod signatures and docstrings:\n- def get(self, request, **kwargs): Page to see the profile info.\n- def post(self, request, **kwargs): Handle the action request sent from the profile info page.", "prompted_full_text": "Implement the Python class `ProfileInfoView` described below.\n\nClass description:\nView to see the profile info.\n\nMethod signatures and docstrings:\n- def get(self, request, **kwargs): Page to see the profile info.\n- def post(self, request, **kwargs): Handle the action request sent from the profile info page.\n\n<|skeleton|>\nclass ProfileInfoView:\n \"\"\"View to see the profile info.\"\"\"\n\n def get(self, request, **kwargs):\n \"\"\"Page to see the profile info.\"\"\"\n <|body_0|>\n\n def post(self, request, **kwargs):\n \"\"\"Handle the action request sent from the profile info page.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n profile_result = get_profile_data(kwargs)\n if not profile_result.ok:\n return WebsiteErrorView.website_error(request, WebsiteError.PROFILE_NOT_FOUND, {'profile_oid': profile_result.oid_org})\n root_oid = get_root_oid(request)\n profile_model = profile_result.model\n channel_model = 
ChannelManager.get_channel_oid(profile_model.channel_oid)\n permissions = ProfileManager.get_user_permissions(channel_model.id, root_oid)\n profile_ctrl = ProfileHelper.get_user_profile_controls(channel_model, profile_model.id, root_oid, permissions)\n return render_template(request, _('Profile Info - {}').format(profile_model.name), 'info/profile.html', {'profile_data': profile_model, 'profile_controls': profile_ctrl, 'perm_cats': list(ProfilePermission), 'is_default': profile_model.id == channel_model.config.default_profile_oid}, nav_param=kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n sender_oid = get_root_oid(request)\n profile_result = get_profile_data(kwargs)\n if not profile_result.ok:\n return HttpResponse(status=404)\n channel_model = ChannelManager.get_channel_oid(profile_result.model.channel_oid)\n action = InfoPageActionControl.parse(request.POST.get('action'))\n target_uid = safe_cast(request.POST.get('uid'), ObjectId)\n if not action.is_argument_valid(target_uid):\n return HttpResponse(status=400)\n permissions = ProfileManager.get_user_permissions(channel_model.id, sender_oid)\n profile_oid = profile_result.model.id\n if action == InfoPageActionControl.DETACH:\n return InfoPageActionControl.action_detach(request, channel_model.id, sender_oid, target_uid, permissions, profile_oid)\n if action == InfoPageActionControl.DELETE:\n return InfoPageActionControl.action_delete(request, channel_model, profile_oid)\n return HttpResponse(status=501)\n<|end_body_1|>\n", "revision_id": "c7da1e91783dce3a2b71b955b3a22b68db9056cf", "skeleton": "<|skeleton|>\nclass ProfileInfoView:\n \"\"\"View to see the profile info.\"\"\"\n\n def get(self, request, **kwargs):\n \"\"\"Page to see the profile info.\"\"\"\n <|body_0|>\n\n def post(self, request, **kwargs):\n \"\"\"Handle the action request sent from the profile info page.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ProfileInfoView:\n \"\"\"View to see the profile info.\"\"\"\n\n def get(self, request, **kwargs):\n \"\"\"Page to see the profile info.\"\"\"\n profile_result = get_profile_data(kwargs)\n if not profile_result.ok:\n return WebsiteErrorView.website_error(request, WebsiteError.PROFILE_NOT_FOUND, {'profile_oid': profile_result.oid_org})\n root_oid = get_root_oid(request)\n profile_model = profile_result.model\n channel_model = ChannelManager.get_channel_oid(profile_model.channel_oid)\n permissions = ProfileManager.get_user_permissions(channel_model.id, root_oid)\n profile_ctrl = ProfileHelper.get_user_profile_controls(channel_model, profile_model.id, root_oid, permissions)\n return render_template(request, _('Profile Info - {}').format(profile_model.name), 'info/profile.html', {'profile_data': profile_model, 'profile_controls': profile_ctrl, 'perm_cats': list(ProfilePermission), 'is_default': profile_model.id == channel_model.config.default_profile_oid}, nav_param=kwargs)\n\n def post(self, request, **kwargs):\n \"\"\"Handle the action request sent from the profile info page.\"\"\"\n sender_oid = get_root_oid(request)\n profile_result = get_profile_data(kwargs)\n if not profile_result.ok:\n return HttpResponse(status=404)\n channel_model = ChannelManager.get_channel_oid(profile_result.model.channel_oid)\n action = InfoPageActionControl.parse(request.POST.get('action'))\n target_uid = safe_cast(request.POST.get('uid'), ObjectId)\n if not action.is_argument_valid(target_uid):\n return HttpResponse(status=400)\n 
permissions = ProfileManager.get_user_permissions(channel_model.id, sender_oid)\n profile_oid = profile_result.model.id\n if action == InfoPageActionControl.DETACH:\n return InfoPageActionControl.action_detach(request, channel_model.id, sender_oid, target_uid, permissions, profile_oid)\n if action == InfoPageActionControl.DELETE:\n return InfoPageActionControl.action_delete(request, channel_model, profile_oid)\n return HttpResponse(status=501)\n", "source": "the_stack_v2_python_sparse", "source_path": "JellyBot/views/info/profile.py", "source_repo": "RxJellyBot/Jelly-Bot", "split": "val", "star_events_count": 5} {"blob_id": "fda3e0c1928384564a0e661b68a02f6458ebf50e", "bodies": ["super().__init__(message.as_dict(), namespaced)\nself._message = message\nself.__module__ = 'libpod'", "if hasattr(self._message, method):\n return getattr(self._message, method)\ntry:\n return self._message.parameters()[method]\nexcept KeyError as ex:\n raise AttributeError('No such attribute: {}'.format(method)) from ex"], "bodies_text": "<|body_start_0|>\n super().__init__(message.as_dict(), namespaced)\n self._message = message\n self.__module__ = 'libpod'\n<|end_body_0|>\n\n<|body_start_1|>\n if hasattr(self._message, method):\n return getattr(self._message, method)\n try:\n return self._message.parameters()[method]\n except KeyError as ex:\n raise AttributeError('No such attribute: {}'.format(method)) from ex\n<|end_body_1|>\n", "class_docstring": "Class to Proxy VarlinkError methods.", "class_name": "VarlinkErrorProxy", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VarlinkErrorProxy:\n \"\"\"Class to Proxy VarlinkError methods.\"\"\"\n\n def __init__(self, message, namespaced=False):\n \"\"\"Construct proxy from Exception.\"\"\"\n <|body_0|>\n\n def __getattr__(self, method):\n \"\"\"Return attribute from proxied Exception.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(message.as_dict(), namespaced)\n self._message = message\n self.__module__ = 'libpod'\n<|end_body_0|>\n\n<|body_start_1|>\n if hasattr(self._message, method):\n return getattr(self._message, method)\n try:\n return self._message.parameters()[method]\n except KeyError as ex:\n raise AttributeError('No such attribute: {}'.format(method)) from ex\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000347", "length_bytes": 2437, "license_type": "permissive", "methods": [{"docstring": "Construct proxy from Exception.", "name": "__init__", "signature": "def __init__(self, message, namespaced=False)"}, {"docstring": "Return attribute from proxied Exception.", "name": "__getattr__", "signature": "def __getattr__(self, method)"}], "n_methods": 2, "prompt": "Implement the Python class `VarlinkErrorProxy` described below.\n\nClass description:\nClass to Proxy VarlinkError methods.\n\nMethod signatures and docstrings:\n- def __init__(self, message, namespaced=False): Construct proxy from Exception.\n- def __getattr__(self, method): Return attribute from proxied Exception.", "prompted_full_text": "Implement the Python class `VarlinkErrorProxy` described below.\n\nClass description:\nClass to Proxy VarlinkError methods.\n\nMethod signatures and docstrings:\n- def __init__(self, message, namespaced=False): Construct proxy from Exception.\n- def __getattr__(self, method): Return attribute from proxied Exception.\n\n<|skeleton|>\nclass VarlinkErrorProxy:\n \"\"\"Class to Proxy VarlinkError methods.\"\"\"\n\n def __init__(self, message, 
namespaced=False):\n \"\"\"Construct proxy from Exception.\"\"\"\n <|body_0|>\n\n def __getattr__(self, method):\n \"\"\"Return attribute from proxied Exception.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(message.as_dict(), namespaced)\n self._message = message\n self.__module__ = 'libpod'\n<|end_body_0|>\n\n<|body_start_1|>\n if hasattr(self._message, method):\n return getattr(self._message, method)\n try:\n return self._message.parameters()[method]\n except KeyError as ex:\n raise AttributeError('No such attribute: {}'.format(method)) from ex\n<|end_body_1|>\n", "revision_id": "ce2a8734f8b4203ec38078207297062263c49f6f", "skeleton": "<|skeleton|>\nclass VarlinkErrorProxy:\n \"\"\"Class to Proxy VarlinkError methods.\"\"\"\n\n def __init__(self, message, namespaced=False):\n \"\"\"Construct proxy from Exception.\"\"\"\n <|body_0|>\n\n def __getattr__(self, method):\n \"\"\"Return attribute from proxied Exception.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class VarlinkErrorProxy:\n \"\"\"Class to Proxy VarlinkError methods.\"\"\"\n\n def __init__(self, message, namespaced=False):\n \"\"\"Construct proxy from Exception.\"\"\"\n super().__init__(message.as_dict(), namespaced)\n self._message = message\n self.__module__ = 'libpod'\n\n def __getattr__(self, method):\n \"\"\"Return attribute from proxied Exception.\"\"\"\n if hasattr(self._message, method):\n return getattr(self._message, method)\n try:\n return self._message.parameters()[method]\n except KeyError as ex:\n raise AttributeError('No such attribute: {}'.format(method)) from ex\n", "source": "the_stack_v2_python_sparse", "source_path": "tobiko/podman/_podman1/libs/errors.py", "source_repo": "FedericoRessi/tobiko", "split": "val", "star_events_count": 1} {"blob_id": "e484535231d77f3b6d743eddd8c9a7547fe78274", "bodies": ["super(Reranker, self).__init__(auto_prefix=False)\nself.encoder = Albert(batch_size)\nparam_dict = load_checkpoint(encoder_ck_file)\nnot_load_params_1, _ = load_param_into_net(self.encoder, param_dict)\nprint(f're-ranker albert not loaded params: {not_load_params_1}')\nself.no_answer_mlp = Rerank_Downstream()\nparam_dict = load_checkpoint(downstream_ck_file)\nnot_load_params_2, _ = load_param_into_net(self.no_answer_mlp, param_dict)\nprint(f're-ranker downstream not loaded params: {not_load_params_2}')", "state = self.encoder(input_ids, attn_mask, token_type_ids)\nstate = state[:, 0, :]\nno_answer = self.no_answer_mlp(state)\nreturn no_answer"], "bodies_text": "<|body_start_0|>\n super(Reranker, self).__init__(auto_prefix=False)\n self.encoder = Albert(batch_size)\n param_dict = load_checkpoint(encoder_ck_file)\n not_load_params_1, _ = load_param_into_net(self.encoder, param_dict)\n print(f're-ranker albert not loaded params: {not_load_params_1}')\n self.no_answer_mlp = Rerank_Downstream()\n param_dict = load_checkpoint(downstream_ck_file)\n not_load_params_2, _ = load_param_into_net(self.no_answer_mlp, param_dict)\n print(f're-ranker downstream not loaded params: {not_load_params_2}')\n<|end_body_0|>\n\n<|body_start_1|>\n state = self.encoder(input_ids, attn_mask, token_type_ids)\n state = state[:, 0, :]\n no_answer = self.no_answer_mlp(state)\n return no_answer\n<|end_body_1|>\n", "class_docstring": "Reranker model", "class_name": "Reranker", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference", 
"LicenseRef-scancode-proprietary-license"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Reranker:\n \"\"\"Reranker model\"\"\"\n\n def __init__(self, batch_size, encoder_ck_file, downstream_ck_file):\n \"\"\"init function\"\"\"\n <|body_0|>\n\n def construct(self, input_ids, attn_mask, token_type_ids):\n \"\"\"construct function\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Reranker, self).__init__(auto_prefix=False)\n self.encoder = Albert(batch_size)\n param_dict = load_checkpoint(encoder_ck_file)\n not_load_params_1, _ = load_param_into_net(self.encoder, param_dict)\n print(f're-ranker albert not loaded params: {not_load_params_1}')\n self.no_answer_mlp = Rerank_Downstream()\n param_dict = load_checkpoint(downstream_ck_file)\n not_load_params_2, _ = load_param_into_net(self.no_answer_mlp, param_dict)\n print(f're-ranker downstream not loaded params: {not_load_params_2}')\n<|end_body_0|>\n\n<|body_start_1|>\n state = self.encoder(input_ids, attn_mask, token_type_ids)\n state = state[:, 0, :]\n no_answer = self.no_answer_mlp(state)\n return no_answer\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000348", "length_bytes": 1850, "license_type": "permissive", "methods": [{"docstring": "init function", "name": "__init__", "signature": "def __init__(self, batch_size, encoder_ck_file, downstream_ck_file)"}, {"docstring": "construct function", "name": "construct", "signature": "def construct(self, input_ids, attn_mask, token_type_ids)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003397", "prompt": "Implement the Python class `Reranker` described below.\n\nClass description:\nReranker model\n\nMethod signatures and docstrings:\n- def __init__(self, batch_size, encoder_ck_file, downstream_ck_file): init function\n- def construct(self, input_ids, attn_mask, token_type_ids): construct function", "prompted_full_text": "Implement the Python class `Reranker` described below.\n\nClass description:\nReranker model\n\nMethod signatures and docstrings:\n- def __init__(self, batch_size, encoder_ck_file, downstream_ck_file): init function\n- def construct(self, input_ids, attn_mask, token_type_ids): construct function\n\n<|skeleton|>\nclass Reranker:\n \"\"\"Reranker model\"\"\"\n\n def __init__(self, batch_size, encoder_ck_file, downstream_ck_file):\n \"\"\"init function\"\"\"\n <|body_0|>\n\n def construct(self, input_ids, attn_mask, token_type_ids):\n \"\"\"construct function\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Reranker, self).__init__(auto_prefix=False)\n self.encoder = Albert(batch_size)\n param_dict = load_checkpoint(encoder_ck_file)\n not_load_params_1, _ = load_param_into_net(self.encoder, param_dict)\n print(f're-ranker albert not loaded params: {not_load_params_1}')\n self.no_answer_mlp = Rerank_Downstream()\n param_dict = load_checkpoint(downstream_ck_file)\n not_load_params_2, _ = load_param_into_net(self.no_answer_mlp, param_dict)\n print(f're-ranker downstream not loaded params: {not_load_params_2}')\n<|end_body_0|>\n\n<|body_start_1|>\n state = self.encoder(input_ids, attn_mask, token_type_ids)\n state = state[:, 0, :]\n no_answer = self.no_answer_mlp(state)\n return no_answer\n<|end_body_1|>\n", "revision_id": "eab643f51336dbf7d711f02d27e6516e5affee59", "skeleton": "<|skeleton|>\nclass Reranker:\n \"\"\"Reranker model\"\"\"\n\n def __init__(self, batch_size, encoder_ck_file, downstream_ck_file):\n \"\"\"init function\"\"\"\n <|body_0|>\n\n def construct(self, 
input_ids, attn_mask, token_type_ids):\n \"\"\"construct function\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Reranker:\n \"\"\"Reranker model\"\"\"\n\n def __init__(self, batch_size, encoder_ck_file, downstream_ck_file):\n \"\"\"init function\"\"\"\n super(Reranker, self).__init__(auto_prefix=False)\n self.encoder = Albert(batch_size)\n param_dict = load_checkpoint(encoder_ck_file)\n not_load_params_1, _ = load_param_into_net(self.encoder, param_dict)\n print(f're-ranker albert not loaded params: {not_load_params_1}')\n self.no_answer_mlp = Rerank_Downstream()\n param_dict = load_checkpoint(downstream_ck_file)\n not_load_params_2, _ = load_param_into_net(self.no_answer_mlp, param_dict)\n print(f're-ranker downstream not loaded params: {not_load_params_2}')\n\n def construct(self, input_ids, attn_mask, token_type_ids):\n \"\"\"construct function\"\"\"\n state = self.encoder(input_ids, attn_mask, token_type_ids)\n state = state[:, 0, :]\n no_answer = self.no_answer_mlp(state)\n return no_answer\n", "source": "the_stack_v2_python_sparse", "source_path": "research/nlp/tprr/src/reranker.py", "source_repo": "mindspore-ai/models", "split": "val", "star_events_count": 301} {"blob_id": "5cb65b2bb996e48d093c3ff169ee46193a470b88", "bodies": ["input_lines = [\"goog.provide('package.xyz');\", '/** @suppress {extraprovide} **/', \"goog.provide('package.abcd');\"]\nexpected_lines = ['/** @suppress {extraprovide} **/', \"goog.provide('package.abcd');\", \"goog.provide('package.xyz');\"]\ntoken = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\nsorter = requireprovidesorter.RequireProvideSorter()\nfixed_provide_string = sorter.GetFixedProvideString(token)\nself.assertEquals(expected_lines, fixed_provide_string.splitlines())", "input_lines = [\"goog.require('package.xyz');\", '/** This is needed for scope. **/', \"goog.require('package.abcd');\"]\nexpected_lines = ['/** This is needed for scope. 
**/', \"goog.require('package.abcd');\", \"goog.require('package.xyz');\"]\ntoken = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\nsorter = requireprovidesorter.RequireProvideSorter()\nfixed_require_string = sorter.GetFixedRequireString(token)\nself.assertEquals(expected_lines, fixed_require_string.splitlines())", "input_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassB');\", '', \"goog.require('package.subpackage.ClassA');\"]\nexpected_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassA');\", \"goog.require('package.subpackage.ClassB');\"]\ntoken = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\nsorter = requireprovidesorter.RequireProvideSorter()\nsorter.FixRequires(token)\nself.assertEquals(expected_lines, self._GetLines(token))", "input_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassB');\", \"goog.require('package.subpackage.ClassA');\"]\nexpected_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassA');\", \"goog.require('package.subpackage.ClassB');\"]\ninput_lines.insert(position, 'goog.setTestOnly();')\nexpected_lines.insert(position, 'goog.setTestOnly();')\ntoken = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\nsorter = requireprovidesorter.RequireProvideSorter()\nsorter.FixRequires(token)\nself.assertEquals(expected_lines, self._GetLines(token))", "self.fixRequiresTest_withTestOnly(position=0)\nself.fixRequiresTest_withTestOnly(position=1)\nself.fixRequiresTest_withTestOnly(position=2)\nself.fixRequiresTest_withTestOnly(position=4)", "lines = []\nline = ''\nwhile token:\n line += token.string\n if token.IsLastInLine():\n lines.append(line)\n line = ''\n token = token.next\nreturn lines"], "bodies_text": "<|body_start_0|>\n input_lines = [\"goog.provide('package.xyz');\", '/** @suppress {extraprovide} **/', \"goog.provide('package.abcd');\"]\n expected_lines = ['/** @suppress {extraprovide} **/', \"goog.provide('package.abcd');\", \"goog.provide('package.xyz');\"]\n token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\n sorter = requireprovidesorter.RequireProvideSorter()\n fixed_provide_string = sorter.GetFixedProvideString(token)\n self.assertEquals(expected_lines, fixed_provide_string.splitlines())\n<|end_body_0|>\n\n<|body_start_1|>\n input_lines = [\"goog.require('package.xyz');\", '/** This is needed for scope. **/', \"goog.require('package.abcd');\"]\n expected_lines = ['/** This is needed for scope. 
**/', \"goog.require('package.abcd');\", \"goog.require('package.xyz');\"]\n token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\n sorter = requireprovidesorter.RequireProvideSorter()\n fixed_require_string = sorter.GetFixedRequireString(token)\n self.assertEquals(expected_lines, fixed_require_string.splitlines())\n<|end_body_1|>\n\n<|body_start_2|>\n input_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassB');\", '', \"goog.require('package.subpackage.ClassA');\"]\n expected_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassA');\", \"goog.require('package.subpackage.ClassB');\"]\n token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\n sorter = requireprovidesorter.RequireProvideSorter()\n sorter.FixRequires(token)\n self.assertEquals(expected_lines, self._GetLines(token))\n<|end_body_2|>\n\n<|body_start_3|>\n input_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassB');\", \"goog.require('package.subpackage.ClassA');\"]\n expected_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassA');\", \"goog.require('package.subpackage.ClassB');\"]\n input_lines.insert(position, 'goog.setTestOnly();')\n expected_lines.insert(position, 'goog.setTestOnly();')\n token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\n sorter = requireprovidesorter.RequireProvideSorter()\n sorter.FixRequires(token)\n self.assertEquals(expected_lines, self._GetLines(token))\n<|end_body_3|>\n\n<|body_start_4|>\n self.fixRequiresTest_withTestOnly(position=0)\n self.fixRequiresTest_withTestOnly(position=1)\n self.fixRequiresTest_withTestOnly(position=2)\n self.fixRequiresTest_withTestOnly(position=4)\n<|end_body_4|>\n\n<|body_start_5|>\n lines = []\n line = ''\n while token:\n line += token.string\n if token.IsLastInLine():\n lines.append(line)\n line = ''\n token = token.next\n return lines\n<|end_body_5|>\n", "class_docstring": "Tests for RequireProvideSorter.", "class_name": "RequireProvideSorterTest", "detected_licenses": ["BSD-3-Clause", "Apache-2.0", "LGPL-2.0-or-later", "GPL-1.0-or-later", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RequireProvideSorterTest:\n \"\"\"Tests for RequireProvideSorter.\"\"\"\n\n def testGetFixedProvideString(self):\n \"\"\"Tests that fixed string constains proper comments also.\"\"\"\n <|body_0|>\n\n def testGetFixedRequireString(self):\n \"\"\"Tests that fixed string constains proper comments also.\"\"\"\n <|body_1|>\n\n def testFixRequires_removeBlankLines(self):\n \"\"\"Tests that blank lines are omitted in sorted goog.require statements.\"\"\"\n <|body_2|>\n\n def fixRequiresTest_withTestOnly(self, position):\n \"\"\"Regression-tests sorting even with a goog.setTestOnly statement. Args: position: The position in the list where to insert the goog.setTestOnly statement. 
Will be used to test all possible combinations for this test.\"\"\"\n <|body_3|>\n\n def testFixRequires_withTestOnly(self):\n \"\"\"Regression-tests sorting even after a goog.setTestOnly statement.\"\"\"\n <|body_4|>\n\n def _GetLines(self, token):\n \"\"\"Returns an array of lines based on the specified token stream.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n input_lines = [\"goog.provide('package.xyz');\", '/** @suppress {extraprovide} **/', \"goog.provide('package.abcd');\"]\n expected_lines = ['/** @suppress {extraprovide} **/', \"goog.provide('package.abcd');\", \"goog.provide('package.xyz');\"]\n token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\n sorter = requireprovidesorter.RequireProvideSorter()\n fixed_provide_string = sorter.GetFixedProvideString(token)\n self.assertEquals(expected_lines, fixed_provide_string.splitlines())\n<|end_body_0|>\n\n<|body_start_1|>\n input_lines = [\"goog.require('package.xyz');\", '/** This is needed for scope. **/', \"goog.require('package.abcd');\"]\n expected_lines = ['/** This is needed for scope. **/', \"goog.require('package.abcd');\", \"goog.require('package.xyz');\"]\n token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\n sorter = requireprovidesorter.RequireProvideSorter()\n fixed_require_string = sorter.GetFixedRequireString(token)\n self.assertEquals(expected_lines, fixed_require_string.splitlines())\n<|end_body_1|>\n\n<|body_start_2|>\n input_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassB');\", '', \"goog.require('package.subpackage.ClassA');\"]\n expected_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassA');\", \"goog.require('package.subpackage.ClassB');\"]\n token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\n sorter = requireprovidesorter.RequireProvideSorter()\n sorter.FixRequires(token)\n self.assertEquals(expected_lines, self._GetLines(token))\n<|end_body_2|>\n\n<|body_start_3|>\n input_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassB');\", \"goog.require('package.subpackage.ClassA');\"]\n expected_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassA');\", \"goog.require('package.subpackage.ClassB');\"]\n input_lines.insert(position, 'goog.setTestOnly();')\n expected_lines.insert(position, 'goog.setTestOnly();')\n token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\n sorter = requireprovidesorter.RequireProvideSorter()\n sorter.FixRequires(token)\n self.assertEquals(expected_lines, self._GetLines(token))\n<|end_body_3|>\n\n<|body_start_4|>\n self.fixRequiresTest_withTestOnly(position=0)\n self.fixRequiresTest_withTestOnly(position=1)\n self.fixRequiresTest_withTestOnly(position=2)\n self.fixRequiresTest_withTestOnly(position=4)\n<|end_body_4|>\n\n<|body_start_5|>\n lines = []\n line = ''\n while token:\n line += token.string\n if token.IsLastInLine():\n lines.append(line)\n line = ''\n token = token.next\n return lines\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_val_000349", "length_bytes": 5048, "license_type": "permissive", "methods": [{"docstring": "Tests that fixed string constains proper comments also.", "name": "testGetFixedProvideString", "signature": "def testGetFixedProvideString(self)"}, {"docstring": "Tests that fixed string constains proper comments also.", "name": "testGetFixedRequireString", "signature": "def 
testGetFixedRequireString(self)"}, {"docstring": "Tests that blank lines are omitted in sorted goog.require statements.", "name": "testFixRequires_removeBlankLines", "signature": "def testFixRequires_removeBlankLines(self)"}, {"docstring": "Regression-tests sorting even with a goog.setTestOnly statement. Args: position: The position in the list where to insert the goog.setTestOnly statement. Will be used to test all possible combinations for this test.", "name": "fixRequiresTest_withTestOnly", "signature": "def fixRequiresTest_withTestOnly(self, position)"}, {"docstring": "Regression-tests sorting even after a goog.setTestOnly statement.", "name": "testFixRequires_withTestOnly", "signature": "def testFixRequires_withTestOnly(self)"}, {"docstring": "Returns an array of lines based on the specified token stream.", "name": "_GetLines", "signature": "def _GetLines(self, token)"}], "n_methods": 6, "prompt": "Implement the Python class `RequireProvideSorterTest` described below.\n\nClass description:\nTests for RequireProvideSorter.\n\nMethod signatures and docstrings:\n- def testGetFixedProvideString(self): Tests that fixed string constains proper comments also.\n- def testGetFixedRequireString(self): Tests that fixed string constains proper comments also.\n- def testFixRequires_removeBlankLines(self): Tests that blank lines are omitted in sorted goog.require statements.\n- def fixRequiresTest_withTestOnly(self, position): Regression-tests sorting even with a goog.setTestOnly statement. Args: position: The position in the list where to insert the goog.setTestOnly statement. Will be used to test all possible combinations for this test.\n- def testFixRequires_withTestOnly(self): Regression-tests sorting even after a goog.setTestOnly statement.\n- def _GetLines(self, token): Returns an array of lines based on the specified token stream.", "prompted_full_text": "Implement the Python class `RequireProvideSorterTest` described below.\n\nClass description:\nTests for RequireProvideSorter.\n\nMethod signatures and docstrings:\n- def testGetFixedProvideString(self): Tests that fixed string constains proper comments also.\n- def testGetFixedRequireString(self): Tests that fixed string constains proper comments also.\n- def testFixRequires_removeBlankLines(self): Tests that blank lines are omitted in sorted goog.require statements.\n- def fixRequiresTest_withTestOnly(self, position): Regression-tests sorting even with a goog.setTestOnly statement. Args: position: The position in the list where to insert the goog.setTestOnly statement. Will be used to test all possible combinations for this test.\n- def testFixRequires_withTestOnly(self): Regression-tests sorting even after a goog.setTestOnly statement.\n- def _GetLines(self, token): Returns an array of lines based on the specified token stream.\n\n<|skeleton|>\nclass RequireProvideSorterTest:\n \"\"\"Tests for RequireProvideSorter.\"\"\"\n\n def testGetFixedProvideString(self):\n \"\"\"Tests that fixed string constains proper comments also.\"\"\"\n <|body_0|>\n\n def testGetFixedRequireString(self):\n \"\"\"Tests that fixed string constains proper comments also.\"\"\"\n <|body_1|>\n\n def testFixRequires_removeBlankLines(self):\n \"\"\"Tests that blank lines are omitted in sorted goog.require statements.\"\"\"\n <|body_2|>\n\n def fixRequiresTest_withTestOnly(self, position):\n \"\"\"Regression-tests sorting even with a goog.setTestOnly statement. Args: position: The position in the list where to insert the goog.setTestOnly statement. 
Will be used to test all possible combinations for this test.\"\"\"\n <|body_3|>\n\n def testFixRequires_withTestOnly(self):\n \"\"\"Regression-tests sorting even after a goog.setTestOnly statement.\"\"\"\n <|body_4|>\n\n def _GetLines(self, token):\n \"\"\"Returns an array of lines based on the specified token stream.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n input_lines = [\"goog.provide('package.xyz');\", '/** @suppress {extraprovide} **/', \"goog.provide('package.abcd');\"]\n expected_lines = ['/** @suppress {extraprovide} **/', \"goog.provide('package.abcd');\", \"goog.provide('package.xyz');\"]\n token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\n sorter = requireprovidesorter.RequireProvideSorter()\n fixed_provide_string = sorter.GetFixedProvideString(token)\n self.assertEquals(expected_lines, fixed_provide_string.splitlines())\n<|end_body_0|>\n\n<|body_start_1|>\n input_lines = [\"goog.require('package.xyz');\", '/** This is needed for scope. **/', \"goog.require('package.abcd');\"]\n expected_lines = ['/** This is needed for scope. **/', \"goog.require('package.abcd');\", \"goog.require('package.xyz');\"]\n token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\n sorter = requireprovidesorter.RequireProvideSorter()\n fixed_require_string = sorter.GetFixedRequireString(token)\n self.assertEquals(expected_lines, fixed_require_string.splitlines())\n<|end_body_1|>\n\n<|body_start_2|>\n input_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassB');\", '', \"goog.require('package.subpackage.ClassA');\"]\n expected_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassA');\", \"goog.require('package.subpackage.ClassB');\"]\n token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\n sorter = requireprovidesorter.RequireProvideSorter()\n sorter.FixRequires(token)\n self.assertEquals(expected_lines, self._GetLines(token))\n<|end_body_2|>\n\n<|body_start_3|>\n input_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassB');\", \"goog.require('package.subpackage.ClassA');\"]\n expected_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassA');\", \"goog.require('package.subpackage.ClassB');\"]\n input_lines.insert(position, 'goog.setTestOnly();')\n expected_lines.insert(position, 'goog.setTestOnly();')\n token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\n sorter = requireprovidesorter.RequireProvideSorter()\n sorter.FixRequires(token)\n self.assertEquals(expected_lines, self._GetLines(token))\n<|end_body_3|>\n\n<|body_start_4|>\n self.fixRequiresTest_withTestOnly(position=0)\n self.fixRequiresTest_withTestOnly(position=1)\n self.fixRequiresTest_withTestOnly(position=2)\n self.fixRequiresTest_withTestOnly(position=4)\n<|end_body_4|>\n\n<|body_start_5|>\n lines = []\n line = ''\n while token:\n line += token.string\n if token.IsLastInLine():\n lines.append(line)\n line = ''\n token = token.next\n return lines\n<|end_body_5|>\n", "revision_id": "72a05af97787001756bae2511b7985e61498c965", "skeleton": "<|skeleton|>\nclass RequireProvideSorterTest:\n \"\"\"Tests for RequireProvideSorter.\"\"\"\n\n def testGetFixedProvideString(self):\n \"\"\"Tests that fixed string constains proper comments also.\"\"\"\n <|body_0|>\n\n def testGetFixedRequireString(self):\n \"\"\"Tests that fixed string constains proper comments also.\"\"\"\n <|body_1|>\n\n def 
testFixRequires_removeBlankLines(self):\n \"\"\"Tests that blank lines are omitted in sorted goog.require statements.\"\"\"\n <|body_2|>\n\n def fixRequiresTest_withTestOnly(self, position):\n \"\"\"Regression-tests sorting even with a goog.setTestOnly statement. Args: position: The position in the list where to insert the goog.setTestOnly statement. Will be used to test all possible combinations for this test.\"\"\"\n <|body_3|>\n\n def testFixRequires_withTestOnly(self):\n \"\"\"Regression-tests sorting even after a goog.setTestOnly statement.\"\"\"\n <|body_4|>\n\n def _GetLines(self, token):\n \"\"\"Returns an array of lines based on the specified token stream.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RequireProvideSorterTest:\n \"\"\"Tests for RequireProvideSorter.\"\"\"\n\n def testGetFixedProvideString(self):\n \"\"\"Tests that fixed string constains proper comments also.\"\"\"\n input_lines = [\"goog.provide('package.xyz');\", '/** @suppress {extraprovide} **/', \"goog.provide('package.abcd');\"]\n expected_lines = ['/** @suppress {extraprovide} **/', \"goog.provide('package.abcd');\", \"goog.provide('package.xyz');\"]\n token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\n sorter = requireprovidesorter.RequireProvideSorter()\n fixed_provide_string = sorter.GetFixedProvideString(token)\n self.assertEquals(expected_lines, fixed_provide_string.splitlines())\n\n def testGetFixedRequireString(self):\n \"\"\"Tests that fixed string constains proper comments also.\"\"\"\n input_lines = [\"goog.require('package.xyz');\", '/** This is needed for scope. **/', \"goog.require('package.abcd');\"]\n expected_lines = ['/** This is needed for scope. **/', \"goog.require('package.abcd');\", \"goog.require('package.xyz');\"]\n token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\n sorter = requireprovidesorter.RequireProvideSorter()\n fixed_require_string = sorter.GetFixedRequireString(token)\n self.assertEquals(expected_lines, fixed_require_string.splitlines())\n\n def testFixRequires_removeBlankLines(self):\n \"\"\"Tests that blank lines are omitted in sorted goog.require statements.\"\"\"\n input_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassB');\", '', \"goog.require('package.subpackage.ClassA');\"]\n expected_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassA');\", \"goog.require('package.subpackage.ClassB');\"]\n token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\n sorter = requireprovidesorter.RequireProvideSorter()\n sorter.FixRequires(token)\n self.assertEquals(expected_lines, self._GetLines(token))\n\n def fixRequiresTest_withTestOnly(self, position):\n \"\"\"Regression-tests sorting even with a goog.setTestOnly statement. Args: position: The position in the list where to insert the goog.setTestOnly statement. 
Will be used to test all possible combinations for this test.\"\"\"\n input_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassB');\", \"goog.require('package.subpackage.ClassA');\"]\n expected_lines = [\"goog.provide('package.subpackage.Whatever');\", '', \"goog.require('package.subpackage.ClassA');\", \"goog.require('package.subpackage.ClassB');\"]\n input_lines.insert(position, 'goog.setTestOnly();')\n expected_lines.insert(position, 'goog.setTestOnly();')\n token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)\n sorter = requireprovidesorter.RequireProvideSorter()\n sorter.FixRequires(token)\n self.assertEquals(expected_lines, self._GetLines(token))\n\n def testFixRequires_withTestOnly(self):\n \"\"\"Regression-tests sorting even after a goog.setTestOnly statement.\"\"\"\n self.fixRequiresTest_withTestOnly(position=0)\n self.fixRequiresTest_withTestOnly(position=1)\n self.fixRequiresTest_withTestOnly(position=2)\n self.fixRequiresTest_withTestOnly(position=4)\n\n def _GetLines(self, token):\n \"\"\"Returns an array of lines based on the specified token stream.\"\"\"\n lines = []\n line = ''\n while token:\n line += token.string\n if token.IsLastInLine():\n lines.append(line)\n line = ''\n token = token.next\n return lines\n", "source": "the_stack_v2_python_sparse", "source_path": "third_party/catapult/third_party/closure_linter/closure_linter/requireprovidesorter_test.py", "source_repo": "metux/chromium-suckless", "split": "val", "star_events_count": 5} {"blob_id": "5dbcada26174abe15d3fab2e2fa678c1c8888d83", "bodies": ["super().__init__(config or {}, *args, config_key=config_key, **kwargs)\nself.yes = yes\nself.no = no\nself.hide_value = hide_value", "result = self.yes if str(value).lower() in self.yes_values else self.no\nif result == self.hide_value:\n return None\nreturn super().handle(result, context) if self.mapping else result"], "bodies_text": "<|body_start_0|>\n super().__init__(config or {}, *args, config_key=config_key, **kwargs)\n self.yes = yes\n self.no = no\n self.hide_value = hide_value\n<|end_body_0|>\n\n<|body_start_1|>\n result = self.yes if str(value).lower() in self.yes_values else self.no\n if result == self.hide_value:\n return None\n return super().handle(result, context) if self.mapping else result\n<|end_body_1|>\n", "class_docstring": "Yes or No handler.", "class_name": "YesNo", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass YesNo:\n \"\"\"Yes or No handler.\"\"\"\n\n def __init__(self, *args: str, yes=True, no=False, hide_value=None, config: typing.Optional[typing.Mapping[str, typing.Mapping]]=None, config_key: typing.Optional[str]=None, **kwargs):\n \"\"\"Init method.\"\"\"\n <|body_0|>\n\n def handle(self, value, context):\n \"\"\"Handle boolean values.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(config or {}, *args, config_key=config_key, **kwargs)\n self.yes = yes\n self.no = no\n self.hide_value = hide_value\n<|end_body_0|>\n\n<|body_start_1|>\n result = self.yes if str(value).lower() in self.yes_values else self.no\n if result == self.hide_value:\n return None\n return super().handle(result, context) if self.mapping else result\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000350", "length_bytes": 4580, "license_type": "permissive", "methods": [{"docstring": "Init method.", "name": "__init__", "signature": "def __init__(self, *args: str, yes=True, no=False, hide_value=None, 
config: typing.Optional[typing.Mapping[str, typing.Mapping]]=None, config_key: typing.Optional[str]=None, **kwargs)"}, {"docstring": "Handle boolean values.", "name": "handle", "signature": "def handle(self, value, context)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002262", "prompt": "Implement the Python class `YesNo` described below.\n\nClass description:\nYes or No handler.\n\nMethod signatures and docstrings:\n- def __init__(self, *args: str, yes=True, no=False, hide_value=None, config: typing.Optional[typing.Mapping[str, typing.Mapping]]=None, config_key: typing.Optional[str]=None, **kwargs): Init method.\n- def handle(self, value, context): Handle boolean values.", "prompted_full_text": "Implement the Python class `YesNo` described below.\n\nClass description:\nYes or No handler.\n\nMethod signatures and docstrings:\n- def __init__(self, *args: str, yes=True, no=False, hide_value=None, config: typing.Optional[typing.Mapping[str, typing.Mapping]]=None, config_key: typing.Optional[str]=None, **kwargs): Init method.\n- def handle(self, value, context): Handle boolean values.\n\n<|skeleton|>\nclass YesNo:\n \"\"\"Yes or No handler.\"\"\"\n\n def __init__(self, *args: str, yes=True, no=False, hide_value=None, config: typing.Optional[typing.Mapping[str, typing.Mapping]]=None, config_key: typing.Optional[str]=None, **kwargs):\n \"\"\"Init method.\"\"\"\n <|body_0|>\n\n def handle(self, value, context):\n \"\"\"Handle boolean values.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(config or {}, *args, config_key=config_key, **kwargs)\n self.yes = yes\n self.no = no\n self.hide_value = hide_value\n<|end_body_0|>\n\n<|body_start_1|>\n result = self.yes if str(value).lower() in self.yes_values else self.no\n if result == self.hide_value:\n return None\n return super().handle(result, context) if self.mapping else result\n<|end_body_1|>\n", "revision_id": "00909d2c47d158bfeac300e1d7477c4f87c52096", "skeleton": "<|skeleton|>\nclass YesNo:\n \"\"\"Yes or No handler.\"\"\"\n\n def __init__(self, *args: str, yes=True, no=False, hide_value=None, config: typing.Optional[typing.Mapping[str, typing.Mapping]]=None, config_key: typing.Optional[str]=None, **kwargs):\n \"\"\"Init method.\"\"\"\n <|body_0|>\n\n def handle(self, value, context):\n \"\"\"Handle boolean values.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class YesNo:\n \"\"\"Yes or No handler.\"\"\"\n\n def __init__(self, *args: str, yes=True, no=False, hide_value=None, config: typing.Optional[typing.Mapping[str, typing.Mapping]]=None, config_key: typing.Optional[str]=None, **kwargs):\n \"\"\"Init method.\"\"\"\n super().__init__(config or {}, *args, config_key=config_key, **kwargs)\n self.yes = yes\n self.no = no\n self.hide_value = hide_value\n\n def handle(self, value, context):\n \"\"\"Handle boolean values.\"\"\"\n result = self.yes if str(value).lower() in self.yes_values else self.no\n if result == self.hide_value:\n return None\n return super().handle(result, context) if self.mapping else result\n", "source": "the_stack_v2_python_sparse", "source_path": "knowit/properties/general.py", "source_repo": "ratoaq2/knowit", "split": "val", "star_events_count": 27} {"blob_id": "28fdbff0af061fcdc4b58a724418db2d027abc6a", "bodies": ["recent = self.listRecentPaths()\nif path in recent:\n return\nrecent.append(path)\nif len(recent) > 10:\n recent = recent[-10:]\nrecent = 
op.pathsep.join(recent)\nfslsettings.write('fsleyes.recentFiles', recent)\nself.notify()", "recent = fslsettings.read('fsleyes.recentFiles', None)\nif recent is None:\n recent = []\nelse:\n recent = recent.split(op.pathsep)\nreturn [f for f in recent if op.exists(f)]"], "bodies_text": "<|body_start_0|>\n recent = self.listRecentPaths()\n if path in recent:\n return\n recent.append(path)\n if len(recent) > 10:\n recent = recent[-10:]\n recent = op.pathsep.join(recent)\n fslsettings.write('fsleyes.recentFiles', recent)\n self.notify()\n<|end_body_0|>\n\n<|body_start_1|>\n recent = fslsettings.read('fsleyes.recentFiles', None)\n if recent is None:\n recent = []\n else:\n recent = recent.split(op.pathsep)\n return [f for f in recent if op.exists(f)]\n<|end_body_1|>\n", "class_docstring": "The ``RecentPathManager`` is a simple class which provides access to a list of recently loaded files, and can notify registered listeners when that list changes. See the :attr:`recentPathManager` singleton instance.", "class_name": "RecentPathManager", "detected_licenses": ["BSD-3-Clause", "CC-BY-3.0", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RecentPathManager:\n \"\"\"The ``RecentPathManager`` is a simple class which provides access to a list of recently loaded files, and can notify registered listeners when that list changes. See the :attr:`recentPathManager` singleton instance.\"\"\"\n\n def recordPath(self, path):\n \"\"\"Adds the given ``path`` to the recent files list.\"\"\"\n <|body_0|>\n\n def listRecentPaths(self):\n \"\"\"Returns a list of recently loaded files.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n recent = self.listRecentPaths()\n if path in recent:\n return\n recent.append(path)\n if len(recent) > 10:\n recent = recent[-10:]\n recent = op.pathsep.join(recent)\n fslsettings.write('fsleyes.recentFiles', recent)\n self.notify()\n<|end_body_0|>\n\n<|body_start_1|>\n recent = fslsettings.read('fsleyes.recentFiles', None)\n if recent is None:\n recent = []\n else:\n recent = recent.split(op.pathsep)\n return [f for f in recent if op.exists(f)]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000351", "length_bytes": 16912, "license_type": "permissive", "methods": [{"docstring": "Adds the given ``path`` to the recent files list.", "name": "recordPath", "signature": "def recordPath(self, path)"}, {"docstring": "Returns a list of recently loaded files.", "name": "listRecentPaths", "signature": "def listRecentPaths(self)"}], "n_methods": 2, "prompt": "Implement the Python class `RecentPathManager` described below.\n\nClass description:\nThe ``RecentPathManager`` is a simple class which provides access to a list of recently loaded files, and can notify registered listeners when that list changes. See the :attr:`recentPathManager` singleton instance.\n\nMethod signatures and docstrings:\n- def recordPath(self, path): Adds the given ``path`` to the recent files list.\n- def listRecentPaths(self): Returns a list of recently loaded files.", "prompted_full_text": "Implement the Python class `RecentPathManager` described below.\n\nClass description:\nThe ``RecentPathManager`` is a simple class which provides access to a list of recently loaded files, and can notify registered listeners when that list changes. 
See the :attr:`recentPathManager` singleton instance.\n\nMethod signatures and docstrings:\n- def recordPath(self, path): Adds the given ``path`` to the recent files list.\n- def listRecentPaths(self): Returns a list of recently loaded files.\n\n<|skeleton|>\nclass RecentPathManager:\n \"\"\"The ``RecentPathManager`` is a simple class which provides access to a list of recently loaded files, and can notify registered listeners when that list changes. See the :attr:`recentPathManager` singleton instance.\"\"\"\n\n def recordPath(self, path):\n \"\"\"Adds the given ``path`` to the recent files list.\"\"\"\n <|body_0|>\n\n def listRecentPaths(self):\n \"\"\"Returns a list of recently loaded files.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n recent = self.listRecentPaths()\n if path in recent:\n return\n recent.append(path)\n if len(recent) > 10:\n recent = recent[-10:]\n recent = op.pathsep.join(recent)\n fslsettings.write('fsleyes.recentFiles', recent)\n self.notify()\n<|end_body_0|>\n\n<|body_start_1|>\n recent = fslsettings.read('fsleyes.recentFiles', None)\n if recent is None:\n recent = []\n else:\n recent = recent.split(op.pathsep)\n return [f for f in recent if op.exists(f)]\n<|end_body_1|>\n", "revision_id": "46ccb4fe2b2346eb57576247f49714032b61307a", "skeleton": "<|skeleton|>\nclass RecentPathManager:\n \"\"\"The ``RecentPathManager`` is a simple class which provides access to a list of recently loaded files, and can notify registered listeners when that list changes. See the :attr:`recentPathManager` singleton instance.\"\"\"\n\n def recordPath(self, path):\n \"\"\"Adds the given ``path`` to the recent files list.\"\"\"\n <|body_0|>\n\n def listRecentPaths(self):\n \"\"\"Returns a list of recently loaded files.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RecentPathManager:\n \"\"\"The ``RecentPathManager`` is a simple class which provides access to a list of recently loaded files, and can notify registered listeners when that list changes. 
See the :attr:`recentPathManager` singleton instance.\"\"\"\n\n def recordPath(self, path):\n \"\"\"Adds the given ``path`` to the recent files list.\"\"\"\n recent = self.listRecentPaths()\n if path in recent:\n return\n recent.append(path)\n if len(recent) > 10:\n recent = recent[-10:]\n recent = op.pathsep.join(recent)\n fslsettings.write('fsleyes.recentFiles', recent)\n self.notify()\n\n def listRecentPaths(self):\n \"\"\"Returns a list of recently loaded files.\"\"\"\n recent = fslsettings.read('fsleyes.recentFiles', None)\n if recent is None:\n recent = []\n else:\n recent = recent.split(op.pathsep)\n return [f for f in recent if op.exists(f)]\n", "source": "the_stack_v2_python_sparse", "source_path": "fsleyes/actions/loadoverlay.py", "source_repo": "sanjayankur31/fsleyes", "split": "val", "star_events_count": 1} {"blob_id": "217a341aee4b7786ca130dac10d7d26db2465c58", "bodies": ["ext = []\nif self._is_position(global_step, 'start') or self._is_position(global_step, 'end'):\n ext.append(extensions.BatchGrad())\nreturn ext", "info = {}\nif pos in ['start', 'end']:\n info['f'] = batch_loss.item()\n info['var_f'] = get_individual_losses(global_step).var().item()\n info['params'] = {id(p): p.data.clone().detach() for p in params}\n info['grad'] = {id(p): p.grad.data.clone().detach() for p in params}\n batch_size = get_batch_size(global_step)\n info['batch_grad'] = {id(p): batch_size * p.grad_batch.data.clone().detach() for p in params}\nelse:\n raise ValueError(f\"Invalid position '{pos}'. Expect {self._positions}.\")\nif pos == 'end':\n start_params, _ = self._get_info('params', end=False)\n end_params = info['params']\n search_dir = [end_params[key] - start_params[key] for key in start_params.keys()]\n for info_dict in [self._start_info, info]:\n grad = [info_dict['grad'][key] for key in start_params.keys()]\n batch_grad = [info_dict['batch_grad'][key] for key in start_params.keys()]\n info_dict['df'] = _projected_gradient(grad, search_dir)\n info_dict['var_df'] = _exact_variance(batch_grad, search_dir)\nreturn info"], "bodies_text": "<|body_start_0|>\n ext = []\n if self._is_position(global_step, 'start') or self._is_position(global_step, 'end'):\n ext.append(extensions.BatchGrad())\n return ext\n<|end_body_0|>\n\n<|body_start_1|>\n info = {}\n if pos in ['start', 'end']:\n info['f'] = batch_loss.item()\n info['var_f'] = get_individual_losses(global_step).var().item()\n info['params'] = {id(p): p.data.clone().detach() for p in params}\n info['grad'] = {id(p): p.grad.data.clone().detach() for p in params}\n batch_size = get_batch_size(global_step)\n info['batch_grad'] = {id(p): batch_size * p.grad_batch.data.clone().detach() for p in params}\n else:\n raise ValueError(f\"Invalid position '{pos}'. 
Expect {self._positions}.\")\n if pos == 'end':\n start_params, _ = self._get_info('params', end=False)\n end_params = info['params']\n search_dir = [end_params[key] - start_params[key] for key in start_params.keys()]\n for info_dict in [self._start_info, info]:\n grad = [info_dict['grad'][key] for key in start_params.keys()]\n batch_grad = [info_dict['batch_grad'][key] for key in start_params.keys()]\n info_dict['df'] = _projected_gradient(grad, search_dir)\n info_dict['var_df'] = _exact_variance(batch_grad, search_dir)\n return info\n<|end_body_1|>\n", "class_docstring": "Compute α but requires storing individual gradients.", "class_name": "AlphaExpensive", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AlphaExpensive:\n \"\"\"Compute α but requires storing individual gradients.\"\"\"\n\n def extensions(self, global_step):\n \"\"\"Return list of BackPACK extensions required for the computation. Args: global_step (int): The current iteration number. Returns: list: (Potentially empty) list with required BackPACK quantities.\"\"\"\n <|body_0|>\n\n def _fetch_values(self, params, batch_loss, pos, global_step):\n \"\"\"Fetch values for quadratic fit. Return as dictionary. The entry \"search_dir\" is only initialized if ``pos`` is ``\"start\"``.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ext = []\n if self._is_position(global_step, 'start') or self._is_position(global_step, 'end'):\n ext.append(extensions.BatchGrad())\n return ext\n<|end_body_0|>\n\n<|body_start_1|>\n info = {}\n if pos in ['start', 'end']:\n info['f'] = batch_loss.item()\n info['var_f'] = get_individual_losses(global_step).var().item()\n info['params'] = {id(p): p.data.clone().detach() for p in params}\n info['grad'] = {id(p): p.grad.data.clone().detach() for p in params}\n batch_size = get_batch_size(global_step)\n info['batch_grad'] = {id(p): batch_size * p.grad_batch.data.clone().detach() for p in params}\n else:\n raise ValueError(f\"Invalid position '{pos}'. Expect {self._positions}.\")\n if pos == 'end':\n start_params, _ = self._get_info('params', end=False)\n end_params = info['params']\n search_dir = [end_params[key] - start_params[key] for key in start_params.keys()]\n for info_dict in [self._start_info, info]:\n grad = [info_dict['grad'][key] for key in start_params.keys()]\n batch_grad = [info_dict['batch_grad'][key] for key in start_params.keys()]\n info_dict['df'] = _projected_gradient(grad, search_dir)\n info_dict['var_df'] = _exact_variance(batch_grad, search_dir)\n return info\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000352", "length_bytes": 16011, "license_type": "permissive", "methods": [{"docstring": "Return list of BackPACK extensions required for the computation. Args: global_step (int): The current iteration number. Returns: list: (Potentially empty) list with required BackPACK quantities.", "name": "extensions", "signature": "def extensions(self, global_step)"}, {"docstring": "Fetch values for quadratic fit. Return as dictionary. 
The entry \"search_dir\" is only initialized if ``pos`` is ``\"start\"``.", "name": "_fetch_values", "signature": "def _fetch_values(self, params, batch_loss, pos, global_step)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000352", "prompt": "Implement the Python class `AlphaExpensive` described below.\n\nClass description:\nCompute α but requires storing individual gradients.\n\nMethod signatures and docstrings:\n- def extensions(self, global_step): Return list of BackPACK extensions required for the computation. Args: global_step (int): The current iteration number. Returns: list: (Potentially empty) list with required BackPACK quantities.\n- def _fetch_values(self, params, batch_loss, pos, global_step): Fetch values for quadratic fit. Return as dictionary. The entry \"search_dir\" is only initialized if ``pos`` is ``\"start\"``.", "prompted_full_text": "Implement the Python class `AlphaExpensive` described below.\n\nClass description:\nCompute α but requires storing individual gradients.\n\nMethod signatures and docstrings:\n- def extensions(self, global_step): Return list of BackPACK extensions required for the computation. Args: global_step (int): The current iteration number. Returns: list: (Potentially empty) list with required BackPACK quantities.\n- def _fetch_values(self, params, batch_loss, pos, global_step): Fetch values for quadratic fit. Return as dictionary. The entry \"search_dir\" is only initialized if ``pos`` is ``\"start\"``.\n\n<|skeleton|>\nclass AlphaExpensive:\n \"\"\"Compute α but requires storing individual gradients.\"\"\"\n\n def extensions(self, global_step):\n \"\"\"Return list of BackPACK extensions required for the computation. Args: global_step (int): The current iteration number. Returns: list: (Potentially empty) list with required BackPACK quantities.\"\"\"\n <|body_0|>\n\n def _fetch_values(self, params, batch_loss, pos, global_step):\n \"\"\"Fetch values for quadratic fit. Return as dictionary. The entry \"search_dir\" is only initialized if ``pos`` is ``\"start\"``.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ext = []\n if self._is_position(global_step, 'start') or self._is_position(global_step, 'end'):\n ext.append(extensions.BatchGrad())\n return ext\n<|end_body_0|>\n\n<|body_start_1|>\n info = {}\n if pos in ['start', 'end']:\n info['f'] = batch_loss.item()\n info['var_f'] = get_individual_losses(global_step).var().item()\n info['params'] = {id(p): p.data.clone().detach() for p in params}\n info['grad'] = {id(p): p.grad.data.clone().detach() for p in params}\n batch_size = get_batch_size(global_step)\n info['batch_grad'] = {id(p): batch_size * p.grad_batch.data.clone().detach() for p in params}\n else:\n raise ValueError(f\"Invalid position '{pos}'. 
Expect {self._positions}.\")\n if pos == 'end':\n start_params, _ = self._get_info('params', end=False)\n end_params = info['params']\n search_dir = [end_params[key] - start_params[key] for key in start_params.keys()]\n for info_dict in [self._start_info, info]:\n grad = [info_dict['grad'][key] for key in start_params.keys()]\n batch_grad = [info_dict['batch_grad'][key] for key in start_params.keys()]\n info_dict['df'] = _projected_gradient(grad, search_dir)\n info_dict['var_df'] = _exact_variance(batch_grad, search_dir)\n return info\n<|end_body_1|>\n", "revision_id": "5bd5ab3cda03eda0b0bf276f29d5c28b83d70b06", "skeleton": "<|skeleton|>\nclass AlphaExpensive:\n \"\"\"Compute α but requires storing individual gradients.\"\"\"\n\n def extensions(self, global_step):\n \"\"\"Return list of BackPACK extensions required for the computation. Args: global_step (int): The current iteration number. Returns: list: (Potentially empty) list with required BackPACK quantities.\"\"\"\n <|body_0|>\n\n def _fetch_values(self, params, batch_loss, pos, global_step):\n \"\"\"Fetch values for quadratic fit. Return as dictionary. The entry \"search_dir\" is only initialized if ``pos`` is ``\"start\"``.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AlphaExpensive:\n \"\"\"Compute α but requires storing individual gradients.\"\"\"\n\n def extensions(self, global_step):\n \"\"\"Return list of BackPACK extensions required for the computation. Args: global_step (int): The current iteration number. Returns: list: (Potentially empty) list with required BackPACK quantities.\"\"\"\n ext = []\n if self._is_position(global_step, 'start') or self._is_position(global_step, 'end'):\n ext.append(extensions.BatchGrad())\n return ext\n\n def _fetch_values(self, params, batch_loss, pos, global_step):\n \"\"\"Fetch values for quadratic fit. Return as dictionary. The entry \"search_dir\" is only initialized if ``pos`` is ``\"start\"``.\"\"\"\n info = {}\n if pos in ['start', 'end']:\n info['f'] = batch_loss.item()\n info['var_f'] = get_individual_losses(global_step).var().item()\n info['params'] = {id(p): p.data.clone().detach() for p in params}\n info['grad'] = {id(p): p.grad.data.clone().detach() for p in params}\n batch_size = get_batch_size(global_step)\n info['batch_grad'] = {id(p): batch_size * p.grad_batch.data.clone().detach() for p in params}\n else:\n raise ValueError(f\"Invalid position '{pos}'. 
Expect {self._positions}.\")\n if pos == 'end':\n start_params, _ = self._get_info('params', end=False)\n end_params = info['params']\n search_dir = [end_params[key] - start_params[key] for key in start_params.keys()]\n for info_dict in [self._start_info, info]:\n grad = [info_dict['grad'][key] for key in start_params.keys()]\n batch_grad = [info_dict['batch_grad'][key] for key in start_params.keys()]\n info_dict['df'] = _projected_gradient(grad, search_dir)\n info_dict['var_df'] = _exact_variance(batch_grad, search_dir)\n return info\n", "source": "the_stack_v2_python_sparse", "source_path": "cockpit/quantities/alpha.py", "source_repo": "MeNicefellow/cockpit", "split": "val", "star_events_count": 0} {"blob_id": "3de78b84a8fe8dedfeb021f99996643bad8b60b0", "bodies": ["n = len(s)\ndp = [[0 for _ in range(n)] for _ in range(n)]\nmax_length = 1\nmax_str = s[0]\nfor i in range(n):\n dp[i][i] = 1\nfor i in range(n - 1):\n if s[i] == s[i + 1]:\n dp[i][i + 1] = 1\n max_str = s[i:i + 2]\n max_length = 2\nfor k in range(3, n + 1):\n for i in range(n - k + 1):\n j = i + k - 1\n if dp[i + 1][j - 1] and s[i] == s[j]:\n dp[i][j] = 1\n if k > max_length:\n max_str = s[i:j + 1]\n max_length = k\nreturn max_str", "n = len(s)\nres = s[0]\n\ndef extend(i, j):\n while i >= 0 and j < len(s) and (s[i] == s[j]):\n i -= 1\n j += 1\n return s[i + 1:j]\nfor i in range(n - 1):\n e1 = extend(i, i)\n e2 = extend(i, i + 1)\n if max(len(e1), len(e2)) > len(res):\n res = e1 if len(e1) > len(e2) else e2\nreturn res"], "bodies_text": "<|body_start_0|>\n n = len(s)\n dp = [[0 for _ in range(n)] for _ in range(n)]\n max_length = 1\n max_str = s[0]\n for i in range(n):\n dp[i][i] = 1\n for i in range(n - 1):\n if s[i] == s[i + 1]:\n dp[i][i + 1] = 1\n max_str = s[i:i + 2]\n max_length = 2\n for k in range(3, n + 1):\n for i in range(n - k + 1):\n j = i + k - 1\n if dp[i + 1][j - 1] and s[i] == s[j]:\n dp[i][j] = 1\n if k > max_length:\n max_str = s[i:j + 1]\n max_length = k\n return max_str\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n res = s[0]\n\n def extend(i, j):\n while i >= 0 and j < len(s) and (s[i] == s[j]):\n i -= 1\n j += 1\n return s[i + 1:j]\n for i in range(n - 1):\n e1 = extend(i, i)\n e2 = extend(i, i + 1)\n if max(len(e1), len(e2)) > len(res):\n res = e1 if len(e1) > len(e2) else e2\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def longestPalindrome(self, s: str) -> str:\n \"\"\"DP 视频:https://www.youtube.com/watch?v=UflHuQj6MVA&t=441s 代码:https://www.geeksforgeeks.org/longest-palindrome-substring-set-1/ 思想: 先判断最长的子串是否为 1,2,及大于 3 max_length = 1, max_str = s[0], dp[i][i] = 1 max_length = 2, max_str = s[i:i+2], dp[i][i+1] = 1 max_length >= 3, max_str = s[i:i+k], k is the length of substring k 的范围是 [3, n+1] 表示整体往 dp 的右上方向遍历,结合视频中的那张图理解 i 从 0 开始,直到 n-k+1 行。也就是 i 从 第 0 行开始,斜向下遍历(以下是 dp table, 可以结合一下视频) 3 ( 0 2 ) ( 1 3 ) ( 2 4 ) ( 3 5 ) ( 4 6 ) ( 5 7 ) 4 ( 0 3 ) ( 1 4 ) ( 2 5 ) ( 3 6 ) ( 4 7 ) 5 ( 0 4 ) ( 1 5 ) ( 2 6 ) ( 3 7 ) 6 ( 0 5 ) ( 1 6 ) ( 2 7 ) 7 ( 0 6 ) ( 1 7 ) 8 ( 0 7 ) 生成 dp table 用来记录状态 状态转移方程:dp[i+1][j-1] and s[i] == s[j] 这里借用了一个大小为 M * N 二维的 dp table Time O(n^\"\"\"\n <|body_0|>\n\n def longestPalindrome2(self, s: str) -> str:\n \"\"\"扩展中心法 Expand Around Center 1. 遍历 str 中的点,以每个点为中心分别进行奇数、偶数开始向外扩展 2. 记录下扩展后得到的最大 str 3. 
注意 extend 函数推出的时间点 这个解法比上一种解法好理解很多,速度也快,重点掌握这个\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(s)\n dp = [[0 for _ in range(n)] for _ in range(n)]\n max_length = 1\n max_str = s[0]\n for i in range(n):\n dp[i][i] = 1\n for i in range(n - 1):\n if s[i] == s[i + 1]:\n dp[i][i + 1] = 1\n max_str = s[i:i + 2]\n max_length = 2\n for k in range(3, n + 1):\n for i in range(n - k + 1):\n j = i + k - 1\n if dp[i + 1][j - 1] and s[i] == s[j]:\n dp[i][j] = 1\n if k > max_length:\n max_str = s[i:j + 1]\n max_length = k\n return max_str\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n res = s[0]\n\n def extend(i, j):\n while i >= 0 and j < len(s) and (s[i] == s[j]):\n i -= 1\n j += 1\n return s[i + 1:j]\n for i in range(n - 1):\n e1 = extend(i, i)\n e2 = extend(i, i + 1)\n if max(len(e1), len(e2)) > len(res):\n res = e1 if len(e1) > len(e2) else e2\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000353", "length_bytes": 4264, "license_type": "no_license", "methods": [{"docstring": "DP 视频:https://www.youtube.com/watch?v=UflHuQj6MVA&t=441s 代码:https://www.geeksforgeeks.org/longest-palindrome-substring-set-1/ 思想: 先判断最长的子串是否为 1,2,及大于 3 max_length = 1, max_str = s[0], dp[i][i] = 1 max_length = 2, max_str = s[i:i+2], dp[i][i+1] = 1 max_length >= 3, max_str = s[i:i+k], k is the length of substring k 的范围是 [3, n+1] 表示整体往 dp 的右上方向遍历,结合视频中的那张图理解 i 从 0 开始,直到 n-k+1 行。也就是 i 从 第 0 行开始,斜向下遍历(以下是 dp table, 可以结合一下视频) 3 ( 0 2 ) ( 1 3 ) ( 2 4 ) ( 3 5 ) ( 4 6 ) ( 5 7 ) 4 ( 0 3 ) ( 1 4 ) ( 2 5 ) ( 3 6 ) ( 4 7 ) 5 ( 0 4 ) ( 1 5 ) ( 2 6 ) ( 3 7 ) 6 ( 0 5 ) ( 1 6 ) ( 2 7 ) 7 ( 0 6 ) ( 1 7 ) 8 ( 0 7 ) 生成 dp table 用来记录状态 状态转移方程:dp[i+1][j-1] and s[i] == s[j] 这里借用了一个大小为 M * N 二维的 dp table Time O(n^", "name": "longestPalindrome", "signature": "def longestPalindrome(self, s: str) -> str"}, {"docstring": "扩展中心法 Expand Around Center 1. 遍历 str 中的点,以每个点为中心分别进行奇数、偶数开始向外扩展 2. 记录下扩展后得到的最大 str 3. 注意 extend 函数推出的时间点 这个解法比上一种解法好理解很多,速度也快,重点掌握这个", "name": "longestPalindrome2", "signature": "def longestPalindrome2(self, s: str) -> str"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def longestPalindrome(self, s: str) -> str: DP 视频:https://www.youtube.com/watch?v=UflHuQj6MVA&t=441s 代码:https://www.geeksforgeeks.org/longest-palindrome-substring-set-1/ 思想: 先判断最长的子串是否为 1,2,及大于 3 max_length = 1, max_str = s[0], dp[i][i] = 1 max_length = 2, max_str = s[i:i+2], dp[i][i+1] = 1 max_length >= 3, max_str = s[i:i+k], k is the length of substring k 的范围是 [3, n+1] 表示整体往 dp 的右上方向遍历,结合视频中的那张图理解 i 从 0 开始,直到 n-k+1 行。也就是 i 从 第 0 行开始,斜向下遍历(以下是 dp table, 可以结合一下视频) 3 ( 0 2 ) ( 1 3 ) ( 2 4 ) ( 3 5 ) ( 4 6 ) ( 5 7 ) 4 ( 0 3 ) ( 1 4 ) ( 2 5 ) ( 3 6 ) ( 4 7 ) 5 ( 0 4 ) ( 1 5 ) ( 2 6 ) ( 3 7 ) 6 ( 0 5 ) ( 1 6 ) ( 2 7 ) 7 ( 0 6 ) ( 1 7 ) 8 ( 0 7 ) 生成 dp table 用来记录状态 状态转移方程:dp[i+1][j-1] and s[i] == s[j] 这里借用了一个大小为 M * N 二维的 dp table Time O(n^\n- def longestPalindrome2(self, s: str) -> str: 扩展中心法 Expand Around Center 1. 遍历 str 中的点,以每个点为中心分别进行奇数、偶数开始向外扩展 2. 记录下扩展后得到的最大 str 3. 
注意 extend 函数推出的时间点 这个解法比上一种解法好理解很多,速度也快,重点掌握这个", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def longestPalindrome(self, s: str) -> str: DP 视频:https://www.youtube.com/watch?v=UflHuQj6MVA&t=441s 代码:https://www.geeksforgeeks.org/longest-palindrome-substring-set-1/ 思想: 先判断最长的子串是否为 1,2,及大于 3 max_length = 1, max_str = s[0], dp[i][i] = 1 max_length = 2, max_str = s[i:i+2], dp[i][i+1] = 1 max_length >= 3, max_str = s[i:i+k], k is the length of substring k 的范围是 [3, n+1] 表示整体往 dp 的右上方向遍历,结合视频中的那张图理解 i 从 0 开始,直到 n-k+1 行。也就是 i 从 第 0 行开始,斜向下遍历(以下是 dp table, 可以结合一下视频) 3 ( 0 2 ) ( 1 3 ) ( 2 4 ) ( 3 5 ) ( 4 6 ) ( 5 7 ) 4 ( 0 3 ) ( 1 4 ) ( 2 5 ) ( 3 6 ) ( 4 7 ) 5 ( 0 4 ) ( 1 5 ) ( 2 6 ) ( 3 7 ) 6 ( 0 5 ) ( 1 6 ) ( 2 7 ) 7 ( 0 6 ) ( 1 7 ) 8 ( 0 7 ) 生成 dp table 用来记录状态 状态转移方程:dp[i+1][j-1] and s[i] == s[j] 这里借用了一个大小为 M * N 二维的 dp table Time O(n^\n- def longestPalindrome2(self, s: str) -> str: 扩展中心法 Expand Around Center 1. 遍历 str 中的点,以每个点为中心分别进行奇数、偶数开始向外扩展 2. 记录下扩展后得到的最大 str 3. 注意 extend 函数推出的时间点 这个解法比上一种解法好理解很多,速度也快,重点掌握这个\n\n<|skeleton|>\nclass Solution:\n\n def longestPalindrome(self, s: str) -> str:\n \"\"\"DP 视频:https://www.youtube.com/watch?v=UflHuQj6MVA&t=441s 代码:https://www.geeksforgeeks.org/longest-palindrome-substring-set-1/ 思想: 先判断最长的子串是否为 1,2,及大于 3 max_length = 1, max_str = s[0], dp[i][i] = 1 max_length = 2, max_str = s[i:i+2], dp[i][i+1] = 1 max_length >= 3, max_str = s[i:i+k], k is the length of substring k 的范围是 [3, n+1] 表示整体往 dp 的右上方向遍历,结合视频中的那张图理解 i 从 0 开始,直到 n-k+1 行。也就是 i 从 第 0 行开始,斜向下遍历(以下是 dp table, 可以结合一下视频) 3 ( 0 2 ) ( 1 3 ) ( 2 4 ) ( 3 5 ) ( 4 6 ) ( 5 7 ) 4 ( 0 3 ) ( 1 4 ) ( 2 5 ) ( 3 6 ) ( 4 7 ) 5 ( 0 4 ) ( 1 5 ) ( 2 6 ) ( 3 7 ) 6 ( 0 5 ) ( 1 6 ) ( 2 7 ) 7 ( 0 6 ) ( 1 7 ) 8 ( 0 7 ) 生成 dp table 用来记录状态 状态转移方程:dp[i+1][j-1] and s[i] == s[j] 这里借用了一个大小为 M * N 二维的 dp table Time O(n^\"\"\"\n <|body_0|>\n\n def longestPalindrome2(self, s: str) -> str:\n \"\"\"扩展中心法 Expand Around Center 1. 遍历 str 中的点,以每个点为中心分别进行奇数、偶数开始向外扩展 2. 记录下扩展后得到的最大 str 3. 
注意 extend 函数推出的时间点 这个解法比上一种解法好理解很多,速度也快,重点掌握这个\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(s)\n dp = [[0 for _ in range(n)] for _ in range(n)]\n max_length = 1\n max_str = s[0]\n for i in range(n):\n dp[i][i] = 1\n for i in range(n - 1):\n if s[i] == s[i + 1]:\n dp[i][i + 1] = 1\n max_str = s[i:i + 2]\n max_length = 2\n for k in range(3, n + 1):\n for i in range(n - k + 1):\n j = i + k - 1\n if dp[i + 1][j - 1] and s[i] == s[j]:\n dp[i][j] = 1\n if k > max_length:\n max_str = s[i:j + 1]\n max_length = k\n return max_str\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n res = s[0]\n\n def extend(i, j):\n while i >= 0 and j < len(s) and (s[i] == s[j]):\n i -= 1\n j += 1\n return s[i + 1:j]\n for i in range(n - 1):\n e1 = extend(i, i)\n e2 = extend(i, i + 1)\n if max(len(e1), len(e2)) > len(res):\n res = e1 if len(e1) > len(e2) else e2\n return res\n<|end_body_1|>\n", "revision_id": "3a5649357e0f21cbbc5e238351300cd706d533b3", "skeleton": "<|skeleton|>\nclass Solution:\n\n def longestPalindrome(self, s: str) -> str:\n \"\"\"DP 视频:https://www.youtube.com/watch?v=UflHuQj6MVA&t=441s 代码:https://www.geeksforgeeks.org/longest-palindrome-substring-set-1/ 思想: 先判断最长的子串是否为 1,2,及大于 3 max_length = 1, max_str = s[0], dp[i][i] = 1 max_length = 2, max_str = s[i:i+2], dp[i][i+1] = 1 max_length >= 3, max_str = s[i:i+k], k is the length of substring k 的范围是 [3, n+1] 表示整体往 dp 的右上方向遍历,结合视频中的那张图理解 i 从 0 开始,直到 n-k+1 行。也就是 i 从 第 0 行开始,斜向下遍历(以下是 dp table, 可以结合一下视频) 3 ( 0 2 ) ( 1 3 ) ( 2 4 ) ( 3 5 ) ( 4 6 ) ( 5 7 ) 4 ( 0 3 ) ( 1 4 ) ( 2 5 ) ( 3 6 ) ( 4 7 ) 5 ( 0 4 ) ( 1 5 ) ( 2 6 ) ( 3 7 ) 6 ( 0 5 ) ( 1 6 ) ( 2 7 ) 7 ( 0 6 ) ( 1 7 ) 8 ( 0 7 ) 生成 dp table 用来记录状态 状态转移方程:dp[i+1][j-1] and s[i] == s[j] 这里借用了一个大小为 M * N 二维的 dp table Time O(n^\"\"\"\n <|body_0|>\n\n def longestPalindrome2(self, s: str) -> str:\n \"\"\"扩展中心法 Expand Around Center 1. 遍历 str 中的点,以每个点为中心分别进行奇数、偶数开始向外扩展 2. 记录下扩展后得到的最大 str 3. 注意 extend 函数推出的时间点 这个解法比上一种解法好理解很多,速度也快,重点掌握这个\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def longestPalindrome(self, s: str) -> str:\n \"\"\"DP 视频:https://www.youtube.com/watch?v=UflHuQj6MVA&t=441s 代码:https://www.geeksforgeeks.org/longest-palindrome-substring-set-1/ 思想: 先判断最长的子串是否为 1,2,及大于 3 max_length = 1, max_str = s[0], dp[i][i] = 1 max_length = 2, max_str = s[i:i+2], dp[i][i+1] = 1 max_length >= 3, max_str = s[i:i+k], k is the length of substring k 的范围是 [3, n+1] 表示整体往 dp 的右上方向遍历,结合视频中的那张图理解 i 从 0 开始,直到 n-k+1 行。也就是 i 从 第 0 行开始,斜向下遍历(以下是 dp table, 可以结合一下视频) 3 ( 0 2 ) ( 1 3 ) ( 2 4 ) ( 3 5 ) ( 4 6 ) ( 5 7 ) 4 ( 0 3 ) ( 1 4 ) ( 2 5 ) ( 3 6 ) ( 4 7 ) 5 ( 0 4 ) ( 1 5 ) ( 2 6 ) ( 3 7 ) 6 ( 0 5 ) ( 1 6 ) ( 2 7 ) 7 ( 0 6 ) ( 1 7 ) 8 ( 0 7 ) 生成 dp table 用来记录状态 状态转移方程:dp[i+1][j-1] and s[i] == s[j] 这里借用了一个大小为 M * N 二维的 dp table Time O(n^\"\"\"\n n = len(s)\n dp = [[0 for _ in range(n)] for _ in range(n)]\n max_length = 1\n max_str = s[0]\n for i in range(n):\n dp[i][i] = 1\n for i in range(n - 1):\n if s[i] == s[i + 1]:\n dp[i][i + 1] = 1\n max_str = s[i:i + 2]\n max_length = 2\n for k in range(3, n + 1):\n for i in range(n - k + 1):\n j = i + k - 1\n if dp[i + 1][j - 1] and s[i] == s[j]:\n dp[i][j] = 1\n if k > max_length:\n max_str = s[i:j + 1]\n max_length = k\n return max_str\n\n def longestPalindrome2(self, s: str) -> str:\n \"\"\"扩展中心法 Expand Around Center 1. 遍历 str 中的点,以每个点为中心分别进行奇数、偶数开始向外扩展 2. 记录下扩展后得到的最大 str 3. 
注意 extend 函数推出的时间点 这个解法比上一种解法好理解很多,速度也快,重点掌握这个\"\"\"\n n = len(s)\n res = s[0]\n\n def extend(i, j):\n while i >= 0 and j < len(s) and (s[i] == s[j]):\n i -= 1\n j += 1\n return s[i + 1:j]\n for i in range(n - 1):\n e1 = extend(i, i)\n e2 = extend(i, i + 1)\n if max(len(e1), len(e2)) > len(res):\n res = e1 if len(e1) > len(e2) else e2\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode-py/leetcode5.py", "source_repo": "cicihou/LearningProject", "split": "val", "star_events_count": 0} {"blob_id": "5e8b9932734bec2eac26839189e7c997956ec95b", "bodies": ["if self.request.version == 'v6':\n return ScaleFileSerializerV6\nelif self.request.version == 'v7':\n return ScaleFileSerializerV6", "if request.version == 'v6':\n return self._list_v6(request)\nelif request.version == 'v7':\n return self._list_v6(request)\nraise Http404()", "countries = rest_util.parse_string_list(request, 'countries', required=False)\ndata_started = rest_util.parse_timestamp(request, 'data_started', required=False)\ndata_ended = rest_util.parse_timestamp(request, 'data_ended', required=False)\nrest_util.check_time_range(data_started, data_ended)\nsource_started = rest_util.parse_timestamp(request, 'source_started', required=False)\nsource_ended = rest_util.parse_timestamp(request, 'source_ended', required=False)\nrest_util.check_time_range(source_started, source_ended)\nsource_sensor_classes = rest_util.parse_string_list(request, 'source_sensor_class', required=False)\nsource_sensors = rest_util.parse_string_list(request, 'source_sensor', required=False)\nsource_collections = rest_util.parse_string_list(request, 'source_collection', required=False)\nsource_tasks = rest_util.parse_string_list(request, 'source_task', required=False)\nmod_started = rest_util.parse_timestamp(request, 'modified_started', required=False)\nmod_ended = rest_util.parse_timestamp(request, 'modified_ended', required=False)\nrest_util.check_time_range(mod_started, mod_ended)\njob_type_ids = rest_util.parse_int_list(request, 'job_type_id', required=False)\njob_type_names = rest_util.parse_string_list(request, 'job_type_name', required=False)\njob_ids = rest_util.parse_int_list(request, 'job_id', required=False)\nfile_names = rest_util.parse_string_list(request, 'file_name', required=False)\njob_outputs = rest_util.parse_string_list(request, 'job_output', required=False)\nrecipe_ids = rest_util.parse_int_list(request, 'recipe_id', required=False)\nrecipe_type_ids = rest_util.parse_int_list(request, 'recipe_type_id', required=False)\nrecipe_nodes = rest_util.parse_string_list(request, 'recipe_node', required=False)\nbatch_ids = rest_util.parse_int_list(request, 'batch_id', required=False)\norder = rest_util.parse_string_list(request, 'order', required=False)\nfiles = ScaleFile.objects.filter_files(data_started=data_started, data_ended=data_ended, source_started=source_started, source_ended=source_ended, source_sensor_classes=source_sensor_classes, source_sensors=source_sensors, source_collections=source_collections, source_tasks=source_tasks, mod_started=mod_started, mod_ended=mod_ended, job_type_ids=job_type_ids, job_type_names=job_type_names, job_ids=job_ids, file_names=file_names, job_outputs=job_outputs, recipe_ids=recipe_ids, recipe_type_ids=recipe_type_ids, recipe_nodes=recipe_nodes, batch_ids=batch_ids, order=order, countries=countries)\npage = self.paginate_queryset(files)\nserializer = self.get_serializer(page, many=True)\nreturn self.get_paginated_response(serializer.data)"], "bodies_text": "<|body_start_0|>\n if 
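For readers skimming the palindrome record above: the expand-around-center body (`longestPalindrome2`) is self-contained enough to lift out. Below is a minimal standalone sketch of the same idea; the function name `longest_palindrome` and the sample inputs are illustrative, not part of the archived record.

def longest_palindrome(s: str) -> str:
    # Expand-around-center, as in the longestPalindrome2 body above: each
    # index is tried as an odd-length center (i, i) and as the left half
    # of an even-length center (i, i + 1).
    if not s:
        return ''
    res = s[0]

    def extend(i: int, j: int) -> str:
        # Grow outward while both ends match; the palindrome is the slice
        # between the last matching positions.
        while i >= 0 and j < len(s) and s[i] == s[j]:
            i -= 1
            j += 1
        return s[i + 1:j]

    for i in range(len(s) - 1):
        best = max(extend(i, i), extend(i, i + 1), key=len)
        if len(best) > len(res):
            res = best
    return res

assert longest_palindrome('babad') in ('bab', 'aba')
assert longest_palindrome('cbbd') == 'bb'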
self.request.version == 'v6':\n return ScaleFileSerializerV6\n elif self.request.version == 'v7':\n return ScaleFileSerializerV6\n<|end_body_0|>\n\n<|body_start_1|>\n if request.version == 'v6':\n return self._list_v6(request)\n elif request.version == 'v7':\n return self._list_v6(request)\n raise Http404()\n<|end_body_1|>\n\n<|body_start_2|>\n countries = rest_util.parse_string_list(request, 'countries', required=False)\n data_started = rest_util.parse_timestamp(request, 'data_started', required=False)\n data_ended = rest_util.parse_timestamp(request, 'data_ended', required=False)\n rest_util.check_time_range(data_started, data_ended)\n source_started = rest_util.parse_timestamp(request, 'source_started', required=False)\n source_ended = rest_util.parse_timestamp(request, 'source_ended', required=False)\n rest_util.check_time_range(source_started, source_ended)\n source_sensor_classes = rest_util.parse_string_list(request, 'source_sensor_class', required=False)\n source_sensors = rest_util.parse_string_list(request, 'source_sensor', required=False)\n source_collections = rest_util.parse_string_list(request, 'source_collection', required=False)\n source_tasks = rest_util.parse_string_list(request, 'source_task', required=False)\n mod_started = rest_util.parse_timestamp(request, 'modified_started', required=False)\n mod_ended = rest_util.parse_timestamp(request, 'modified_ended', required=False)\n rest_util.check_time_range(mod_started, mod_ended)\n job_type_ids = rest_util.parse_int_list(request, 'job_type_id', required=False)\n job_type_names = rest_util.parse_string_list(request, 'job_type_name', required=False)\n job_ids = rest_util.parse_int_list(request, 'job_id', required=False)\n file_names = rest_util.parse_string_list(request, 'file_name', required=False)\n job_outputs = rest_util.parse_string_list(request, 'job_output', required=False)\n recipe_ids = rest_util.parse_int_list(request, 'recipe_id', required=False)\n recipe_type_ids = rest_util.parse_int_list(request, 'recipe_type_id', required=False)\n recipe_nodes = rest_util.parse_string_list(request, 'recipe_node', required=False)\n batch_ids = rest_util.parse_int_list(request, 'batch_id', required=False)\n order = rest_util.parse_string_list(request, 'order', required=False)\n files = ScaleFile.objects.filter_files(data_started=data_started, data_ended=data_ended, source_started=source_started, source_ended=source_ended, source_sensor_classes=source_sensor_classes, source_sensors=source_sensors, source_collections=source_collections, source_tasks=source_tasks, mod_started=mod_started, mod_ended=mod_ended, job_type_ids=job_type_ids, job_type_names=job_type_names, job_ids=job_ids, file_names=file_names, job_outputs=job_outputs, recipe_ids=recipe_ids, recipe_type_ids=recipe_type_ids, recipe_nodes=recipe_nodes, batch_ids=batch_ids, order=order, countries=countries)\n page = self.paginate_queryset(files)\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n<|end_body_2|>\n", "class_docstring": "This view is the endpoint for retrieving source/product files", "class_name": "FilesView", "detected_licenses": ["LicenseRef-scancode-free-unknown", "Apache-2.0", "LicenseRef-scancode-public-domain"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FilesView:\n \"\"\"This view is the endpoint for retrieving source/product files\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the 
REST API\"\"\"\n <|body_0|>\n\n def list(self, request):\n \"\"\"Retrieves the batches and returns them in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_1|>\n\n def _list_v6(self, request):\n \"\"\"Retrieves a list of files based on filters and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.version == 'v6':\n return ScaleFileSerializerV6\n elif self.request.version == 'v7':\n return ScaleFileSerializerV6\n<|end_body_0|>\n\n<|body_start_1|>\n if request.version == 'v6':\n return self._list_v6(request)\n elif request.version == 'v7':\n return self._list_v6(request)\n raise Http404()\n<|end_body_1|>\n\n<|body_start_2|>\n countries = rest_util.parse_string_list(request, 'countries', required=False)\n data_started = rest_util.parse_timestamp(request, 'data_started', required=False)\n data_ended = rest_util.parse_timestamp(request, 'data_ended', required=False)\n rest_util.check_time_range(data_started, data_ended)\n source_started = rest_util.parse_timestamp(request, 'source_started', required=False)\n source_ended = rest_util.parse_timestamp(request, 'source_ended', required=False)\n rest_util.check_time_range(source_started, source_ended)\n source_sensor_classes = rest_util.parse_string_list(request, 'source_sensor_class', required=False)\n source_sensors = rest_util.parse_string_list(request, 'source_sensor', required=False)\n source_collections = rest_util.parse_string_list(request, 'source_collection', required=False)\n source_tasks = rest_util.parse_string_list(request, 'source_task', required=False)\n mod_started = rest_util.parse_timestamp(request, 'modified_started', required=False)\n mod_ended = rest_util.parse_timestamp(request, 'modified_ended', required=False)\n rest_util.check_time_range(mod_started, mod_ended)\n job_type_ids = rest_util.parse_int_list(request, 'job_type_id', required=False)\n job_type_names = rest_util.parse_string_list(request, 'job_type_name', required=False)\n job_ids = rest_util.parse_int_list(request, 'job_id', required=False)\n file_names = rest_util.parse_string_list(request, 'file_name', required=False)\n job_outputs = rest_util.parse_string_list(request, 'job_output', required=False)\n recipe_ids = rest_util.parse_int_list(request, 'recipe_id', required=False)\n recipe_type_ids = rest_util.parse_int_list(request, 'recipe_type_id', required=False)\n recipe_nodes = rest_util.parse_string_list(request, 'recipe_node', required=False)\n batch_ids = rest_util.parse_int_list(request, 'batch_id', required=False)\n order = rest_util.parse_string_list(request, 'order', required=False)\n files = ScaleFile.objects.filter_files(data_started=data_started, data_ended=data_ended, source_started=source_started, source_ended=source_ended, source_sensor_classes=source_sensor_classes, source_sensors=source_sensors, source_collections=source_collections, source_tasks=source_tasks, mod_started=mod_started, mod_ended=mod_ended, job_type_ids=job_type_ids, job_type_names=job_type_names, job_ids=job_ids, file_names=file_names, job_outputs=job_outputs, recipe_ids=recipe_ids, recipe_type_ids=recipe_type_ids, recipe_nodes=recipe_nodes, batch_ids=batch_ids, 
order=order, countries=countries)\n page = self.paginate_queryset(files)\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000354", "length_bytes": 19677, "license_type": "permissive", "methods": [{"docstring": "Returns the appropriate serializer based off the requests version of the REST API", "name": "get_serializer_class", "signature": "def get_serializer_class(self)"}, {"docstring": "Retrieves the batches and returns them in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "name": "list", "signature": "def list(self, request)"}, {"docstring": "Retrieves a list of files based on filters and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "name": "_list_v6", "signature": "def _list_v6(self, request)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006723", "prompt": "Implement the Python class `FilesView` described below.\n\nClass description:\nThis view is the endpoint for retrieving source/product files\n\nMethod signatures and docstrings:\n- def get_serializer_class(self): Returns the appropriate serializer based off the requests version of the REST API\n- def list(self, request): Retrieves the batches and returns them in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n- def _list_v6(self, request): Retrieves a list of files based on filters and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "prompted_full_text": "Implement the Python class `FilesView` described below.\n\nClass description:\nThis view is the endpoint for retrieving source/product files\n\nMethod signatures and docstrings:\n- def get_serializer_class(self): Returns the appropriate serializer based off the requests version of the REST API\n- def list(self, request): Retrieves the batches and returns them in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n- def _list_v6(self, request): Retrieves a list of files based on filters and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n\n<|skeleton|>\nclass FilesView:\n \"\"\"This view is the endpoint for retrieving source/product files\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API\"\"\"\n <|body_0|>\n\n def list(self, request):\n \"\"\"Retrieves the batches and returns them in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n 
<|body_1|>\n\n def _list_v6(self, request):\n \"\"\"Retrieves a list of files based on filters and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.version == 'v6':\n return ScaleFileSerializerV6\n elif self.request.version == 'v7':\n return ScaleFileSerializerV6\n<|end_body_0|>\n\n<|body_start_1|>\n if request.version == 'v6':\n return self._list_v6(request)\n elif request.version == 'v7':\n return self._list_v6(request)\n raise Http404()\n<|end_body_1|>\n\n<|body_start_2|>\n countries = rest_util.parse_string_list(request, 'countries', required=False)\n data_started = rest_util.parse_timestamp(request, 'data_started', required=False)\n data_ended = rest_util.parse_timestamp(request, 'data_ended', required=False)\n rest_util.check_time_range(data_started, data_ended)\n source_started = rest_util.parse_timestamp(request, 'source_started', required=False)\n source_ended = rest_util.parse_timestamp(request, 'source_ended', required=False)\n rest_util.check_time_range(source_started, source_ended)\n source_sensor_classes = rest_util.parse_string_list(request, 'source_sensor_class', required=False)\n source_sensors = rest_util.parse_string_list(request, 'source_sensor', required=False)\n source_collections = rest_util.parse_string_list(request, 'source_collection', required=False)\n source_tasks = rest_util.parse_string_list(request, 'source_task', required=False)\n mod_started = rest_util.parse_timestamp(request, 'modified_started', required=False)\n mod_ended = rest_util.parse_timestamp(request, 'modified_ended', required=False)\n rest_util.check_time_range(mod_started, mod_ended)\n job_type_ids = rest_util.parse_int_list(request, 'job_type_id', required=False)\n job_type_names = rest_util.parse_string_list(request, 'job_type_name', required=False)\n job_ids = rest_util.parse_int_list(request, 'job_id', required=False)\n file_names = rest_util.parse_string_list(request, 'file_name', required=False)\n job_outputs = rest_util.parse_string_list(request, 'job_output', required=False)\n recipe_ids = rest_util.parse_int_list(request, 'recipe_id', required=False)\n recipe_type_ids = rest_util.parse_int_list(request, 'recipe_type_id', required=False)\n recipe_nodes = rest_util.parse_string_list(request, 'recipe_node', required=False)\n batch_ids = rest_util.parse_int_list(request, 'batch_id', required=False)\n order = rest_util.parse_string_list(request, 'order', required=False)\n files = ScaleFile.objects.filter_files(data_started=data_started, data_ended=data_ended, source_started=source_started, source_ended=source_ended, source_sensor_classes=source_sensor_classes, source_sensors=source_sensors, source_collections=source_collections, source_tasks=source_tasks, mod_started=mod_started, mod_ended=mod_ended, job_type_ids=job_type_ids, job_type_names=job_type_names, job_ids=job_ids, file_names=file_names, job_outputs=job_outputs, recipe_ids=recipe_ids, recipe_type_ids=recipe_type_ids, recipe_nodes=recipe_nodes, batch_ids=batch_ids, order=order, countries=countries)\n page = self.paginate_queryset(files)\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n<|end_body_2|>\n", "revision_id": "28618aee07ceed9e4a6eb7b8d0e6f05b31d8fd6b", "skeleton": "<|skeleton|>\nclass FilesView:\n \"\"\"This view is the 
endpoint for retrieving source/product files\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API\"\"\"\n <|body_0|>\n\n def list(self, request):\n \"\"\"Retrieves the batches and returns them in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_1|>\n\n def _list_v6(self, request):\n \"\"\"Retrieves a list of files based on filters and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FilesView:\n \"\"\"This view is the endpoint for retrieving source/product files\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API\"\"\"\n if self.request.version == 'v6':\n return ScaleFileSerializerV6\n elif self.request.version == 'v7':\n return ScaleFileSerializerV6\n\n def list(self, request):\n \"\"\"Retrieves the batches and returns them in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n if request.version == 'v6':\n return self._list_v6(request)\n elif request.version == 'v7':\n return self._list_v6(request)\n raise Http404()\n\n def _list_v6(self, request):\n \"\"\"Retrieves a list of files based on filters and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n countries = rest_util.parse_string_list(request, 'countries', required=False)\n data_started = rest_util.parse_timestamp(request, 'data_started', required=False)\n data_ended = rest_util.parse_timestamp(request, 'data_ended', required=False)\n rest_util.check_time_range(data_started, data_ended)\n source_started = rest_util.parse_timestamp(request, 'source_started', required=False)\n source_ended = rest_util.parse_timestamp(request, 'source_ended', required=False)\n rest_util.check_time_range(source_started, source_ended)\n source_sensor_classes = rest_util.parse_string_list(request, 'source_sensor_class', required=False)\n source_sensors = rest_util.parse_string_list(request, 'source_sensor', required=False)\n source_collections = rest_util.parse_string_list(request, 'source_collection', required=False)\n source_tasks = rest_util.parse_string_list(request, 'source_task', required=False)\n mod_started = rest_util.parse_timestamp(request, 'modified_started', required=False)\n mod_ended = rest_util.parse_timestamp(request, 'modified_ended', required=False)\n rest_util.check_time_range(mod_started, mod_ended)\n job_type_ids = rest_util.parse_int_list(request, 'job_type_id', required=False)\n job_type_names = rest_util.parse_string_list(request, 'job_type_name', required=False)\n job_ids = rest_util.parse_int_list(request, 'job_id', required=False)\n file_names = rest_util.parse_string_list(request, 'file_name', required=False)\n job_outputs = 
rest_util.parse_string_list(request, 'job_output', required=False)\n recipe_ids = rest_util.parse_int_list(request, 'recipe_id', required=False)\n recipe_type_ids = rest_util.parse_int_list(request, 'recipe_type_id', required=False)\n recipe_nodes = rest_util.parse_string_list(request, 'recipe_node', required=False)\n batch_ids = rest_util.parse_int_list(request, 'batch_id', required=False)\n order = rest_util.parse_string_list(request, 'order', required=False)\n files = ScaleFile.objects.filter_files(data_started=data_started, data_ended=data_ended, source_started=source_started, source_ended=source_ended, source_sensor_classes=source_sensor_classes, source_sensors=source_sensors, source_collections=source_collections, source_tasks=source_tasks, mod_started=mod_started, mod_ended=mod_ended, job_type_ids=job_type_ids, job_type_names=job_type_names, job_ids=job_ids, file_names=file_names, job_outputs=job_outputs, recipe_ids=recipe_ids, recipe_type_ids=recipe_type_ids, recipe_nodes=recipe_nodes, batch_ids=batch_ids, order=order, countries=countries)\n page = self.paginate_queryset(files)\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n", "source": "the_stack_v2_python_sparse", "source_path": "scale/storage/views.py", "source_repo": "kfconsultant/scale", "split": "val", "star_events_count": 0} {"blob_id": "20e34197d6bb789ff5065d68997d71513b188a13", "bodies": ["self.user = user\nself.filter_by_user = kwargs.pop('filter_by_user', True)\nfor facet in self.facets:\n if facet in kwargs:\n kwargs.setdefault('filters', {})[facet] = kwargs.pop(facet)\nfor f in ALL_FACETS:\n if f in kwargs:\n del kwargs[f]\nlog.info('Hacking Elastic to fix search connection pooling')\nself.using = Elasticsearch(**settings.ELASTICSEARCH_DSL['default'])\nsuper().__init__(**kwargs)", "search = search.highlight_options(encoder='html', number_of_fragments=3)\nsearch = search.source(exclude=['content', 'headers'])\nall_queries = []\nfor operator in self.operators:\n query_string = SimpleQueryString(query=query, fields=self.fields, default_operator=operator)\n all_queries.append(query_string)\nbool_query = Bool(should=all_queries)\nsearch = search.query(bool_query)\nreturn search"], "bodies_text": "<|body_start_0|>\n self.user = user\n self.filter_by_user = kwargs.pop('filter_by_user', True)\n for facet in self.facets:\n if facet in kwargs:\n kwargs.setdefault('filters', {})[facet] = kwargs.pop(facet)\n for f in ALL_FACETS:\n if f in kwargs:\n del kwargs[f]\n log.info('Hacking Elastic to fix search connection pooling')\n self.using = Elasticsearch(**settings.ELASTICSEARCH_DSL['default'])\n super().__init__(**kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n search = search.highlight_options(encoder='html', number_of_fragments=3)\n search = search.source(exclude=['content', 'headers'])\n all_queries = []\n for operator in self.operators:\n query_string = SimpleQueryString(query=query, fields=self.fields, default_operator=operator)\n all_queries.append(query_string)\n bool_query = Bool(should=all_queries)\n search = search.query(bool_query)\n return search\n<|end_body_1|>\n", "class_docstring": "", "class_name": "RTDFacetedSearch", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RTDFacetedSearch:\n\n def __init__(self, user, **kwargs):\n \"\"\"Pass in a user in order to filter search results by privacy. .. 
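The `_list_v6` body in the FilesView record above follows one pattern throughout: parse each optional query parameter, then forward everything to a single `filter_files` call. A compressed sketch of that pattern follows; `parse_string_list` and `parse_int_list` here are hypothetical simplifications of the project's `rest_util` helpers, shown only for shape.

def parse_string_list(params, name):
    # Hypothetical stand-in for rest_util.parse_string_list(..., required=False):
    # absent parameter -> None, present -> list of strings.
    value = params.get(name)
    return value.split(',') if value else None

def parse_int_list(params, name):
    value = parse_string_list(params, name)
    return [int(v) for v in value] if value is not None else None

def build_filters(params):
    # Keep only the filters the caller actually supplied, mirroring how the
    # view hands every optional parameter to ScaleFile.objects.filter_files().
    candidates = {
        'file_names': parse_string_list(params, 'file_name'),
        'job_ids': parse_int_list(params, 'job_id'),
        'batch_ids': parse_int_list(params, 'batch_id'),
    }
    return {key: value for key, value in candidates.items() if value is not None}

assert build_filters({'file_name': 'a.tif,b.tif', 'job_id': '1,2'}) == \
    {'file_names': ['a.tif', 'b.tif'], 'job_ids': [1, 2]}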
warning:: The `self.user` and `self.filter_by_user` attributes aren't currently used on the .org, but are used on the .com.\"\"\"\n <|body_0|>\n\n def query(self, search, query):\n \"\"\"Add query part to ``search`` when needed. Also: * Adds SimpleQueryString instead of default query. * Adds HTML encoding of results to avoid XSS issues.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.user = user\n self.filter_by_user = kwargs.pop('filter_by_user', True)\n for facet in self.facets:\n if facet in kwargs:\n kwargs.setdefault('filters', {})[facet] = kwargs.pop(facet)\n for f in ALL_FACETS:\n if f in kwargs:\n del kwargs[f]\n log.info('Hacking Elastic to fix search connection pooling')\n self.using = Elasticsearch(**settings.ELASTICSEARCH_DSL['default'])\n super().__init__(**kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n search = search.highlight_options(encoder='html', number_of_fragments=3)\n search = search.source(exclude=['content', 'headers'])\n all_queries = []\n for operator in self.operators:\n query_string = SimpleQueryString(query=query, fields=self.fields, default_operator=operator)\n all_queries.append(query_string)\n bool_query = Bool(should=all_queries)\n search = search.query(bool_query)\n return search\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000355", "length_bytes": 7864, "license_type": "permissive", "methods": [{"docstring": "Pass in a user in order to filter search results by privacy. .. warning:: The `self.user` and `self.filter_by_user` attributes aren't currently used on the .org, but are used on the .com.", "name": "__init__", "signature": "def __init__(self, user, **kwargs)"}, {"docstring": "Add query part to ``search`` when needed. Also: * Adds SimpleQueryString instead of default query. * Adds HTML encoding of results to avoid XSS issues.", "name": "query", "signature": "def query(self, search, query)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003340", "prompt": "Implement the Python class `RTDFacetedSearch` described below.\n\nClass description:\nImplement the RTDFacetedSearch class.\n\nMethod signatures and docstrings:\n- def __init__(self, user, **kwargs): Pass in a user in order to filter search results by privacy. .. warning:: The `self.user` and `self.filter_by_user` attributes aren't currently used on the .org, but are used on the .com.\n- def query(self, search, query): Add query part to ``search`` when needed. Also: * Adds SimpleQueryString instead of default query. * Adds HTML encoding of results to avoid XSS issues.", "prompted_full_text": "Implement the Python class `RTDFacetedSearch` described below.\n\nClass description:\nImplement the RTDFacetedSearch class.\n\nMethod signatures and docstrings:\n- def __init__(self, user, **kwargs): Pass in a user in order to filter search results by privacy. .. warning:: The `self.user` and `self.filter_by_user` attributes aren't currently used on the .org, but are used on the .com.\n- def query(self, search, query): Add query part to ``search`` when needed. Also: * Adds SimpleQueryString instead of default query. * Adds HTML encoding of results to avoid XSS issues.\n\n<|skeleton|>\nclass RTDFacetedSearch:\n\n def __init__(self, user, **kwargs):\n \"\"\"Pass in a user in order to filter search results by privacy. .. warning:: The `self.user` and `self.filter_by_user` attributes aren't currently used on the .org, but are used on the .com.\"\"\"\n <|body_0|>\n\n def query(self, search, query):\n \"\"\"Add query part to ``search`` when needed. 
Also: * Adds SimpleQueryString instead of default query. * Adds HTML encoding of results to avoid XSS issues.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.user = user\n self.filter_by_user = kwargs.pop('filter_by_user', True)\n for facet in self.facets:\n if facet in kwargs:\n kwargs.setdefault('filters', {})[facet] = kwargs.pop(facet)\n for f in ALL_FACETS:\n if f in kwargs:\n del kwargs[f]\n log.info('Hacking Elastic to fix search connection pooling')\n self.using = Elasticsearch(**settings.ELASTICSEARCH_DSL['default'])\n super().__init__(**kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n search = search.highlight_options(encoder='html', number_of_fragments=3)\n search = search.source(exclude=['content', 'headers'])\n all_queries = []\n for operator in self.operators:\n query_string = SimpleQueryString(query=query, fields=self.fields, default_operator=operator)\n all_queries.append(query_string)\n bool_query = Bool(should=all_queries)\n search = search.query(bool_query)\n return search\n<|end_body_1|>\n", "revision_id": "649965d7589eb1d30efdc7906c3ee7dc5a9e3656", "skeleton": "<|skeleton|>\nclass RTDFacetedSearch:\n\n def __init__(self, user, **kwargs):\n \"\"\"Pass in a user in order to filter search results by privacy. .. warning:: The `self.user` and `self.filter_by_user` attributes aren't currently used on the .org, but are used on the .com.\"\"\"\n <|body_0|>\n\n def query(self, search, query):\n \"\"\"Add query part to ``search`` when needed. Also: * Adds SimpleQueryString instead of default query. * Adds HTML encoding of results to avoid XSS issues.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RTDFacetedSearch:\n def __init__(self, user, **kwargs):\n \"\"\"Pass in a user in order to filter search results by privacy. .. warning:: The `self.user` and `self.filter_by_user` attributes aren't currently used on the .org, but are used on the .com.\"\"\"\n self.user = user\n self.filter_by_user = kwargs.pop('filter_by_user', True)\n for facet in self.facets:\n if facet in kwargs:\n kwargs.setdefault('filters', {})[facet] = kwargs.pop(facet)\n for f in ALL_FACETS:\n if f in kwargs:\n del kwargs[f]\n log.info('Hacking Elastic to fix search connection pooling')\n self.using = Elasticsearch(**settings.ELASTICSEARCH_DSL['default'])\n super().__init__(**kwargs)\n\n def query(self, search, query):\n \"\"\"Add query part to ``search`` when needed. Also: * Adds SimpleQueryString instead of default query. 
* Adds HTML encoding of results to avoid XSS issues.\"\"\"\n search = search.highlight_options(encoder='html', number_of_fragments=3)\n search = search.source(exclude=['content', 'headers'])\n all_queries = []\n for operator in self.operators:\n query_string = SimpleQueryString(query=query, fields=self.fields, default_operator=operator)\n all_queries.append(query_string)\n bool_query = Bool(should=all_queries)\n search = search.query(bool_query)\n return search\n", "source": "the_stack_v2_python_sparse", "source_path": "readthedocs/search/faceted_search.py", "source_repo": "italia/docs.italia.it", "split": "val", "star_events_count": 19} {"blob_id": "96e828c3454d61a46519d130bcd8cd5bbc835677", "bodies": ["if not root:\n return '[]'\ndata = []\nqueue = [root]\nwhile queue:\n length = len(queue)\n for i in range(length):\n node = queue.pop(0)\n if node:\n data.append(node.val)\n queue.append(node.left)\n queue.append(node.right)\n else:\n data.append(None)\nwhile data[-1] is None:\n data.pop()\nreturn str(data)", "if data == '[]':\n return\ni, j = (1, 1)\nwhile data[i].isdigit() or data[i] == '-':\n i += 1\nroot = TreeNode(int(data[j:i]))\nqueue = [[root, 0]]\nwhile i < len(data):\n if data[i].isdigit() or data[i] == '-':\n j = i\n while i < len(data) and data[i].isdigit() or data[i] == '-':\n i += 1\n new = TreeNode(int(data[j:i]))\n if not queue[0][1]:\n queue[0][0].left = new\n queue[0][1] += 1\n queue.append([new, 0])\n else:\n node, t = queue.pop(0)\n node.right = new\n queue.append([new, 0])\n elif data[i] == 'N':\n if not queue[0][1]:\n queue[0][1] += 1\n else:\n queue.pop(0)\n i += 4\n else:\n i += 1\nreturn root"], "bodies_text": "<|body_start_0|>\n if not root:\n return '[]'\n data = []\n queue = [root]\n while queue:\n length = len(queue)\n for i in range(length):\n node = queue.pop(0)\n if node:\n data.append(node.val)\n queue.append(node.left)\n queue.append(node.right)\n else:\n data.append(None)\n while data[-1] is None:\n data.pop()\n return str(data)\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '[]':\n return\n i, j = (1, 1)\n while data[i].isdigit() or data[i] == '-':\n i += 1\n root = TreeNode(int(data[j:i]))\n queue = [[root, 0]]\n while i < len(data):\n if data[i].isdigit() or data[i] == '-':\n j = i\n while i < len(data) and data[i].isdigit() or data[i] == '-':\n i += 1\n new = TreeNode(int(data[j:i]))\n if not queue[0][1]:\n queue[0][0].left = new\n queue[0][1] += 1\n queue.append([new, 0])\n else:\n node, t = queue.pop(0)\n node.right = new\n queue.append([new, 0])\n elif data[i] == 'N':\n if not queue[0][1]:\n queue[0][1] += 1\n else:\n queue.pop(0)\n i += 4\n else:\n i += 1\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. 
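The `query` body in the RTDFacetedSearch record above builds one `SimpleQueryString` per operator and ORs them together with a `bool`/`should` clause. Assuming those classes come from the elasticsearch-dsl package (the record's imports are not shown), the construction can be exercised offline, since building a query object and serializing it needs no cluster:

from elasticsearch_dsl.query import Bool, SimpleQueryString

def build_query(text, fields, operators=('and', 'or')):
    # One SimpleQueryString per operator, OR-ed together via bool/should --
    # the same loop shape used in RTDFacetedSearch.query above.
    return Bool(should=[
        SimpleQueryString(query=text, fields=list(fields), default_operator=op)
        for op in operators
    ])

print(build_query('read the docs', ['title^2', 'content']).to_dict())
# -> {'bool': {'should': [{'simple_query_string': {... 'default_operator': 'and'}},
#                         {'simple_query_string': {... 'default_operator': 'or'}}]}}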
:type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return '[]'\n data = []\n queue = [root]\n while queue:\n length = len(queue)\n for i in range(length):\n node = queue.pop(0)\n if node:\n data.append(node.val)\n queue.append(node.left)\n queue.append(node.right)\n else:\n data.append(None)\n while data[-1] is None:\n data.pop()\n return str(data)\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '[]':\n return\n i, j = (1, 1)\n while data[i].isdigit() or data[i] == '-':\n i += 1\n root = TreeNode(int(data[j:i]))\n queue = [[root, 0]]\n while i < len(data):\n if data[i].isdigit() or data[i] == '-':\n j = i\n while i < len(data) and data[i].isdigit() or data[i] == '-':\n i += 1\n new = TreeNode(int(data[j:i]))\n if not queue[0][1]:\n queue[0][0].left = new\n queue[0][1] += 1\n queue.append([new, 0])\n else:\n node, t = queue.pop(0)\n node.right = new\n queue.append([new, 0])\n elif data[i] == 'N':\n if not queue[0][1]:\n queue[0][1] += 1\n else:\n queue.pop(0)\n i += 4\n else:\n i += 1\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000356", "length_bytes": 3240, "license_type": "permissive", "methods": [{"docstring": "Encodes a tree to a single string. :type root: TreeNode :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006765", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return '[]'\n data = []\n queue = [root]\n while queue:\n length = len(queue)\n for i in range(length):\n node = queue.pop(0)\n if node:\n data.append(node.val)\n queue.append(node.left)\n queue.append(node.right)\n else:\n data.append(None)\n while data[-1] is None:\n data.pop()\n return str(data)\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '[]':\n return\n i, j = (1, 1)\n while data[i].isdigit() or data[i] == '-':\n i += 1\n root = TreeNode(int(data[j:i]))\n queue = [[root, 0]]\n while i < len(data):\n if data[i].isdigit() or data[i] == '-':\n j = i\n while i < len(data) and data[i].isdigit() or data[i] == '-':\n i += 1\n new = TreeNode(int(data[j:i]))\n if not queue[0][1]:\n queue[0][0].left = new\n queue[0][1] += 1\n queue.append([new, 0])\n else:\n node, t = queue.pop(0)\n node.right = new\n queue.append([new, 0])\n elif data[i] == 'N':\n if not queue[0][1]:\n queue[0][1] += 1\n else:\n queue.pop(0)\n i += 4\n else:\n i += 1\n return root\n<|end_body_1|>\n", "revision_id": "f09e0aa3de081883b4a7ebfe4d31b5f86f24b64f", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n if not root:\n return '[]'\n data = []\n queue = [root]\n while queue:\n length = len(queue)\n for i in range(length):\n node = queue.pop(0)\n if node:\n data.append(node.val)\n queue.append(node.left)\n queue.append(node.right)\n else:\n data.append(None)\n while data[-1] is None:\n data.pop()\n return str(data)\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n if data == '[]':\n return\n i, j = (1, 1)\n while data[i].isdigit() or data[i] == '-':\n i += 1\n root = TreeNode(int(data[j:i]))\n queue = [[root, 0]]\n while i < len(data):\n if data[i].isdigit() or data[i] == '-':\n j = i\n while i < len(data) and data[i].isdigit() or data[i] == '-':\n i += 1\n new = TreeNode(int(data[j:i]))\n if not queue[0][1]:\n queue[0][0].left = new\n queue[0][1] += 1\n queue.append([new, 0])\n else:\n node, t = queue.pop(0)\n node.right = new\n queue.append([new, 0])\n elif data[i] == 'N':\n if not queue[0][1]:\n queue[0][1] += 1\n else:\n queue.pop(0)\n i += 4\n else:\n i += 1\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "Leetcode/297. 
二叉树的序列化与反序列化.py", "source_repo": "QDylan/Learning-", "split": "val", "star_events_count": 0} {"blob_id": "97eb4b976a8690d4f30bdd3946dbcf022ccdb28d", "bodies": ["global url\nsess = session()\nparams = {'parameter': {'recognize_class': 'update', 'rate_grade': '高', 'update_propertiy': {'cn_id': 'true', 'cn_bank': 'true', 'cn_email': 'true', 'cn_date': 'false', 'cn_telephone': 'true', 'cn_passport': 'false'}}}\ns_get = sess.put(url, json=params)\njson_data = json.loads(s_get.content.decode('utf8'))\nprint('修改:')\nprint(json_data)\nstatus = json_data['status']\nself.assertEqual(status, 0)\nclass_message = json_data['class_message']\nself.assertEqual(4, len(class_message))\nprint(type(class_message))\nsess.close()", "global url\nsess = session()\nparams = {'parameter': {'recognize_class': 'default', 'rate_grade': '高', 'update_propertiy': {'cn_id': 'true', 'cn_bank': 'true', 'cn_email': 'false', 'cn_date': 'false', 'cn_telephon': 'false', 'cn_passport': 'false'}}}\ns_get = sess.put(url, json=params)\njson_data = json.loads(s_get.content.decode('utf8'))\nprint('默认')\nprint(json_data)\nstatus = json_data['status']\nself.assertEqual(status, 0)\nclass_message = json_data['class_message']\nself.assertEqual(4, len(class_message))\nprint(type(class_message))\nsess.close()"], "bodies_text": "<|body_start_0|>\n global url\n sess = session()\n params = {'parameter': {'recognize_class': 'update', 'rate_grade': '高', 'update_propertiy': {'cn_id': 'true', 'cn_bank': 'true', 'cn_email': 'true', 'cn_date': 'false', 'cn_telephone': 'true', 'cn_passport': 'false'}}}\n s_get = sess.put(url, json=params)\n json_data = json.loads(s_get.content.decode('utf8'))\n print('修改:')\n print(json_data)\n status = json_data['status']\n self.assertEqual(status, 0)\n class_message = json_data['class_message']\n self.assertEqual(4, len(class_message))\n print(type(class_message))\n sess.close()\n<|end_body_0|>\n\n<|body_start_1|>\n global url\n sess = session()\n params = {'parameter': {'recognize_class': 'default', 'rate_grade': '高', 'update_propertiy': {'cn_id': 'true', 'cn_bank': 'true', 'cn_email': 'false', 'cn_date': 'false', 'cn_telephon': 'false', 'cn_passport': 'false'}}}\n s_get = sess.put(url, json=params)\n json_data = json.loads(s_get.content.decode('utf8'))\n print('默认')\n print(json_data)\n status = json_data['status']\n self.assertEqual(status, 0)\n class_message = json_data['class_message']\n self.assertEqual(4, len(class_message))\n print(type(class_message))\n sess.close()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "TestApiPorpertySensitive", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestApiPorpertySensitive:\n\n def test_update_property(self):\n \"\"\"参数: 修改类别为update,id和bank为true,其他为false 期望结果: 返回的识别信息列表有两个信息cn_id和cn_bank\"\"\"\n <|body_0|>\n\n def test_default_property(self):\n \"\"\"参数: 修改类别为defalut, 期望结果: 返回的识别信息列表有四个信息cn_id、cn_bank、cn_email、cn_telephon\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n global url\n sess = session()\n params = {'parameter': {'recognize_class': 'update', 'rate_grade': '高', 'update_propertiy': {'cn_id': 'true', 'cn_bank': 'true', 'cn_email': 'true', 'cn_date': 'false', 'cn_telephone': 'true', 'cn_passport': 'false'}}}\n s_get = sess.put(url, json=params)\n json_data = json.loads(s_get.content.decode('utf8'))\n print('修改:')\n print(json_data)\n status = json_data['status']\n self.assertEqual(status, 0)\n class_message = json_data['class_message']\n self.assertEqual(4, 
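The Codec record above serializes a binary tree level-order with explicit None markers and trims the trailing run of Nones. A round-trip sketch of the same scheme follows; using `json` for tokenizing is my substitution for the record's manual character scanner, and `TreeNode` is the minimal definition the record presupposes.

import json

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def serialize(root):
    # Level-order walk, appending None for absent children, then trimming
    # trailing Nones -- the same shape Codec.serialize produces.
    if not root:
        return '[]'
    out, queue = [], [root]
    while queue:
        node = queue.pop(0)
        if node:
            out.append(node.val)
            queue.extend([node.left, node.right])
        else:
            out.append(None)
    while out and out[-1] is None:
        out.pop()
    return json.dumps(out)

def deserialize(data):
    values = json.loads(data)
    if not values:
        return None
    root = TreeNode(values[0])
    rest = iter(values[1:])
    queue = [root]
    for parent in queue:  # appending while iterating is safe for Python lists
        for side in ('left', 'right'):
            val = next(rest, None)
            if val is not None:
                child = TreeNode(val)
                setattr(parent, side, child)
                queue.append(child)
    return root

assert serialize(deserialize('[1, 2, 3, null, 4]')) == '[1, 2, 3, null, 4]'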
len(class_message))\n print(type(class_message))\n sess.close()\n<|end_body_0|>\n\n<|body_start_1|>\n global url\n sess = session()\n params = {'parameter': {'recognize_class': 'default', 'rate_grade': '高', 'update_propertiy': {'cn_id': 'true', 'cn_bank': 'true', 'cn_email': 'false', 'cn_date': 'false', 'cn_telephon': 'false', 'cn_passport': 'false'}}}\n s_get = sess.put(url, json=params)\n json_data = json.loads(s_get.content.decode('utf8'))\n print('默认')\n print(json_data)\n status = json_data['status']\n self.assertEqual(status, 0)\n class_message = json_data['class_message']\n self.assertEqual(4, len(class_message))\n print(type(class_message))\n sess.close()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000357", "length_bytes": 2846, "license_type": "no_license", "methods": [{"docstring": "参数: 修改类别为update,id和bank为true,其他为false 期望结果: 返回的识别信息列表有两个信息cn_id和cn_bank", "name": "test_update_property", "signature": "def test_update_property(self)"}, {"docstring": "参数: 修改类别为defalut, 期望结果: 返回的识别信息列表有四个信息cn_id、cn_bank、cn_email、cn_telephon", "name": "test_default_property", "signature": "def test_default_property(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002481", "prompt": "Implement the Python class `TestApiPorpertySensitive` described below.\n\nClass description:\nImplement the TestApiPorpertySensitive class.\n\nMethod signatures and docstrings:\n- def test_update_property(self): 参数: 修改类别为update,id和bank为true,其他为false 期望结果: 返回的识别信息列表有两个信息cn_id和cn_bank\n- def test_default_property(self): 参数: 修改类别为defalut, 期望结果: 返回的识别信息列表有四个信息cn_id、cn_bank、cn_email、cn_telephon", "prompted_full_text": "Implement the Python class `TestApiPorpertySensitive` described below.\n\nClass description:\nImplement the TestApiPorpertySensitive class.\n\nMethod signatures and docstrings:\n- def test_update_property(self): 参数: 修改类别为update,id和bank为true,其他为false 期望结果: 返回的识别信息列表有两个信息cn_id和cn_bank\n- def test_default_property(self): 参数: 修改类别为defalut, 期望结果: 返回的识别信息列表有四个信息cn_id、cn_bank、cn_email、cn_telephon\n\n<|skeleton|>\nclass TestApiPorpertySensitive:\n\n def test_update_property(self):\n \"\"\"参数: 修改类别为update,id和bank为true,其他为false 期望结果: 返回的识别信息列表有两个信息cn_id和cn_bank\"\"\"\n <|body_0|>\n\n def test_default_property(self):\n \"\"\"参数: 修改类别为defalut, 期望结果: 返回的识别信息列表有四个信息cn_id、cn_bank、cn_email、cn_telephon\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n global url\n sess = session()\n params = {'parameter': {'recognize_class': 'update', 'rate_grade': '高', 'update_propertiy': {'cn_id': 'true', 'cn_bank': 'true', 'cn_email': 'true', 'cn_date': 'false', 'cn_telephone': 'true', 'cn_passport': 'false'}}}\n s_get = sess.put(url, json=params)\n json_data = json.loads(s_get.content.decode('utf8'))\n print('修改:')\n print(json_data)\n status = json_data['status']\n self.assertEqual(status, 0)\n class_message = json_data['class_message']\n self.assertEqual(4, len(class_message))\n print(type(class_message))\n sess.close()\n<|end_body_0|>\n\n<|body_start_1|>\n global url\n sess = session()\n params = {'parameter': {'recognize_class': 'default', 'rate_grade': '高', 'update_propertiy': {'cn_id': 'true', 'cn_bank': 'true', 'cn_email': 'false', 'cn_date': 'false', 'cn_telephon': 'false', 'cn_passport': 'false'}}}\n s_get = sess.put(url, json=params)\n json_data = json.loads(s_get.content.decode('utf8'))\n print('默认')\n print(json_data)\n status = json_data['status']\n self.assertEqual(status, 0)\n class_message = json_data['class_message']\n self.assertEqual(4, len(class_message))\n 
print(type(class_message))\n sess.close()\n<|end_body_1|>\n", "revision_id": "fb268cf7901322bb16b8b295f2c791628665bfb8", "skeleton": "<|skeleton|>\nclass TestApiPorpertySensitive:\n\n def test_update_property(self):\n \"\"\"参数: 修改类别为update,id和bank为true,其他为false 期望结果: 返回的识别信息列表有两个信息cn_id和cn_bank\"\"\"\n <|body_0|>\n\n def test_default_property(self):\n \"\"\"参数: 修改类别为defalut, 期望结果: 返回的识别信息列表有四个信息cn_id、cn_bank、cn_email、cn_telephon\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestApiPorpertySensitive:\n def test_update_property(self):\n \"\"\"参数: 修改类别为update,id和bank为true,其他为false 期望结果: 返回的识别信息列表有两个信息cn_id和cn_bank\"\"\"\n global url\n sess = session()\n params = {'parameter': {'recognize_class': 'update', 'rate_grade': '高', 'update_propertiy': {'cn_id': 'true', 'cn_bank': 'true', 'cn_email': 'true', 'cn_date': 'false', 'cn_telephone': 'true', 'cn_passport': 'false'}}}\n s_get = sess.put(url, json=params)\n json_data = json.loads(s_get.content.decode('utf8'))\n print('修改:')\n print(json_data)\n status = json_data['status']\n self.assertEqual(status, 0)\n class_message = json_data['class_message']\n self.assertEqual(4, len(class_message))\n print(type(class_message))\n sess.close()\n\n def test_default_property(self):\n \"\"\"参数: 修改类别为defalut, 期望结果: 返回的识别信息列表有四个信息cn_id、cn_bank、cn_email、cn_telephon\"\"\"\n global url\n sess = session()\n params = {'parameter': {'recognize_class': 'default', 'rate_grade': '高', 'update_propertiy': {'cn_id': 'true', 'cn_bank': 'true', 'cn_email': 'false', 'cn_date': 'false', 'cn_telephon': 'false', 'cn_passport': 'false'}}}\n s_get = sess.put(url, json=params)\n json_data = json.loads(s_get.content.decode('utf8'))\n print('默认')\n print(json_data)\n status = json_data['status']\n self.assertEqual(status, 0)\n class_message = json_data['class_message']\n self.assertEqual(4, len(class_message))\n print(type(class_message))\n sess.close()\n", "source": "the_stack_v2_python_sparse", "source_path": "aladdin-cas/unit_test/test_api_property_privacy.py", "source_repo": "ARES3366/aladdin", "split": "val", "star_events_count": 0} {"blob_id": "8f67d59da3bc32ceb80cb28e394e6aca85cb7f3c", "bodies": ["args = [Ndisc.NDISC6_COMMAND_NAME, Ndisc.COUNT_OPTION, str(count), Ndisc.TIMEOUT_OPTION, str(int(timeout) * 1000)]\nif quiet is True:\n args.append(Ndisc.QUIET_OPTION)\nif firstReply is True:\n args.append(Ndisc.FIRST_REPLY_OPTION)\nargs.append(destination)\nargs.append(device)\nrc = Command.execute(logger, Ndisc.NDISC6_COMMAND_NAME, args, timeoutSec=timeout + 3, blocking=blocking)\nreturn rc", "args = [Ndisc.RDISC6_COMMAND_NAME, Ndisc.COUNT_OPTION, str(count)]\nif quiet is True:\n args.append(Ndisc.QUIET_OPTION)\nargs.append(destination)\nargs.append(device)\nrc = Command.execute(logger, Ndisc.RDISC6_COMMAND_NAME, args, blocking=blocking)\nreturn rc"], "bodies_text": "<|body_start_0|>\n args = [Ndisc.NDISC6_COMMAND_NAME, Ndisc.COUNT_OPTION, str(count), Ndisc.TIMEOUT_OPTION, str(int(timeout) * 1000)]\n if quiet is True:\n args.append(Ndisc.QUIET_OPTION)\n if firstReply is True:\n args.append(Ndisc.FIRST_REPLY_OPTION)\n args.append(destination)\n args.append(device)\n rc = Command.execute(logger, Ndisc.NDISC6_COMMAND_NAME, args, timeoutSec=timeout + 3, blocking=blocking)\n return rc\n<|end_body_0|>\n\n<|body_start_1|>\n args = [Ndisc.RDISC6_COMMAND_NAME, Ndisc.COUNT_OPTION, str(count)]\n if quiet is True:\n args.append(Ndisc.QUIET_OPTION)\n 
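Both test bodies in the TestApiPorpertySensitive record above repeat the same PUT-and-assert steps; only the `parameter` payload differs. Below is a sketch of the shared helper they suggest; the global `url` and the field spellings, including `update_propertiy`, are taken verbatim from the record, and the refactoring itself is mine, not the archived author's.

import json
from requests import Session

def put_recognize_config(url, recognize_class, update_propertiy):
    # PUT the parameter document and decode the JSON reply, as both test
    # bodies above do before asserting on status/class_message.
    payload = {'parameter': {'recognize_class': recognize_class,
                             'rate_grade': '高',
                             'update_propertiy': update_propertiy}}
    with Session() as sess:
        resp = sess.put(url, json=payload)
        return json.loads(resp.content.decode('utf8'))

# Against a live endpoint, the record's assertions become:
#   reply = put_recognize_config(url, 'update', {'cn_id': 'true', 'cn_bank': 'true'})
#   assert reply['status'] == 0
#   assert len(reply['class_message']) == 4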
args.append(destination)\n args.append(device)\n rc = Command.execute(logger, Ndisc.RDISC6_COMMAND_NAME, args, blocking=blocking)\n return rc\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Ndisc", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Ndisc:\n\n def sendNdiscRequest(logger, device, destination, count=3, timeout=1, quiet=False, firstReply=False, blocking=True):\n \"\"\"This function sends IPv6 neighbor discovery to a neighbour host Args: logger device - Name of network device where to send ARP REQUEST packets destination - destination IP to ping count - send ICMPv6 Neighbor Discovery X times timeout - specify a timeout, in seconds, before ndisc exits quiet - quiet output firstReply - Exit as soon as the first advertisement is received blocking - if True, waits for command to complete Return: tuple (rc, stdout, stderr) Raise: None\"\"\"\n <|body_0|>\n\n def sendRdiscReply(logger, device, destination, count=3, quiet=False, blocking=True):\n \"\"\"This function sends IPv6 router discovery to a neighbour host Args: logger device - Name of network device where to send ICMPv6 Router Discovery packets destination - destination IP to ping count - send ICMPv6 Router Discovery X times quiet - quiet output blocking - if True, waits for command to complete Return: tuple (rc, stdout, stderr) Raise: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n args = [Ndisc.NDISC6_COMMAND_NAME, Ndisc.COUNT_OPTION, str(count), Ndisc.TIMEOUT_OPTION, str(int(timeout) * 1000)]\n if quiet is True:\n args.append(Ndisc.QUIET_OPTION)\n if firstReply is True:\n args.append(Ndisc.FIRST_REPLY_OPTION)\n args.append(destination)\n args.append(device)\n rc = Command.execute(logger, Ndisc.NDISC6_COMMAND_NAME, args, timeoutSec=timeout + 3, blocking=blocking)\n return rc\n<|end_body_0|>\n\n<|body_start_1|>\n args = [Ndisc.RDISC6_COMMAND_NAME, Ndisc.COUNT_OPTION, str(count)]\n if quiet is True:\n args.append(Ndisc.QUIET_OPTION)\n args.append(destination)\n args.append(device)\n rc = Command.execute(logger, Ndisc.RDISC6_COMMAND_NAME, args, blocking=blocking)\n return rc\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000358", "length_bytes": 10343, "license_type": "no_license", "methods": [{"docstring": "This function sends IPv6 neighbor discovery to a neighbour host Args: logger device - Name of network device where to send ARP REQUEST packets destination - destination IP to ping count - send ICMPv6 Neighbor Discovery X times timeout - specify a timeout, in seconds, before ndisc exits quiet - quiet output firstReply - Exit as soon as the first advertisement is received blocking - if True, waits for command to complete Return: tuple (rc, stdout, stderr) Raise: None", "name": "sendNdiscRequest", "signature": "def sendNdiscRequest(logger, device, destination, count=3, timeout=1, quiet=False, firstReply=False, blocking=True)"}, {"docstring": "This function sends IPv6 router discovery to a neighbour host Args: logger device - Name of network device where to send ICMPv6 Router Discovery packets destination - destination IP to ping count - send ICMPv6 Router Discovery X times quiet - quiet output blocking - if True, waits for command to complete Return: tuple (rc, stdout, stderr) Raise: None", "name": "sendRdiscReply", "signature": "def sendRdiscReply(logger, device, destination, count=3, quiet=False, blocking=True)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006861", "prompt": "Implement the Python 
class `Ndisc` described below.\n\nClass description:\nImplement the Ndisc class.\n\nMethod signatures and docstrings:\n- def sendNdiscRequest(logger, device, destination, count=3, timeout=1, quiet=False, firstReply=False, blocking=True): This function sends IPv6 neighbor discovery to a neighbour host Args: logger device - Name of network device where to send ARP REQUEST packets destination - destination IP to ping count - send ICMPv6 Neighbor Discovery X times timeout - specify a timeout, in seconds, before ndisc exits quiet - quiet output firstReply - Exit as soon as the first advertisement is received blocking - if True, waits for command to complete Return: tuple (rc, stdout, stderr) Raise: None\n- def sendRdiscReply(logger, device, destination, count=3, quiet=False, blocking=True): This function sends IPv6 router discovery to a neighbour host Args: logger device - Name of network device where to send ICMPv6 Router Discovery packets destination - destination IP to ping count - send ICMPv6 Router Discovery X times quiet - quiet output blocking - if True, waits for command to complete Return: tuple (rc, stdout, stderr) Raise: None", "prompted_full_text": "Implement the Python class `Ndisc` described below.\n\nClass description:\nImplement the Ndisc class.\n\nMethod signatures and docstrings:\n- def sendNdiscRequest(logger, device, destination, count=3, timeout=1, quiet=False, firstReply=False, blocking=True): This function sends IPv6 neighbor discovery to a neighbour host Args: logger device - Name of network device where to send ARP REQUEST packets destination - destination IP to ping count - send ICMPv6 Neighbor Discovery X times timeout - specify a timeout, in seconds, before ndisc exits quiet - quiet output firstReply - Exit as soon as the first advertisement is received blocking - if True, waits for command to complete Return: tuple (rc, stdout, stderr) Raise: None\n- def sendRdiscReply(logger, device, destination, count=3, quiet=False, blocking=True): This function sends IPv6 router discovery to a neighbour host Args: logger device - Name of network device where to send ICMPv6 Router Discovery packets destination - destination IP to ping count - send ICMPv6 Router Discovery X times quiet - quiet output blocking - if True, waits for command to complete Return: tuple (rc, stdout, stderr) Raise: None\n\n<|skeleton|>\nclass Ndisc:\n\n def sendNdiscRequest(logger, device, destination, count=3, timeout=1, quiet=False, firstReply=False, blocking=True):\n \"\"\"This function sends IPv6 neighbor discovery to a neighbour host Args: logger device - Name of network device where to send ARP REQUEST packets destination - destination IP to ping count - send ICMPv6 Neighbor Discovery X times timeout - specify a timeout, in seconds, before ndisc exits quiet - quiet output firstReply - Exit as soon as the first advertisement is received blocking - if True, waits for command to complete Return: tuple (rc, stdout, stderr) Raise: None\"\"\"\n <|body_0|>\n\n def sendRdiscReply(logger, device, destination, count=3, quiet=False, blocking=True):\n \"\"\"This function sends IPv6 router discovery to a neighbour host Args: logger device - Name of network device where to send ICMPv6 Router Discovery packets destination - destination IP to ping count - send ICMPv6 Router Discovery X times quiet - quiet output blocking - if True, waits for command to complete Return: tuple (rc, stdout, stderr) Raise: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n args = [Ndisc.NDISC6_COMMAND_NAME, 
Ndisc.COUNT_OPTION, str(count), Ndisc.TIMEOUT_OPTION, str(int(timeout) * 1000)]\n if quiet is True:\n args.append(Ndisc.QUIET_OPTION)\n if firstReply is True:\n args.append(Ndisc.FIRST_REPLY_OPTION)\n args.append(destination)\n args.append(device)\n rc = Command.execute(logger, Ndisc.NDISC6_COMMAND_NAME, args, timeoutSec=timeout + 3, blocking=blocking)\n return rc\n<|end_body_0|>\n\n<|body_start_1|>\n args = [Ndisc.RDISC6_COMMAND_NAME, Ndisc.COUNT_OPTION, str(count)]\n if quiet is True:\n args.append(Ndisc.QUIET_OPTION)\n args.append(destination)\n args.append(device)\n rc = Command.execute(logger, Ndisc.RDISC6_COMMAND_NAME, args, blocking=blocking)\n return rc\n<|end_body_1|>\n", "revision_id": "81bcc74fe7c0ca036ec483f634d7be0bab19a6d0", "skeleton": "<|skeleton|>\nclass Ndisc:\n\n def sendNdiscRequest(logger, device, destination, count=3, timeout=1, quiet=False, firstReply=False, blocking=True):\n \"\"\"This function sends IPv6 neighbor discovery to a neighbour host Args: logger device - Name of network device where to send ARP REQUEST packets destination - destination IP to ping count - send ICMPv6 Neighbor Discovery X times timeout - specify a timeout, in seconds, before ndisc exits quiet - quiet output firstReply - Exit as soon as the first advertisement is received blocking - if True, waits for command to complete Return: tuple (rc, stdout, stderr) Raise: None\"\"\"\n <|body_0|>\n\n def sendRdiscReply(logger, device, destination, count=3, quiet=False, blocking=True):\n \"\"\"This function sends IPv6 router discovery to a neighbour host Args: logger device - Name of network device where to send ICMPv6 Router Discovery packets destination - destination IP to ping count - send ICMPv6 Router Discovery X times quiet - quiet output blocking - if True, waits for command to complete Return: tuple (rc, stdout, stderr) Raise: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Ndisc:\n def sendNdiscRequest(logger, device, destination, count=3, timeout=1, quiet=False, firstReply=False, blocking=True):\n \"\"\"This function sends IPv6 neighbor discovery to a neighbour host Args: logger device - Name of network device where to send ARP REQUEST packets destination - destination IP to ping count - send ICMPv6 Neighbor Discovery X times timeout - specify a timeout, in seconds, before ndisc exits quiet - quiet output firstReply - Exit as soon as the first advertisement is received blocking - if True, waits for command to complete Return: tuple (rc, stdout, stderr) Raise: None\"\"\"\n args = [Ndisc.NDISC6_COMMAND_NAME, Ndisc.COUNT_OPTION, str(count), Ndisc.TIMEOUT_OPTION, str(int(timeout) * 1000)]\n if quiet is True:\n args.append(Ndisc.QUIET_OPTION)\n if firstReply is True:\n args.append(Ndisc.FIRST_REPLY_OPTION)\n args.append(destination)\n args.append(device)\n rc = Command.execute(logger, Ndisc.NDISC6_COMMAND_NAME, args, timeoutSec=timeout + 3, blocking=blocking)\n return rc\n\n def sendRdiscReply(logger, device, destination, count=3, quiet=False, blocking=True):\n \"\"\"This function sends IPv6 router discovery to a neighbour host Args: logger device - Name of network device where to send ICMPv6 Router Discovery packets destination - destination IP to ping count - send ICMPv6 Router Discovery X times quiet - quiet output blocking - if True, waits for command to complete Return: tuple (rc, stdout, stderr) Raise: None\"\"\"\n args = [Ndisc.RDISC6_COMMAND_NAME, Ndisc.COUNT_OPTION, 
str(count)]\n if quiet is True:\n args.append(Ndisc.QUIET_OPTION)\n args.append(destination)\n args.append(device)\n rc = Command.execute(logger, Ndisc.RDISC6_COMMAND_NAME, args, blocking=blocking)\n return rc\n", "source": "the_stack_v2_python_sparse", "source_path": "oscar/a/sys/net/lnx/neighbour.py", "source_repo": "afeset/miner2-tools", "split": "val", "star_events_count": 0} {"blob_id": "ef9872cf2b7f55967088d64b5e5eb4f641319a88", "bodies": ["threading.Thread.__init__(self)\nself._queue = queue\nself._execution_queue = execution_queue\nself._context = context", "threading.Thread.run(self)\ntest_step = self._queue.get()\ntry:\n test_step.run(self._context)\n self._execution_queue.put((test_step.name, test_step.ts_verdict_msg))\nexcept Exception as ex:\n self._execution_queue.put(ex)\nfinally:\n self._queue.task_done()"], "bodies_text": "<|body_start_0|>\n threading.Thread.__init__(self)\n self._queue = queue\n self._execution_queue = execution_queue\n self._context = context\n<|end_body_0|>\n\n<|body_start_1|>\n threading.Thread.run(self)\n test_step = self._queue.get()\n try:\n test_step.run(self._context)\n self._execution_queue.put((test_step.name, test_step.ts_verdict_msg))\n except Exception as ex:\n self._execution_queue.put(ex)\n finally:\n self._queue.task_done()\n<|end_body_1|>\n", "class_docstring": "Implements thread which runs a test step", "class_name": "ThreadStepRunner", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ThreadStepRunner:\n \"\"\"Implements thread which runs a test step\"\"\"\n\n def __init__(self, queue, execution_queue, context):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Runs the test step into a thread.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n threading.Thread.__init__(self)\n self._queue = queue\n self._execution_queue = execution_queue\n self._context = context\n<|end_body_0|>\n\n<|body_start_1|>\n threading.Thread.run(self)\n test_step = self._queue.get()\n try:\n test_step.run(self._context)\n self._execution_queue.put((test_step.name, test_step.ts_verdict_msg))\n except Exception as ex:\n self._execution_queue.put(ex)\n finally:\n self._queue.task_done()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000359", "length_bytes": 1488, "license_type": "permissive", "methods": [{"docstring": "Constructor", "name": "__init__", "signature": "def __init__(self, queue, execution_queue, context)"}, {"docstring": "Runs the test step into a thread.", "name": "run", "signature": "def run(self)"}], "n_methods": 2, "prompt": "Implement the Python class `ThreadStepRunner` described below.\n\nClass description:\nImplements thread which runs a test step\n\nMethod signatures and docstrings:\n- def __init__(self, queue, execution_queue, context): Constructor\n- def run(self): Runs the test step into a thread.", "prompted_full_text": "Implement the Python class `ThreadStepRunner` described below.\n\nClass description:\nImplements thread which runs a test step\n\nMethod signatures and docstrings:\n- def __init__(self, queue, execution_queue, context): Constructor\n- def run(self): Runs the test step into a thread.\n\n<|skeleton|>\nclass ThreadStepRunner:\n \"\"\"Implements thread which runs a test step\"\"\"\n\n def __init__(self, queue, execution_queue, context):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Runs the test step into a thread.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n 
threading.Thread.__init__(self)\n self._queue = queue\n self._execution_queue = execution_queue\n self._context = context\n<|end_body_0|>\n\n<|body_start_1|>\n threading.Thread.run(self)\n test_step = self._queue.get()\n try:\n test_step.run(self._context)\n self._execution_queue.put((test_step.name, test_step.ts_verdict_msg))\n except Exception as ex:\n self._execution_queue.put(ex)\n finally:\n self._queue.task_done()\n<|end_body_1|>\n", "revision_id": "7bf09f20f117fc74d02b7635305ce664b65cdcba", "skeleton": "<|skeleton|>\nclass ThreadStepRunner:\n \"\"\"Implements thread which runs a test step\"\"\"\n\n def __init__(self, queue, execution_queue, context):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Runs the test step into a thread.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ThreadStepRunner:\n \"\"\"Implements thread which runs a test step\"\"\"\n\n def __init__(self, queue, execution_queue, context):\n \"\"\"Constructor\"\"\"\n threading.Thread.__init__(self)\n self._queue = queue\n self._execution_queue = execution_queue\n self._context = context\n\n def run(self):\n \"\"\"Runs the test step into a thread.\"\"\"\n threading.Thread.run(self)\n test_step = self._queue.get()\n try:\n test_step.run(self._context)\n self._execution_queue.put((test_step.name, test_step.ts_verdict_msg))\n except Exception as ex:\n self._execution_queue.put(ex)\n finally:\n self._queue.task_done()\n", "source": "the_stack_v2_python_sparse", "source_path": "acs/acs/Core/TestStep/ThreadStepRunner.py", "source_repo": "intel/test-framework-and-suites-for-android", "split": "val", "star_events_count": 9} {"blob_id": "a22ed3e64460539683906e2d6e3218494c17a836", "bodies": ["queryset = self.get_child_qs(graph_id)\nserializer = self.get_serializer(queryset, many=True)\nreturn response.Response(serializer.data)", "graph_id = self.request.resolver_match.kwargs['graph_id']\ngraph = self.get_graph(graph_id)\nserializer.save(graph=graph)"], "bodies_text": "<|body_start_0|>\n queryset = self.get_child_qs(graph_id)\n serializer = self.get_serializer(queryset, many=True)\n return response.Response(serializer.data)\n<|end_body_0|>\n\n<|body_start_1|>\n graph_id = self.request.resolver_match.kwargs['graph_id']\n graph = self.get_graph(graph_id)\n serializer.save(graph=graph)\n<|end_body_1|>\n", "class_docstring": "A ListCreateAPIView mixin for graph children objects. E.g. NodeViews and EdgeViews Child here refers to node or edge object", "class_name": "GraphChildListCreateViewMixin", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GraphChildListCreateViewMixin:\n \"\"\"A ListCreateAPIView mixin for graph children objects. E.g. 
NodeViews and EdgeViews Child here refers to node or edge object\"\"\"\n\n def list_(self, request, graph_id):\n \"\"\"Return all the children of a given graph\"\"\"\n <|body_0|>\n\n def perform_create_(self, serializer):\n \"\"\"Add the graph to the child object.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n queryset = self.get_child_qs(graph_id)\n serializer = self.get_serializer(queryset, many=True)\n return response.Response(serializer.data)\n<|end_body_0|>\n\n<|body_start_1|>\n graph_id = self.request.resolver_match.kwargs['graph_id']\n graph = self.get_graph(graph_id)\n serializer.save(graph=graph)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000360", "length_bytes": 1932, "license_type": "no_license", "methods": [{"docstring": "Return all the children of a given graph", "name": "list_", "signature": "def list_(self, request, graph_id)"}, {"docstring": "Add the graph to the child object.", "name": "perform_create_", "signature": "def perform_create_(self, serializer)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007225", "prompt": "Implement the Python class `GraphChildListCreateViewMixin` described below.\n\nClass description:\nA ListCreateAPIView mixin for graph children objects. E.g. NodeViews and EdgeViews Child here refers to node or edge object\n\nMethod signatures and docstrings:\n- def list_(self, request, graph_id): Return all the children of a given graph\n- def perform_create_(self, serializer): Add the graph to the child object.", "prompted_full_text": "Implement the Python class `GraphChildListCreateViewMixin` described below.\n\nClass description:\nA ListCreateAPIView mixin for graph children objects. E.g. NodeViews and EdgeViews Child here refers to node or edge object\n\nMethod signatures and docstrings:\n- def list_(self, request, graph_id): Return all the children of a given graph\n- def perform_create_(self, serializer): Add the graph to the child object.\n\n<|skeleton|>\nclass GraphChildListCreateViewMixin:\n \"\"\"A ListCreateAPIView mixin for graph children objects. E.g. NodeViews and EdgeViews Child here refers to node or edge object\"\"\"\n\n def list_(self, request, graph_id):\n \"\"\"Return all the children of a given graph\"\"\"\n <|body_0|>\n\n def perform_create_(self, serializer):\n \"\"\"Add the graph to the child object.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n queryset = self.get_child_qs(graph_id)\n serializer = self.get_serializer(queryset, many=True)\n return response.Response(serializer.data)\n<|end_body_0|>\n\n<|body_start_1|>\n graph_id = self.request.resolver_match.kwargs['graph_id']\n graph = self.get_graph(graph_id)\n serializer.save(graph=graph)\n<|end_body_1|>\n", "revision_id": "9e01ff8ab73f6d9d16606ec1c8b7c91cdfa9cd2c", "skeleton": "<|skeleton|>\nclass GraphChildListCreateViewMixin:\n \"\"\"A ListCreateAPIView mixin for graph children objects. E.g. NodeViews and EdgeViews Child here refers to node or edge object\"\"\"\n\n def list_(self, request, graph_id):\n \"\"\"Return all the children of a given graph\"\"\"\n <|body_0|>\n\n def perform_create_(self, serializer):\n \"\"\"Add the graph to the child object.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GraphChildListCreateViewMixin:\n \"\"\"A ListCreateAPIView mixin for graph children objects. E.g. 
NodeViews and EdgeViews Child here refers to node or edge object\"\"\"\n\n def list_(self, request, graph_id):\n \"\"\"Return all the children of a given graph\"\"\"\n queryset = self.get_child_qs(graph_id)\n serializer = self.get_serializer(queryset, many=True)\n return response.Response(serializer.data)\n\n def perform_create_(self, serializer):\n \"\"\"Add the graph to the child object.\"\"\"\n graph_id = self.request.resolver_match.kwargs['graph_id']\n graph = self.get_graph(graph_id)\n serializer.save(graph=graph)\n", "source": "the_stack_v2_python_sparse", "source_path": "server/utils/views/mixins.py", "source_repo": "Aviemusca/bjj-digraph", "split": "val", "star_events_count": 0} {"blob_id": "b4f2bf194cce2fd88682429504a88de29c1b1245", "bodies": ["error_found = 0\nif CONF.log_dir:\n logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]\n for file in logs:\n log_file = os.path.join(CONF.log_dir, file)\n lines = [line.strip() for line in open(log_file, 'r')]\n lines.reverse()\n print_name = 0\n for index, line in enumerate(lines):\n if line.find(' ERROR ') > 0:\n error_found += 1\n if print_name == 0:\n print(log_file + ':-')\n print_name = 1\n print('Line %d : %s' % (len(lines) - index, line))\nif error_found == 0:\n print('No errors in logfiles!')", "entries = int(num_entries)\ncount = 0\nlog_file = ''\nif os.path.exists('/var/log/syslog'):\n log_file = '/var/log/syslog'\nelif os.path.exists('/var/log/messages'):\n log_file = '/var/log/messages'\nelse:\n print('Unable to find system log file!')\n sys.exit(1)\nlines = [line.strip() for line in open(log_file, 'r')]\nlines.reverse()\nprint('Last %s manila syslog entries:-' % entries)\nfor line in lines:\n if line.find('manila') > 0:\n count += 1\n print('%s' % line)\n if count == entries:\n break\nif count == 0:\n print('No manila entries in syslog!')"], "bodies_text": "<|body_start_0|>\n error_found = 0\n if CONF.log_dir:\n logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]\n for file in logs:\n log_file = os.path.join(CONF.log_dir, file)\n lines = [line.strip() for line in open(log_file, 'r')]\n lines.reverse()\n print_name = 0\n for index, line in enumerate(lines):\n if line.find(' ERROR ') > 0:\n error_found += 1\n if print_name == 0:\n print(log_file + ':-')\n print_name = 1\n print('Line %d : %s' % (len(lines) - index, line))\n if error_found == 0:\n print('No errors in logfiles!')\n<|end_body_0|>\n\n<|body_start_1|>\n entries = int(num_entries)\n count = 0\n log_file = ''\n if os.path.exists('/var/log/syslog'):\n log_file = '/var/log/syslog'\n elif os.path.exists('/var/log/messages'):\n log_file = '/var/log/messages'\n else:\n print('Unable to find system log file!')\n sys.exit(1)\n lines = [line.strip() for line in open(log_file, 'r')]\n lines.reverse()\n print('Last %s manila syslog entries:-' % entries)\n for line in lines:\n if line.find('manila') > 0:\n count += 1\n print('%s' % line)\n if count == entries:\n break\n if count == 0:\n print('No manila entries in syslog!')\n<|end_body_1|>\n", "class_docstring": "Get logging information.", "class_name": "GetLogCommands", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GetLogCommands:\n \"\"\"Get logging information.\"\"\"\n\n def errors(self):\n \"\"\"Get all of the errors from the log files.\"\"\"\n <|body_0|>\n\n def syslog(self, num_entries=10):\n \"\"\"Get of the manila syslog events.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n error_found = 0\n if 
CONF.log_dir:\n logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]\n for file in logs:\n log_file = os.path.join(CONF.log_dir, file)\n lines = [line.strip() for line in open(log_file, 'r')]\n lines.reverse()\n print_name = 0\n for index, line in enumerate(lines):\n if line.find(' ERROR ') > 0:\n error_found += 1\n if print_name == 0:\n print(log_file + ':-')\n print_name = 1\n print('Line %d : %s' % (len(lines) - index, line))\n if error_found == 0:\n print('No errors in logfiles!')\n<|end_body_0|>\n\n<|body_start_1|>\n entries = int(num_entries)\n count = 0\n log_file = ''\n if os.path.exists('/var/log/syslog'):\n log_file = '/var/log/syslog'\n elif os.path.exists('/var/log/messages'):\n log_file = '/var/log/messages'\n else:\n print('Unable to find system log file!')\n sys.exit(1)\n lines = [line.strip() for line in open(log_file, 'r')]\n lines.reverse()\n print('Last %s manila syslog entries:-' % entries)\n for line in lines:\n if line.find('manila') > 0:\n count += 1\n print('%s' % line)\n if count == entries:\n break\n if count == 0:\n print('No manila entries in syslog!')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000361", "length_bytes": 19425, "license_type": "permissive", "methods": [{"docstring": "Get all of the errors from the log files.", "name": "errors", "signature": "def errors(self)"}, {"docstring": "Get of the manila syslog events.", "name": "syslog", "signature": "def syslog(self, num_entries=10)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000310", "prompt": "Implement the Python class `GetLogCommands` described below.\n\nClass description:\nGet logging information.\n\nMethod signatures and docstrings:\n- def errors(self): Get all of the errors from the log files.\n- def syslog(self, num_entries=10): Get of the manila syslog events.", "prompted_full_text": "Implement the Python class `GetLogCommands` described below.\n\nClass description:\nGet logging information.\n\nMethod signatures and docstrings:\n- def errors(self): Get all of the errors from the log files.\n- def syslog(self, num_entries=10): Get of the manila syslog events.\n\n<|skeleton|>\nclass GetLogCommands:\n \"\"\"Get logging information.\"\"\"\n\n def errors(self):\n \"\"\"Get all of the errors from the log files.\"\"\"\n <|body_0|>\n\n def syslog(self, num_entries=10):\n \"\"\"Get of the manila syslog events.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n error_found = 0\n if CONF.log_dir:\n logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]\n for file in logs:\n log_file = os.path.join(CONF.log_dir, file)\n lines = [line.strip() for line in open(log_file, 'r')]\n lines.reverse()\n print_name = 0\n for index, line in enumerate(lines):\n if line.find(' ERROR ') > 0:\n error_found += 1\n if print_name == 0:\n print(log_file + ':-')\n print_name = 1\n print('Line %d : %s' % (len(lines) - index, line))\n if error_found == 0:\n print('No errors in logfiles!')\n<|end_body_0|>\n\n<|body_start_1|>\n entries = int(num_entries)\n count = 0\n log_file = ''\n if os.path.exists('/var/log/syslog'):\n log_file = '/var/log/syslog'\n elif os.path.exists('/var/log/messages'):\n log_file = '/var/log/messages'\n else:\n print('Unable to find system log file!')\n sys.exit(1)\n lines = [line.strip() for line in open(log_file, 'r')]\n lines.reverse()\n print('Last %s manila syslog entries:-' % entries)\n for line in lines:\n if line.find('manila') > 0:\n count += 1\n print('%s' % line)\n if count == entries:\n break\n if count == 0:\n 
print('No manila entries in syslog!')\n<|end_body_1|>\n", "revision_id": "a93a844398a11a8a85f204782fb9456f7caccdbe", "skeleton": "<|skeleton|>\nclass GetLogCommands:\n \"\"\"Get logging information.\"\"\"\n\n def errors(self):\n \"\"\"Get all of the errors from the log files.\"\"\"\n <|body_0|>\n\n def syslog(self, num_entries=10):\n \"\"\"Get of the manila syslog events.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GetLogCommands:\n \"\"\"Get logging information.\"\"\"\n\n def errors(self):\n \"\"\"Get all of the errors from the log files.\"\"\"\n error_found = 0\n if CONF.log_dir:\n logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]\n for file in logs:\n log_file = os.path.join(CONF.log_dir, file)\n lines = [line.strip() for line in open(log_file, 'r')]\n lines.reverse()\n print_name = 0\n for index, line in enumerate(lines):\n if line.find(' ERROR ') > 0:\n error_found += 1\n if print_name == 0:\n print(log_file + ':-')\n print_name = 1\n print('Line %d : %s' % (len(lines) - index, line))\n if error_found == 0:\n print('No errors in logfiles!')\n\n def syslog(self, num_entries=10):\n \"\"\"Get of the manila syslog events.\"\"\"\n entries = int(num_entries)\n count = 0\n log_file = ''\n if os.path.exists('/var/log/syslog'):\n log_file = '/var/log/syslog'\n elif os.path.exists('/var/log/messages'):\n log_file = '/var/log/messages'\n else:\n print('Unable to find system log file!')\n sys.exit(1)\n lines = [line.strip() for line in open(log_file, 'r')]\n lines.reverse()\n print('Last %s manila syslog entries:-' % entries)\n for line in lines:\n if line.find('manila') > 0:\n count += 1\n print('%s' % line)\n if count == entries:\n break\n if count == 0:\n print('No manila entries in syslog!')\n", "source": "the_stack_v2_python_sparse", "source_path": "manila/cmd/manage.py", "source_repo": "openstack/manila", "split": "val", "star_events_count": 178} {"blob_id": "9f3f628954de779253b554fbda7c6b718f9fb4b0", "bodies": ["super().__init__(coordinator)\nself._site_info = site_info\nself._device_type = device_type\nself._version = status.version\nself.base_unique_id = '_'.join(powerwalls_serial_numbers)", "device_info = {'identifiers': {(DOMAIN, self.base_unique_id)}, 'name': self._site_info.site_name, 'manufacturer': MANUFACTURER}\nmodel = MODEL\nmodel += f' ({self._device_type.name})'\ndevice_info['model'] = model\ndevice_info['sw_version'] = self._version\nreturn device_info"], "bodies_text": "<|body_start_0|>\n super().__init__(coordinator)\n self._site_info = site_info\n self._device_type = device_type\n self._version = status.version\n self.base_unique_id = '_'.join(powerwalls_serial_numbers)\n<|end_body_0|>\n\n<|body_start_1|>\n device_info = {'identifiers': {(DOMAIN, self.base_unique_id)}, 'name': self._site_info.site_name, 'manufacturer': MANUFACTURER}\n model = MODEL\n model += f' ({self._device_type.name})'\n device_info['model'] = model\n device_info['sw_version'] = self._version\n return device_info\n<|end_body_1|>\n", "class_docstring": "Base class for powerwall entities.", "class_name": "PowerWallEntity", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PowerWallEntity:\n \"\"\"Base class for powerwall entities.\"\"\"\n\n def __init__(self, coordinator, site_info, status, device_type, powerwalls_serial_numbers):\n \"\"\"Initialize the sensor.\"\"\"\n <|body_0|>\n\n def 
device_info(self):\n \"\"\"Powerwall device info.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(coordinator)\n self._site_info = site_info\n self._device_type = device_type\n self._version = status.version\n self.base_unique_id = '_'.join(powerwalls_serial_numbers)\n<|end_body_0|>\n\n<|body_start_1|>\n device_info = {'identifiers': {(DOMAIN, self.base_unique_id)}, 'name': self._site_info.site_name, 'manufacturer': MANUFACTURER}\n model = MODEL\n model += f' ({self._device_type.name})'\n device_info['model'] = model\n device_info['sw_version'] = self._version\n return device_info\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000362", "length_bytes": 1145, "license_type": "permissive", "methods": [{"docstring": "Initialize the sensor.", "name": "__init__", "signature": "def __init__(self, coordinator, site_info, status, device_type, powerwalls_serial_numbers)"}, {"docstring": "Powerwall device info.", "name": "device_info", "signature": "def device_info(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003125", "prompt": "Implement the Python class `PowerWallEntity` described below.\n\nClass description:\nBase class for powerwall entities.\n\nMethod signatures and docstrings:\n- def __init__(self, coordinator, site_info, status, device_type, powerwalls_serial_numbers): Initialize the sensor.\n- def device_info(self): Powerwall device info.", "prompted_full_text": "Implement the Python class `PowerWallEntity` described below.\n\nClass description:\nBase class for powerwall entities.\n\nMethod signatures and docstrings:\n- def __init__(self, coordinator, site_info, status, device_type, powerwalls_serial_numbers): Initialize the sensor.\n- def device_info(self): Powerwall device info.\n\n<|skeleton|>\nclass PowerWallEntity:\n \"\"\"Base class for powerwall entities.\"\"\"\n\n def __init__(self, coordinator, site_info, status, device_type, powerwalls_serial_numbers):\n \"\"\"Initialize the sensor.\"\"\"\n <|body_0|>\n\n def device_info(self):\n \"\"\"Powerwall device info.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(coordinator)\n self._site_info = site_info\n self._device_type = device_type\n self._version = status.version\n self.base_unique_id = '_'.join(powerwalls_serial_numbers)\n<|end_body_0|>\n\n<|body_start_1|>\n device_info = {'identifiers': {(DOMAIN, self.base_unique_id)}, 'name': self._site_info.site_name, 'manufacturer': MANUFACTURER}\n model = MODEL\n model += f' ({self._device_type.name})'\n device_info['model'] = model\n device_info['sw_version'] = self._version\n return device_info\n<|end_body_1|>\n", "revision_id": "2fee32fce03bc49e86cf2e7b741a15621a97cce5", "skeleton": "<|skeleton|>\nclass PowerWallEntity:\n \"\"\"Base class for powerwall entities.\"\"\"\n\n def __init__(self, coordinator, site_info, status, device_type, powerwalls_serial_numbers):\n \"\"\"Initialize the sensor.\"\"\"\n <|body_0|>\n\n def device_info(self):\n \"\"\"Powerwall device info.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PowerWallEntity:\n \"\"\"Base class for powerwall entities.\"\"\"\n\n def __init__(self, coordinator, site_info, status, device_type, powerwalls_serial_numbers):\n \"\"\"Initialize the sensor.\"\"\"\n super().__init__(coordinator)\n self._site_info = site_info\n self._device_type = device_type\n self._version = status.version\n self.base_unique_id = 
'_'.join(powerwalls_serial_numbers)\n\n def device_info(self):\n \"\"\"Powerwall device info.\"\"\"\n device_info = {'identifiers': {(DOMAIN, self.base_unique_id)}, 'name': self._site_info.site_name, 'manufacturer': MANUFACTURER}\n model = MODEL\n model += f' ({self._device_type.name})'\n device_info['model'] = model\n device_info['sw_version'] = self._version\n return device_info\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/powerwall/entity.py", "source_repo": "BenWoodford/home-assistant", "split": "val", "star_events_count": 11} {"blob_id": "55e9ad35957ddffed355c6d886ec991d4061b8f1", "bodies": ["self.is_valid = is_valid\nself.browse_node_lookup_request = browse_node_lookup_request\nself.item_search_request = item_search_request\nself.item_lookup_request = item_lookup_request\nself.similarity_lookup_request = similarity_lookup_request\nself.cart_get_request = cart_get_request\nself.cart_add_request = cart_add_request\nself.cart_create_request = cart_create_request\nself.cart_modify_request = cart_modify_request\nself.cart_clear_request = cart_clear_request\nself.errors = errors", "if dictionary is None:\n return None\nis_valid = dictionary.get('IsValid')\nbrowse_node_lookup_request = awsecommerceservice.models.browse_node_lookup_request.BrowseNodeLookupRequest.from_dictionary(dictionary.get('BrowseNodeLookupRequest')) if dictionary.get('BrowseNodeLookupRequest') else None\nitem_search_request = awsecommerceservice.models.item_search_request.ItemSearchRequest.from_dictionary(dictionary.get('ItemSearchRequest')) if dictionary.get('ItemSearchRequest') else None\nitem_lookup_request = awsecommerceservice.models.item_lookup_request.ItemLookupRequest.from_dictionary(dictionary.get('ItemLookupRequest')) if dictionary.get('ItemLookupRequest') else None\nsimilarity_lookup_request = awsecommerceservice.models.similarity_lookup_request.SimilarityLookupRequest.from_dictionary(dictionary.get('SimilarityLookupRequest')) if dictionary.get('SimilarityLookupRequest') else None\ncart_get_request = awsecommerceservice.models.cart_get_request.CartGetRequest.from_dictionary(dictionary.get('CartGetRequest')) if dictionary.get('CartGetRequest') else None\ncart_add_request = awsecommerceservice.models.cart_add_request.CartAddRequest.from_dictionary(dictionary.get('CartAddRequest')) if dictionary.get('CartAddRequest') else None\ncart_create_request = awsecommerceservice.models.cart_create_request.CartCreateRequest.from_dictionary(dictionary.get('CartCreateRequest')) if dictionary.get('CartCreateRequest') else None\ncart_modify_request = awsecommerceservice.models.cart_modify_request.CartModifyRequest.from_dictionary(dictionary.get('CartModifyRequest')) if dictionary.get('CartModifyRequest') else None\ncart_clear_request = awsecommerceservice.models.cart_clear_request.CartClearRequest.from_dictionary(dictionary.get('CartClearRequest')) if dictionary.get('CartClearRequest') else None\nerrors = awsecommerceservice.models.errors.Errors.from_dictionary(dictionary.get('Errors')) if dictionary.get('Errors') else None\nreturn cls(is_valid, browse_node_lookup_request, item_search_request, item_lookup_request, similarity_lookup_request, cart_get_request, cart_add_request, cart_create_request, cart_modify_request, cart_clear_request, errors)"], "bodies_text": "<|body_start_0|>\n self.is_valid = is_valid\n self.browse_node_lookup_request = browse_node_lookup_request\n self.item_search_request = item_search_request\n self.item_lookup_request = item_lookup_request\n 
self.similarity_lookup_request = similarity_lookup_request\n self.cart_get_request = cart_get_request\n self.cart_add_request = cart_add_request\n self.cart_create_request = cart_create_request\n self.cart_modify_request = cart_modify_request\n self.cart_clear_request = cart_clear_request\n self.errors = errors\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n is_valid = dictionary.get('IsValid')\n browse_node_lookup_request = awsecommerceservice.models.browse_node_lookup_request.BrowseNodeLookupRequest.from_dictionary(dictionary.get('BrowseNodeLookupRequest')) if dictionary.get('BrowseNodeLookupRequest') else None\n item_search_request = awsecommerceservice.models.item_search_request.ItemSearchRequest.from_dictionary(dictionary.get('ItemSearchRequest')) if dictionary.get('ItemSearchRequest') else None\n item_lookup_request = awsecommerceservice.models.item_lookup_request.ItemLookupRequest.from_dictionary(dictionary.get('ItemLookupRequest')) if dictionary.get('ItemLookupRequest') else None\n similarity_lookup_request = awsecommerceservice.models.similarity_lookup_request.SimilarityLookupRequest.from_dictionary(dictionary.get('SimilarityLookupRequest')) if dictionary.get('SimilarityLookupRequest') else None\n cart_get_request = awsecommerceservice.models.cart_get_request.CartGetRequest.from_dictionary(dictionary.get('CartGetRequest')) if dictionary.get('CartGetRequest') else None\n cart_add_request = awsecommerceservice.models.cart_add_request.CartAddRequest.from_dictionary(dictionary.get('CartAddRequest')) if dictionary.get('CartAddRequest') else None\n cart_create_request = awsecommerceservice.models.cart_create_request.CartCreateRequest.from_dictionary(dictionary.get('CartCreateRequest')) if dictionary.get('CartCreateRequest') else None\n cart_modify_request = awsecommerceservice.models.cart_modify_request.CartModifyRequest.from_dictionary(dictionary.get('CartModifyRequest')) if dictionary.get('CartModifyRequest') else None\n cart_clear_request = awsecommerceservice.models.cart_clear_request.CartClearRequest.from_dictionary(dictionary.get('CartClearRequest')) if dictionary.get('CartClearRequest') else None\n errors = awsecommerceservice.models.errors.Errors.from_dictionary(dictionary.get('Errors')) if dictionary.get('Errors') else None\n return cls(is_valid, browse_node_lookup_request, item_search_request, item_lookup_request, similarity_lookup_request, cart_get_request, cart_add_request, cart_create_request, cart_modify_request, cart_clear_request, errors)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'Request' model. TODO: type model description here. Attributes: is_valid (string): TODO: type description here. browse_node_lookup_request (BrowseNodeLookupRequest): TODO: type description here. item_search_request (ItemSearchRequest): TODO: type description here. item_lookup_request (ItemLookupRequest): TODO: type description here. similarity_lookup_request (SimilarityLookupRequest): TODO: type description here. cart_get_request (CartGetRequest): TODO: type description here. cart_add_request (CartAddRequest): TODO: type description here. cart_create_request (CartCreateRequest): TODO: type description here. cart_modify_request (CartModifyRequest): TODO: type description", "class_name": "Request", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Request:\n \"\"\"Implementation of the 'Request' model. TODO: type model description here. 
Attributes: is_valid (string): TODO: type description here. browse_node_lookup_request (BrowseNodeLookupRequest): TODO: type description here. item_search_request (ItemSearchRequest): TODO: type description here. item_lookup_request (ItemLookupRequest): TODO: type description here. similarity_lookup_request (SimilarityLookupRequest): TODO: type description here. cart_get_request (CartGetRequest): TODO: type description here. cart_add_request (CartAddRequest): TODO: type description here. cart_create_request (CartCreateRequest): TODO: type description here. cart_modify_request (CartModifyRequest): TODO: type description\"\"\"\n\n def __init__(self, is_valid=None, browse_node_lookup_request=None, item_search_request=None, item_lookup_request=None, similarity_lookup_request=None, cart_get_request=None, cart_add_request=None, cart_create_request=None, cart_modify_request=None, cart_clear_request=None, errors=None):\n \"\"\"Constructor for the Request class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.is_valid = is_valid\n self.browse_node_lookup_request = browse_node_lookup_request\n self.item_search_request = item_search_request\n self.item_lookup_request = item_lookup_request\n self.similarity_lookup_request = similarity_lookup_request\n self.cart_get_request = cart_get_request\n self.cart_add_request = cart_add_request\n self.cart_create_request = cart_create_request\n self.cart_modify_request = cart_modify_request\n self.cart_clear_request = cart_clear_request\n self.errors = errors\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n is_valid = dictionary.get('IsValid')\n browse_node_lookup_request = awsecommerceservice.models.browse_node_lookup_request.BrowseNodeLookupRequest.from_dictionary(dictionary.get('BrowseNodeLookupRequest')) if dictionary.get('BrowseNodeLookupRequest') else None\n item_search_request = awsecommerceservice.models.item_search_request.ItemSearchRequest.from_dictionary(dictionary.get('ItemSearchRequest')) if dictionary.get('ItemSearchRequest') else None\n item_lookup_request = awsecommerceservice.models.item_lookup_request.ItemLookupRequest.from_dictionary(dictionary.get('ItemLookupRequest')) if dictionary.get('ItemLookupRequest') else None\n similarity_lookup_request = awsecommerceservice.models.similarity_lookup_request.SimilarityLookupRequest.from_dictionary(dictionary.get('SimilarityLookupRequest')) if dictionary.get('SimilarityLookupRequest') else None\n cart_get_request = awsecommerceservice.models.cart_get_request.CartGetRequest.from_dictionary(dictionary.get('CartGetRequest')) if dictionary.get('CartGetRequest') else None\n cart_add_request = awsecommerceservice.models.cart_add_request.CartAddRequest.from_dictionary(dictionary.get('CartAddRequest')) if dictionary.get('CartAddRequest') else None\n cart_create_request = awsecommerceservice.models.cart_create_request.CartCreateRequest.from_dictionary(dictionary.get('CartCreateRequest')) if dictionary.get('CartCreateRequest') else None\n cart_modify_request = awsecommerceservice.models.cart_modify_request.CartModifyRequest.from_dictionary(dictionary.get('CartModifyRequest')) if 
dictionary.get('CartModifyRequest') else None\n cart_clear_request = awsecommerceservice.models.cart_clear_request.CartClearRequest.from_dictionary(dictionary.get('CartClearRequest')) if dictionary.get('CartClearRequest') else None\n errors = awsecommerceservice.models.errors.Errors.from_dictionary(dictionary.get('Errors')) if dictionary.get('Errors') else None\n return cls(is_valid, browse_node_lookup_request, item_search_request, item_lookup_request, similarity_lookup_request, cart_get_request, cart_add_request, cart_create_request, cart_modify_request, cart_clear_request, errors)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000363", "length_bytes": 6448, "license_type": "permissive", "methods": [{"docstring": "Constructor for the Request class", "name": "__init__", "signature": "def __init__(self, is_valid=None, browse_node_lookup_request=None, item_search_request=None, item_lookup_request=None, similarity_lookup_request=None, cart_get_request=None, cart_add_request=None, cart_create_request=None, cart_modify_request=None, cart_clear_request=None, errors=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001800", "prompt": "Implement the Python class `Request` described below.\n\nClass description:\nImplementation of the 'Request' model. TODO: type model description here. Attributes: is_valid (string): TODO: type description here. browse_node_lookup_request (BrowseNodeLookupRequest): TODO: type description here. item_search_request (ItemSearchRequest): TODO: type description here. item_lookup_request (ItemLookupRequest): TODO: type description here. similarity_lookup_request (SimilarityLookupRequest): TODO: type description here. cart_get_request (CartGetRequest): TODO: type description here. cart_add_request (CartAddRequest): TODO: type description here. cart_create_request (CartCreateRequest): TODO: type description here. cart_modify_request (CartModifyRequest): TODO: type description\n\nMethod signatures and docstrings:\n- def __init__(self, is_valid=None, browse_node_lookup_request=None, item_search_request=None, item_lookup_request=None, similarity_lookup_request=None, cart_get_request=None, cart_add_request=None, cart_create_request=None, cart_modify_request=None, cart_clear_request=None, errors=None): Constructor for the Request class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `Request` described below.\n\nClass description:\nImplementation of the 'Request' model. TODO: type model description here. Attributes: is_valid (string): TODO: type description here. browse_node_lookup_request (BrowseNodeLookupRequest): TODO: type description here. item_search_request (ItemSearchRequest): TODO: type description here. item_lookup_request (ItemLookupRequest): TODO: type description here. 
similarity_lookup_request (SimilarityLookupRequest): TODO: type description here. cart_get_request (CartGetRequest): TODO: type description here. cart_add_request (CartAddRequest): TODO: type description here. cart_create_request (CartCreateRequest): TODO: type description here. cart_modify_request (CartModifyRequest): TODO: type description\n\nMethod signatures and docstrings:\n- def __init__(self, is_valid=None, browse_node_lookup_request=None, item_search_request=None, item_lookup_request=None, similarity_lookup_request=None, cart_get_request=None, cart_add_request=None, cart_create_request=None, cart_modify_request=None, cart_clear_request=None, errors=None): Constructor for the Request class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass Request:\n \"\"\"Implementation of the 'Request' model. TODO: type model description here. Attributes: is_valid (string): TODO: type description here. browse_node_lookup_request (BrowseNodeLookupRequest): TODO: type description here. item_search_request (ItemSearchRequest): TODO: type description here. item_lookup_request (ItemLookupRequest): TODO: type description here. similarity_lookup_request (SimilarityLookupRequest): TODO: type description here. cart_get_request (CartGetRequest): TODO: type description here. cart_add_request (CartAddRequest): TODO: type description here. cart_create_request (CartCreateRequest): TODO: type description here. cart_modify_request (CartModifyRequest): TODO: type description\"\"\"\n\n def __init__(self, is_valid=None, browse_node_lookup_request=None, item_search_request=None, item_lookup_request=None, similarity_lookup_request=None, cart_get_request=None, cart_add_request=None, cart_create_request=None, cart_modify_request=None, cart_clear_request=None, errors=None):\n \"\"\"Constructor for the Request class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.is_valid = is_valid\n self.browse_node_lookup_request = browse_node_lookup_request\n self.item_search_request = item_search_request\n self.item_lookup_request = item_lookup_request\n self.similarity_lookup_request = similarity_lookup_request\n self.cart_get_request = cart_get_request\n self.cart_add_request = cart_add_request\n self.cart_create_request = cart_create_request\n self.cart_modify_request = cart_modify_request\n self.cart_clear_request = cart_clear_request\n self.errors = errors\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n is_valid = dictionary.get('IsValid')\n browse_node_lookup_request = awsecommerceservice.models.browse_node_lookup_request.BrowseNodeLookupRequest.from_dictionary(dictionary.get('BrowseNodeLookupRequest')) if dictionary.get('BrowseNodeLookupRequest') else None\n item_search_request = awsecommerceservice.models.item_search_request.ItemSearchRequest.from_dictionary(dictionary.get('ItemSearchRequest')) if dictionary.get('ItemSearchRequest') else None\n item_lookup_request = awsecommerceservice.models.item_lookup_request.ItemLookupRequest.from_dictionary(dictionary.get('ItemLookupRequest')) if dictionary.get('ItemLookupRequest') else None\n similarity_lookup_request = awsecommerceservice.models.similarity_lookup_request.SimilarityLookupRequest.from_dictionary(dictionary.get('SimilarityLookupRequest')) if dictionary.get('SimilarityLookupRequest') else None\n cart_get_request = awsecommerceservice.models.cart_get_request.CartGetRequest.from_dictionary(dictionary.get('CartGetRequest')) if dictionary.get('CartGetRequest') else None\n cart_add_request = awsecommerceservice.models.cart_add_request.CartAddRequest.from_dictionary(dictionary.get('CartAddRequest')) if dictionary.get('CartAddRequest') else None\n cart_create_request = awsecommerceservice.models.cart_create_request.CartCreateRequest.from_dictionary(dictionary.get('CartCreateRequest')) if dictionary.get('CartCreateRequest') else None\n cart_modify_request = awsecommerceservice.models.cart_modify_request.CartModifyRequest.from_dictionary(dictionary.get('CartModifyRequest')) if dictionary.get('CartModifyRequest') else None\n cart_clear_request = awsecommerceservice.models.cart_clear_request.CartClearRequest.from_dictionary(dictionary.get('CartClearRequest')) if dictionary.get('CartClearRequest') else None\n errors = awsecommerceservice.models.errors.Errors.from_dictionary(dictionary.get('Errors')) if dictionary.get('Errors') else None\n return cls(is_valid, browse_node_lookup_request, item_search_request, item_lookup_request, similarity_lookup_request, cart_get_request, cart_add_request, cart_create_request, cart_modify_request, cart_clear_request, errors)\n<|end_body_1|>\n", "revision_id": "26ea1019115a1de3b1b37a4b830525e164ac55ce", "skeleton": "<|skeleton|>\nclass Request:\n \"\"\"Implementation of the 'Request' model. TODO: type model description here. Attributes: is_valid (string): TODO: type description here. browse_node_lookup_request (BrowseNodeLookupRequest): TODO: type description here. item_search_request (ItemSearchRequest): TODO: type description here. item_lookup_request (ItemLookupRequest): TODO: type description here. similarity_lookup_request (SimilarityLookupRequest): TODO: type description here. cart_get_request (CartGetRequest): TODO: type description here. cart_add_request (CartAddRequest): TODO: type description here. 
cart_create_request (CartCreateRequest): TODO: type description here. cart_modify_request (CartModifyRequest): TODO: type description\"\"\"\n\n def __init__(self, is_valid=None, browse_node_lookup_request=None, item_search_request=None, item_lookup_request=None, similarity_lookup_request=None, cart_get_request=None, cart_add_request=None, cart_create_request=None, cart_modify_request=None, cart_clear_request=None, errors=None):\n \"\"\"Constructor for the Request class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Request:\n \"\"\"Implementation of the 'Request' model. TODO: type model description here. Attributes: is_valid (string): TODO: type description here. browse_node_lookup_request (BrowseNodeLookupRequest): TODO: type description here. item_search_request (ItemSearchRequest): TODO: type description here. item_lookup_request (ItemLookupRequest): TODO: type description here. similarity_lookup_request (SimilarityLookupRequest): TODO: type description here. cart_get_request (CartGetRequest): TODO: type description here. cart_add_request (CartAddRequest): TODO: type description here. cart_create_request (CartCreateRequest): TODO: type description here. cart_modify_request (CartModifyRequest): TODO: type description\"\"\"\n\n def __init__(self, is_valid=None, browse_node_lookup_request=None, item_search_request=None, item_lookup_request=None, similarity_lookup_request=None, cart_get_request=None, cart_add_request=None, cart_create_request=None, cart_modify_request=None, cart_clear_request=None, errors=None):\n \"\"\"Constructor for the Request class\"\"\"\n self.is_valid = is_valid\n self.browse_node_lookup_request = browse_node_lookup_request\n self.item_search_request = item_search_request\n self.item_lookup_request = item_lookup_request\n self.similarity_lookup_request = similarity_lookup_request\n self.cart_get_request = cart_get_request\n self.cart_add_request = cart_add_request\n self.cart_create_request = cart_create_request\n self.cart_modify_request = cart_modify_request\n self.cart_clear_request = cart_clear_request\n self.errors = errors\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n is_valid = dictionary.get('IsValid')\n browse_node_lookup_request = awsecommerceservice.models.browse_node_lookup_request.BrowseNodeLookupRequest.from_dictionary(dictionary.get('BrowseNodeLookupRequest')) if dictionary.get('BrowseNodeLookupRequest') else None\n item_search_request = awsecommerceservice.models.item_search_request.ItemSearchRequest.from_dictionary(dictionary.get('ItemSearchRequest')) if dictionary.get('ItemSearchRequest') else None\n item_lookup_request = awsecommerceservice.models.item_lookup_request.ItemLookupRequest.from_dictionary(dictionary.get('ItemLookupRequest')) if dictionary.get('ItemLookupRequest') else None\n similarity_lookup_request = awsecommerceservice.models.similarity_lookup_request.SimilarityLookupRequest.from_dictionary(dictionary.get('SimilarityLookupRequest')) if dictionary.get('SimilarityLookupRequest') else None\n cart_get_request = awsecommerceservice.models.cart_get_request.CartGetRequest.from_dictionary(dictionary.get('CartGetRequest')) if dictionary.get('CartGetRequest') else None\n cart_add_request = awsecommerceservice.models.cart_add_request.CartAddRequest.from_dictionary(dictionary.get('CartAddRequest')) if dictionary.get('CartAddRequest') else None\n cart_create_request = awsecommerceservice.models.cart_create_request.CartCreateRequest.from_dictionary(dictionary.get('CartCreateRequest')) if dictionary.get('CartCreateRequest') else None\n cart_modify_request = awsecommerceservice.models.cart_modify_request.CartModifyRequest.from_dictionary(dictionary.get('CartModifyRequest')) if dictionary.get('CartModifyRequest') else None\n cart_clear_request = awsecommerceservice.models.cart_clear_request.CartClearRequest.from_dictionary(dictionary.get('CartClearRequest')) if dictionary.get('CartClearRequest') else None\n errors = awsecommerceservice.models.errors.Errors.from_dictionary(dictionary.get('Errors')) if dictionary.get('Errors') else None\n return cls(is_valid, browse_node_lookup_request, item_search_request, item_lookup_request, similarity_lookup_request, cart_get_request, cart_add_request, cart_create_request, cart_modify_request, cart_clear_request, errors)\n", "source": "the_stack_v2_python_sparse", "source_path": "awsecommerceservice/models/request.py", "source_repo": "nidaizamir/Test-PY", "split": "val", "star_events_count": 0} {"blob_id": "981db8e6227436fa28617cab2656a7401f230519", "bodies": ["self.set_header('content-type', 'application/json')\ntry:\n strategy = StrategyCustDao().get_strategy_by_app_and_name(app, name).get_dict()\n if self.group.is_root():\n self.process_error(-1, 'the root user group has no permission to query strategies')\n elif self.group.is_manager():\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': [strategy]}))\n else:\n view_strategy = GroupPermissionDao().get_group_strategy_block(self.group.id)\n be_block_groups_ids = view_strategy.get('be_blocked', [])\n if strategy['group_id'] not in be_block_groups_ids:\n strategy.pop('score')\n else:\n strategy = {}\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': [strategy]}))\nexcept Exception as err:\n logger.error(err)\n self.finish(json_dumps({'status': -1, 'msg': 'fail to get data from database'}))", "self.set_header('content-type', 'application/json')\ntry:\n StrategyCustDao().delete_strategy_by_app_and_name(app, name)\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': []}))\nexcept Exception as err:\n logger.error(err)\n self.finish(json_dumps({'status': -1, 'msg': 
'fail to get data from database'}))", "self.set_header('content-type', 'application/json')\nbody = self.request.body\nbody['version'] = millis_now()\nbody['group_id'] = self.group.id\ntry:\n new_strategy = Strategy.from_json(body)\nexcept Exception as err:\n return self.process_error(400, 'invalid request content: {}'.format(err.message))\ntry:\n gen_variables_from_strategy(new_strategy, effective_check=False)\n new_strategy.version = millis_now()\n StrategyCustDao().add_strategy(new_strategy)\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': []}))\nexcept Exception as err:\n logger.error(err)\n self.process_error(500, 'fail to add meta to database')"], "bodies_text": "<|body_start_0|>\n self.set_header('content-type', 'application/json')\n try:\n strategy = StrategyCustDao().get_strategy_by_app_and_name(app, name).get_dict()\n if self.group.is_root():\n self.process_error(-1, 'the root user group has no permission to query strategies')\n elif self.group.is_manager():\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': [strategy]}))\n else:\n view_strategy = GroupPermissionDao().get_group_strategy_block(self.group.id)\n be_block_groups_ids = view_strategy.get('be_blocked', [])\n if strategy['group_id'] not in be_block_groups_ids:\n strategy.pop('score')\n else:\n strategy = {}\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': [strategy]}))\n except Exception as err:\n logger.error(err)\n self.finish(json_dumps({'status': -1, 'msg': 'fail to get data from database'}))\n<|end_body_0|>\n\n<|body_start_1|>\n self.set_header('content-type', 'application/json')\n try:\n StrategyCustDao().delete_strategy_by_app_and_name(app, name)\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': []}))\n except Exception as err:\n logger.error(err)\n self.finish(json_dumps({'status': -1, 'msg': 'fail to get data from database'}))\n<|end_body_1|>\n\n<|body_start_2|>\n self.set_header('content-type', 'application/json')\n body = self.request.body\n body['version'] = millis_now()\n body['group_id'] = self.group.id\n try:\n new_strategy = Strategy.from_json(body)\n except Exception as err:\n return self.process_error(400, 'invalid request content: {}'.format(err.message))\n try:\n gen_variables_from_strategy(new_strategy, effective_check=False)\n new_strategy.version = millis_now()\n StrategyCustDao().add_strategy(new_strategy)\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': []}))\n except Exception as err:\n logger.error(err)\n self.process_error(500, 'fail to add meta to database')\n<|end_body_2|>\n", "class_docstring": "", "class_name": "StrategyQueryHandler", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass StrategyQueryHandler:\n\n def get(self, app, name):\n \"\"\"get a specific strategy @API summary: get a specific strategy notes: get a strategy according to its app and name tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy produces: - application/json\"\"\"\n <|body_0|>\n\n def delete(self, app, name):\n \"\"\"delete strategy by its name and app @API summary: delete strategy by its name and app notes: delete a strategy by its name and app tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy produces: - 
application/json\"\"\"\n <|body_1|>\n\n def post(self, app, name):\n \"\"\"add or modify a specific strategy @API summary: add or modify a specific strategy notes: add or modify a strategy according to its app and name tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy - name: strategy in: body required: true type: json description: the json of the strategy produces: - application/json\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.set_header('content-type', 'application/json')\n try:\n strategy = StrategyCustDao().get_strategy_by_app_and_name(app, name).get_dict()\n if self.group.is_root():\n self.process_error(-1, 'the root user group has no permission to query strategies')\n elif self.group.is_manager():\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': [strategy]}))\n else:\n view_strategy = GroupPermissionDao().get_group_strategy_block(self.group.id)\n be_block_groups_ids = view_strategy.get('be_blocked', [])\n if strategy['group_id'] not in be_block_groups_ids:\n strategy.pop('score')\n else:\n strategy = {}\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': [strategy]}))\n except Exception as err:\n logger.error(err)\n self.finish(json_dumps({'status': -1, 'msg': 'fail to get data from database'}))\n<|end_body_0|>\n\n<|body_start_1|>\n self.set_header('content-type', 'application/json')\n try:\n StrategyCustDao().delete_strategy_by_app_and_name(app, name)\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': []}))\n except Exception as err:\n logger.error(err)\n self.finish(json_dumps({'status': -1, 'msg': 'fail to get data from database'}))\n<|end_body_1|>\n\n<|body_start_2|>\n self.set_header('content-type', 'application/json')\n body = self.request.body\n body['version'] = millis_now()\n body['group_id'] = self.group.id\n try:\n new_strategy = Strategy.from_json(body)\n except Exception as err:\n return self.process_error(400, 'invalid request content: {}'.format(err.message))\n try:\n gen_variables_from_strategy(new_strategy, effective_check=False)\n new_strategy.version = millis_now()\n StrategyCustDao().add_strategy(new_strategy)\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': []}))\n except Exception as err:\n logger.error(err)\n self.process_error(500, 'fail to add meta to database')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000364", "length_bytes": 20036, "license_type": "permissive", "methods": [{"docstring": "get a specific strategy @API summary: get a specific strategy notes: get a strategy according to its app and name tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy produces: - application/json", "name": "get", "signature": "def get(self, app, name)"}, {"docstring": "delete strategy by its name and app @API summary: delete strategy by its name and app notes: delete a strategy by its name and app tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy produces: - application/json", "name": "delete", "signature": "def delete(self, app, name)"}, {"docstring": "add or modify a specific strategy @API summary: add or modify a specific strategy notes: add or modify a strategy according 
to its app and name tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy - name: strategy in: body required: true type: json description: the json of the strategy produces: - application/json", "name": "post", "signature": "def post(self, app, name)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000517", "prompt": "Implement the Python class `StrategyQueryHandler` described below.\n\nClass description:\nImplement the StrategyQueryHandler class.\n\nMethod signatures and docstrings:\n- def get(self, app, name): get a specific strategy @API summary: get a specific strategy notes: get an strategy according to its app and name tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy produces: - application/json\n- def delete(self, app, name): delete strategy by its name and app @API summary: delete strategy by its name and app notes: delete an strategy by its name and app tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy produces: - application/json\n- def post(self, app, name): add or modify a specific strategy @API summary: add or modify a specific strategy notes: add or modify an strategy according to its app and name tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy - name: strategy in: body required: true type: json description: the json of the strategy produces: - application/json", "prompted_full_text": "Implement the Python class `StrategyQueryHandler` described below.\n\nClass description:\nImplement the StrategyQueryHandler class.\n\nMethod signatures and docstrings:\n- def get(self, app, name): get a specific strategy @API summary: get a specific strategy notes: get an strategy according to its app and name tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy produces: - application/json\n- def delete(self, app, name): delete strategy by its name and app @API summary: delete strategy by its name and app notes: delete an strategy by its name and app tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy produces: - application/json\n- def post(self, app, name): add or modify a specific strategy @API summary: add or modify a specific strategy notes: add or modify an strategy according to its app and name tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy - name: strategy in: body required: true type: json description: the json of the strategy produces: - application/json\n\n<|skeleton|>\nclass StrategyQueryHandler:\n\n def get(self, app, name):\n \"\"\"get a specific strategy @API summary: get a specific strategy notes: get an strategy according 
to its app and name tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy produces: - application/json\"\"\"\n <|body_0|>\n\n def delete(self, app, name):\n \"\"\"delete strategy by its name and app @API summary: delete strategy by its name and app notes: delete an strategy by its name and app tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy produces: - application/json\"\"\"\n <|body_1|>\n\n def post(self, app, name):\n \"\"\"add or modify a specific strategy @API summary: add or modify a specific strategy notes: add or modify an strategy according to its app and name tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy - name: strategy in: body required: true type: json description: the json of the strategy produces: - application/json\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.set_header('content-type', 'application/json')\n try:\n strategy = StrategyCustDao().get_strategy_by_app_and_name(app, name).get_dict()\n if self.group.is_root():\n self.process_error(-1, 'root用户组没有权限查询策略')\n elif self.group.is_manager():\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': [strategy]}))\n else:\n view_strategy = GroupPermissionDao().get_group_strategy_block(self.group.id)\n be_block_groups_ids = view_strategy.get('be_blocked', [])\n if strategy['group_id'] not in be_block_groups_ids:\n strategy.pop('score')\n else:\n strategy = {}\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': [strategy]}))\n except Exception as err:\n logger.error(err)\n self.finish(json_dumps({'status': -1, 'msg': 'fail to get data from database'}))\n<|end_body_0|>\n\n<|body_start_1|>\n self.set_header('content-type', 'application/json')\n try:\n StrategyCustDao().delete_strategy_by_app_and_name(app, name)\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': []}))\n except Exception as err:\n logger.error(err)\n self.finish(json_dumps({'status': -1, 'msg': 'fail to get data from database'}))\n<|end_body_1|>\n\n<|body_start_2|>\n self.set_header('content-type', 'application/json')\n body = self.request.body\n body['version'] = millis_now()\n body['group_id'] = self.group.id\n try:\n new_strategy = Strategy.from_json(body)\n except Exception as err:\n return self.process_error(400, 'invalid request content: {}'.format(err.message))\n try:\n gen_variables_from_strategy(new_strategy, effective_check=False)\n new_strategy.version = millis_now()\n StrategyCustDao().add_strategy(new_strategy)\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': []}))\n except Exception as err:\n logger.error(err)\n self.process_error(500, 'fail to add meta to database')\n<|end_body_2|>\n", "revision_id": "2e32e6e7b225e0bd87ee8c847c22862f12c51bb1", "skeleton": "<|skeleton|>\nclass StrategyQueryHandler:\n\n def get(self, app, name):\n \"\"\"get a specific strategy @API summary: get a specific strategy notes: get an strategy according to its app and name tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the 
strategy produces: - application/json\"\"\"\n <|body_0|>\n\n def delete(self, app, name):\n \"\"\"delete strategy by its name and app @API summary: delete strategy by its name and app notes: delete an strategy by its name and app tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy produces: - application/json\"\"\"\n <|body_1|>\n\n def post(self, app, name):\n \"\"\"add or modify a specific strategy @API summary: add or modify a specific strategy notes: add or modify an strategy according to its app and name tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy - name: strategy in: body required: true type: json description: the json of the strategy produces: - application/json\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class StrategyQueryHandler:\n def get(self, app, name):\n \"\"\"get a specific strategy @API summary: get a specific strategy notes: get an strategy according to its app and name tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy produces: - application/json\"\"\"\n self.set_header('content-type', 'application/json')\n try:\n strategy = StrategyCustDao().get_strategy_by_app_and_name(app, name).get_dict()\n if self.group.is_root():\n self.process_error(-1, 'root用户组没有权限查询策略')\n elif self.group.is_manager():\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': [strategy]}))\n else:\n view_strategy = GroupPermissionDao().get_group_strategy_block(self.group.id)\n be_block_groups_ids = view_strategy.get('be_blocked', [])\n if strategy['group_id'] not in be_block_groups_ids:\n strategy.pop('score')\n else:\n strategy = {}\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': [strategy]}))\n except Exception as err:\n logger.error(err)\n self.finish(json_dumps({'status': -1, 'msg': 'fail to get data from database'}))\n\n def delete(self, app, name):\n \"\"\"delete strategy by its name and app @API summary: delete strategy by its name and app notes: delete an strategy by its name and app tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy produces: - application/json\"\"\"\n self.set_header('content-type', 'application/json')\n try:\n StrategyCustDao().delete_strategy_by_app_and_name(app, name)\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': []}))\n except Exception as err:\n logger.error(err)\n self.finish(json_dumps({'status': -1, 'msg': 'fail to get data from database'}))\n\n def post(self, app, name):\n \"\"\"add or modify a specific strategy @API summary: add or modify a specific strategy notes: add or modify an strategy according to its app and name tags: - nebula parameters: - name: app in: path required: true type: string description: the app of the strategy - name: name in: path required: true type: string description: the name of the strategy - name: strategy in: body required: true type: json description: the json of the strategy 
produces: - application/json\"\"\"\n self.set_header('content-type', 'application/json')\n body = self.request.body\n body['version'] = millis_now()\n body['group_id'] = self.group.id\n try:\n new_strategy = Strategy.from_json(body)\n except Exception as err:\n return self.process_error(400, 'invalid request content: {}'.format(err.message))\n try:\n gen_variables_from_strategy(new_strategy, effective_check=False)\n new_strategy.version = millis_now()\n StrategyCustDao().add_strategy(new_strategy)\n self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': []}))\n except Exception as err:\n logger.error(err)\n self.process_error(500, 'fail to add meta to database')\n", "source": "the_stack_v2_python_sparse", "source_path": "nebula/views/strategy.py", "source_repo": "threathunterX/nebula_web", "split": "val", "star_events_count": 2} {"blob_id": "c6c55bce48300b6bf426c66e22874ef62a233694", "bodies": ["super(GroverBondVocabPredictor, self).__init__()\nself.linear = nn.Linear(in_features, vocab_size)\nself.linear_rev = nn.Linear(in_features, vocab_size)\nself.logsoftmax = nn.LogSoftmax(dim=1)", "nm_bonds = embeddings.shape[0]\nids1 = list(range(0, nm_bonds, 2))\nids2 = list(range(1, nm_bonds, 2))\nlogits = self.linear(embeddings[ids1]) + self.linear_rev(embeddings[ids2])\nreturn self.logsoftmax(logits)"], "bodies_text": "<|body_start_0|>\n super(GroverBondVocabPredictor, self).__init__()\n self.linear = nn.Linear(in_features, vocab_size)\n self.linear_rev = nn.Linear(in_features, vocab_size)\n self.logsoftmax = nn.LogSoftmax(dim=1)\n<|end_body_0|>\n\n<|body_start_1|>\n nm_bonds = embeddings.shape[0]\n ids1 = list(range(0, nm_bonds, 2))\n ids2 = list(range(1, nm_bonds, 2))\n logits = self.linear(embeddings[ids1]) + self.linear_rev(embeddings[ids2])\n return self.logsoftmax(logits)\n<|end_body_1|>\n", "class_docstring": "Layer for learning contextual information for bonds. The layer is used in Grover architecture to learn contextual information of a bond by predicting the context of a bond from the bond embedding in a multi-class classification setting. The contextual information of a bond are encoded as strings (ex: '(DOUBLE-STEREONONE-NONE)_C-(SINGLE-STEREONONE-NONE)2'). Example ------- >>> from deepchem.models.torch_models.grover_layers import GroverBondVocabPredictor >>> num_bonds = 20 >>> in_features, vocab_size = 16, 10 >>> layer = GroverBondVocabPredictor(vocab_size, in_features) >>> embedding = torch.randn(num_bonds * 2, in_features) >>> result = layer(embedding) >>> result.shape torch.Size([20, 10])", "class_name": "GroverBondVocabPredictor", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GroverBondVocabPredictor:\n \"\"\"Layer for learning contextual information for bonds. The layer is used in Grover architecture to learn contextual information of a bond by predicting the context of a bond from the bond embedding in a multi-class classification setting. The contextual information of a bond are encoded as strings (ex: '(DOUBLE-STEREONONE-NONE)_C-(SINGLE-STEREONONE-NONE)2'). 
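The `get` handler in the `StrategyQueryHandler` record above mixes transport concerns (headers, `finish`) with a small piece of authorization logic. A hedged sketch follows that pulls that logic out as a pure function so it can be unit-tested without Tornado; `_Group` and the blocked-ID list are hypothetical stand-ins for `self.group` and the result of `GroupPermissionDao().get_group_strategy_block(...)`. Note the sketch copies the dict and uses `pop('score', None)`, where the original mutates the DAO result and would raise `KeyError` on a strategy with no score.

def filter_strategy_for_group(strategy, group, be_blocked_group_ids):
    """Return the view of `strategy` that `group` is allowed to see."""
    if group.is_root():
        # Mirrors the handler's behavior: root groups may not query strategies.
        raise PermissionError('root group may not query strategies')
    if group.is_manager():
        return strategy            # managers see everything, score included
    if strategy['group_id'] not in be_blocked_group_ids:
        visible = dict(strategy)   # copy instead of mutating the DAO result
        visible.pop('score', None)
        return visible
    return {}                      # blocked: the strategy is hidden entirely


class _Group:                      # stand-in for the handler's self.group
    def is_root(self):
        return False

    def is_manager(self):
        return False


visible = filter_strategy_for_group(
    {'group_id': 7, 'score': 0.9, 'name': 'demo'}, _Group(), [3, 4])
assert visible == {'group_id': 7, 'name': 'demo'}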
Example ------- >>> from deepchem.models.torch_models.grover_layers import GroverBondVocabPredictor >>> num_bonds = 20 >>> in_features, vocab_size = 16, 10 >>> layer = GroverBondVocabPredictor(vocab_size, in_features) >>> embedding = torch.randn(num_bonds * 2, in_features) >>> result = layer(embedding) >>> result.shape torch.Size([20, 10])\"\"\"\n\n def __init__(self, vocab_size: int, in_features: int=128):\n \"\"\"Initializes GroverBondVocabPredictor Parameters ---------- vocab_size: int Size of vocabulary, used for number of classes in prediction. in_features: int, default: 128 Input feature size of bond embeddings.\"\"\"\n <|body_0|>\n\n def forward(self, embeddings):\n \"\"\"Parameters ---------- embeddings: torch.Tensor bond embeddings of shape (num_bond, in_features) Returns ------- logits: torch.Tensor the prediction for each bond, (num_bond, vocab_size)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(GroverBondVocabPredictor, self).__init__()\n self.linear = nn.Linear(in_features, vocab_size)\n self.linear_rev = nn.Linear(in_features, vocab_size)\n self.logsoftmax = nn.LogSoftmax(dim=1)\n<|end_body_0|>\n\n<|body_start_1|>\n nm_bonds = embeddings.shape[0]\n ids1 = list(range(0, nm_bonds, 2))\n ids2 = list(range(1, nm_bonds, 2))\n logits = self.linear(embeddings[ids1]) + self.linear_rev(embeddings[ids2])\n return self.logsoftmax(logits)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000365", "length_bytes": 38432, "license_type": "permissive", "methods": [{"docstring": "Initializes GroverBondVocabPredictor Parameters ---------- vocab_size: int Size of vocabulary, used for number of classes in prediction. in_features: int, default: 128 Input feature size of bond embeddings.", "name": "__init__", "signature": "def __init__(self, vocab_size: int, in_features: int=128)"}, {"docstring": "Parameters ---------- embeddings: torch.Tensor bond embeddings of shape (num_bond, in_features) Returns ------- logits: torch.Tensor the prediction for each bond, (num_bond, vocab_size)", "name": "forward", "signature": "def forward(self, embeddings)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003968", "prompt": "Implement the Python class `GroverBondVocabPredictor` described below.\n\nClass description:\nLayer for learning contextual information for bonds. The layer is used in Grover architecture to learn contextual information of a bond by predicting the context of a bond from the bond embedding in a multi-class classification setting. The contextual information of a bond are encoded as strings (ex: '(DOUBLE-STEREONONE-NONE)_C-(SINGLE-STEREONONE-NONE)2'). Example ------- >>> from deepchem.models.torch_models.grover_layers import GroverBondVocabPredictor >>> num_bonds = 20 >>> in_features, vocab_size = 16, 10 >>> layer = GroverBondVocabPredictor(vocab_size, in_features) >>> embedding = torch.randn(num_bonds * 2, in_features) >>> result = layer(embedding) >>> result.shape torch.Size([20, 10])\n\nMethod signatures and docstrings:\n- def __init__(self, vocab_size: int, in_features: int=128): Initializes GroverBondVocabPredictor Parameters ---------- vocab_size: int Size of vocabulary, used for number of classes in prediction. 
in_features: int, default: 128 Input feature size of bond embeddings.\n- def forward(self, embeddings): Parameters ---------- embeddings: torch.Tensor bond embeddings of shape (num_bond, in_features) Returns ------- logits: torch.Tensor the prediction for each bond, (num_bond, vocab_size)", "prompted_full_text": "Implement the Python class `GroverBondVocabPredictor` described below.\n\nClass description:\nLayer for learning contextual information for bonds. The layer is used in Grover architecture to learn contextual information of a bond by predicting the context of a bond from the bond embedding in a multi-class classification setting. The contextual information of a bond are encoded as strings (ex: '(DOUBLE-STEREONONE-NONE)_C-(SINGLE-STEREONONE-NONE)2'). Example ------- >>> from deepchem.models.torch_models.grover_layers import GroverBondVocabPredictor >>> num_bonds = 20 >>> in_features, vocab_size = 16, 10 >>> layer = GroverBondVocabPredictor(vocab_size, in_features) >>> embedding = torch.randn(num_bonds * 2, in_features) >>> result = layer(embedding) >>> result.shape torch.Size([20, 10])\n\nMethod signatures and docstrings:\n- def __init__(self, vocab_size: int, in_features: int=128): Initializes GroverBondVocabPredictor Parameters ---------- vocab_size: int Size of vocabulary, used for number of classes in prediction. in_features: int, default: 128 Input feature size of bond embeddings.\n- def forward(self, embeddings): Parameters ---------- embeddings: torch.Tensor bond embeddings of shape (num_bond, in_features) Returns ------- logits: torch.Tensor the prediction for each bond, (num_bond, vocab_size)\n\n<|skeleton|>\nclass GroverBondVocabPredictor:\n \"\"\"Layer for learning contextual information for bonds. The layer is used in Grover architecture to learn contextual information of a bond by predicting the context of a bond from the bond embedding in a multi-class classification setting. The contextual information of a bond are encoded as strings (ex: '(DOUBLE-STEREONONE-NONE)_C-(SINGLE-STEREONONE-NONE)2'). Example ------- >>> from deepchem.models.torch_models.grover_layers import GroverBondVocabPredictor >>> num_bonds = 20 >>> in_features, vocab_size = 16, 10 >>> layer = GroverBondVocabPredictor(vocab_size, in_features) >>> embedding = torch.randn(num_bonds * 2, in_features) >>> result = layer(embedding) >>> result.shape torch.Size([20, 10])\"\"\"\n\n def __init__(self, vocab_size: int, in_features: int=128):\n \"\"\"Initializes GroverBondVocabPredictor Parameters ---------- vocab_size: int Size of vocabulary, used for number of classes in prediction. 
in_features: int, default: 128 Input feature size of bond embeddings.\"\"\"\n <|body_0|>\n\n def forward(self, embeddings):\n \"\"\"Parameters ---------- embeddings: torch.Tensor bond embeddings of shape (num_bond, in_features) Returns ------- logits: torch.Tensor the prediction for each bond, (num_bond, vocab_size)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(GroverBondVocabPredictor, self).__init__()\n self.linear = nn.Linear(in_features, vocab_size)\n self.linear_rev = nn.Linear(in_features, vocab_size)\n self.logsoftmax = nn.LogSoftmax(dim=1)\n<|end_body_0|>\n\n<|body_start_1|>\n nm_bonds = embeddings.shape[0]\n ids1 = list(range(0, nm_bonds, 2))\n ids2 = list(range(1, nm_bonds, 2))\n logits = self.linear(embeddings[ids1]) + self.linear_rev(embeddings[ids2])\n return self.logsoftmax(logits)\n<|end_body_1|>\n", "revision_id": "ee6e67ebcf7bf04259cf13aff6388e2b791fea3d", "skeleton": "<|skeleton|>\nclass GroverBondVocabPredictor:\n \"\"\"Layer for learning contextual information for bonds. The layer is used in Grover architecture to learn contextual information of a bond by predicting the context of a bond from the bond embedding in a multi-class classification setting. The contextual information of a bond are encoded as strings (ex: '(DOUBLE-STEREONONE-NONE)_C-(SINGLE-STEREONONE-NONE)2'). Example ------- >>> from deepchem.models.torch_models.grover_layers import GroverBondVocabPredictor >>> num_bonds = 20 >>> in_features, vocab_size = 16, 10 >>> layer = GroverBondVocabPredictor(vocab_size, in_features) >>> embedding = torch.randn(num_bonds * 2, in_features) >>> result = layer(embedding) >>> result.shape torch.Size([20, 10])\"\"\"\n\n def __init__(self, vocab_size: int, in_features: int=128):\n \"\"\"Initializes GroverBondVocabPredictor Parameters ---------- vocab_size: int Size of vocabulary, used for number of classes in prediction. in_features: int, default: 128 Input feature size of bond embeddings.\"\"\"\n <|body_0|>\n\n def forward(self, embeddings):\n \"\"\"Parameters ---------- embeddings: torch.Tensor bond embeddings of shape (num_bond, in_features) Returns ------- logits: torch.Tensor the prediction for each bond, (num_bond, vocab_size)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GroverBondVocabPredictor:\n \"\"\"Layer for learning contextual information for bonds. The layer is used in Grover architecture to learn contextual information of a bond by predicting the context of a bond from the bond embedding in a multi-class classification setting. The contextual information of a bond are encoded as strings (ex: '(DOUBLE-STEREONONE-NONE)_C-(SINGLE-STEREONONE-NONE)2'). Example ------- >>> from deepchem.models.torch_models.grover_layers import GroverBondVocabPredictor >>> num_bonds = 20 >>> in_features, vocab_size = 16, 10 >>> layer = GroverBondVocabPredictor(vocab_size, in_features) >>> embedding = torch.randn(num_bonds * 2, in_features) >>> result = layer(embedding) >>> result.shape torch.Size([20, 10])\"\"\"\n\n def __init__(self, vocab_size: int, in_features: int=128):\n \"\"\"Initializes GroverBondVocabPredictor Parameters ---------- vocab_size: int Size of vocabulary, used for number of classes in prediction. 
in_features: int, default: 128 Input feature size of bond embeddings.\"\"\"\n super(GroverBondVocabPredictor, self).__init__()\n self.linear = nn.Linear(in_features, vocab_size)\n self.linear_rev = nn.Linear(in_features, vocab_size)\n self.logsoftmax = nn.LogSoftmax(dim=1)\n\n def forward(self, embeddings):\n \"\"\"Parameters ---------- embeddings: torch.Tensor bond embeddings of shape (num_bond, in_features) Returns ------- logits: torch.Tensor the prediction for each bond, (num_bond, vocab_size)\"\"\"\n nm_bonds = embeddings.shape[0]\n ids1 = list(range(0, nm_bonds, 2))\n ids2 = list(range(1, nm_bonds, 2))\n logits = self.linear(embeddings[ids1]) + self.linear_rev(embeddings[ids2])\n return self.logsoftmax(logits)\n", "source": "the_stack_v2_python_sparse", "source_path": "deepchem/models/torch_models/grover_layers.py", "source_repo": "deepchem/deepchem", "split": "val", "star_events_count": 4876} {"blob_id": "19c9bcf19d67e868f2b5c68808f5f8201bfd4dc7", "bodies": ["try:\n return (int(key[0] // 16), int(key[1] // 16), int(key[2] // 16))\nexcept ValueError:\n return KeyError(\"Key %s isn't usable here!\" % repr(key))", "minx, innerx = divmod(key[0], 16)\nminy, innery = divmod(key[1], 16)\nminz, innerz = divmod(key[2], 16)\nminx = int(minx)\nminy = int(miny)\nminz = int(minz)\nmaxx = minx + 1\nmaxy = miny + 1\nmaxz = minz + 1\nif innerx <= radius:\n minx -= 1\nif innery <= radius:\n miny -= 1\nif innerz <= radius:\n minz -= 1\nif innerx + radius >= 16:\n maxx += 1\nif innery + radius >= 16:\n maxy += 1\nif innerz + radius >= 16:\n maxz += 1\nexpand = int(radius // 16)\nminx -= expand\nminy -= expand\nminz -= expand\nmaxx += expand\nmaxy += expand\nmaxz += expand\nreturn product(xrange(minx, maxx), xrange(miny, maxy), xrange(minz, maxz))"], "bodies_text": "<|body_start_0|>\n try:\n return (int(key[0] // 16), int(key[1] // 16), int(key[2] // 16))\n except ValueError:\n return KeyError(\"Key %s isn't usable here!\" % repr(key))\n<|end_body_0|>\n\n<|body_start_1|>\n minx, innerx = divmod(key[0], 16)\n miny, innery = divmod(key[1], 16)\n minz, innerz = divmod(key[2], 16)\n minx = int(minx)\n miny = int(miny)\n minz = int(minz)\n maxx = minx + 1\n maxy = miny + 1\n maxz = minz + 1\n if innerx <= radius:\n minx -= 1\n if innery <= radius:\n miny -= 1\n if innerz <= radius:\n minz -= 1\n if innerx + radius >= 16:\n maxx += 1\n if innery + radius >= 16:\n maxy += 1\n if innerz + radius >= 16:\n maxz += 1\n expand = int(radius // 16)\n minx -= expand\n miny -= expand\n minz -= expand\n maxx += expand\n maxy += expand\n maxz += expand\n return product(xrange(minx, maxx), xrange(miny, maxy), xrange(minz, maxz))\n<|end_body_1|>\n", "class_docstring": "Class for tracking blocks in the XZ-plane.", "class_name": "Block3DSpatialDict", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Block3DSpatialDict:\n \"\"\"Class for tracking blocks in the XZ-plane.\"\"\"\n\n def key_for_bucket(self, key):\n \"\"\"Partition keys into chunk-sized buckets.\"\"\"\n <|body_0|>\n\n def keys_near(self, key, radius):\n \"\"\"Get all bucket keys \"near\" this key. 
This method may return a generator.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return (int(key[0] // 16), int(key[1] // 16), int(key[2] // 16))\n except ValueError:\n return KeyError(\"Key %s isn't usable here!\" % repr(key))\n<|end_body_0|>\n\n<|body_start_1|>\n minx, innerx = divmod(key[0], 16)\n miny, innery = divmod(key[1], 16)\n minz, innerz = divmod(key[2], 16)\n minx = int(minx)\n miny = int(miny)\n minz = int(minz)\n maxx = minx + 1\n maxy = miny + 1\n maxz = minz + 1\n if innerx <= radius:\n minx -= 1\n if innery <= radius:\n miny -= 1\n if innerz <= radius:\n minz -= 1\n if innerx + radius >= 16:\n maxx += 1\n if innery + radius >= 16:\n maxy += 1\n if innerz + radius >= 16:\n maxz += 1\n expand = int(radius // 16)\n minx -= expand\n miny -= expand\n minz -= expand\n maxx += expand\n maxy += expand\n maxz += expand\n return product(xrange(minx, maxx), xrange(miny, maxy), xrange(minz, maxz))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000366", "length_bytes": 5213, "license_type": "permissive", "methods": [{"docstring": "Partition keys into chunk-sized buckets.", "name": "key_for_bucket", "signature": "def key_for_bucket(self, key)"}, {"docstring": "Get all bucket keys \"near\" this key. This method may return a generator.", "name": "keys_near", "signature": "def keys_near(self, key, radius)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002421", "prompt": "Implement the Python class `Block3DSpatialDict` described below.\n\nClass description:\nClass for tracking blocks in the XZ-plane.\n\nMethod signatures and docstrings:\n- def key_for_bucket(self, key): Partition keys into chunk-sized buckets.\n- def keys_near(self, key, radius): Get all bucket keys \"near\" this key. This method may return a generator.", "prompted_full_text": "Implement the Python class `Block3DSpatialDict` described below.\n\nClass description:\nClass for tracking blocks in the XZ-plane.\n\nMethod signatures and docstrings:\n- def key_for_bucket(self, key): Partition keys into chunk-sized buckets.\n- def keys_near(self, key, radius): Get all bucket keys \"near\" this key. This method may return a generator.\n\n<|skeleton|>\nclass Block3DSpatialDict:\n \"\"\"Class for tracking blocks in the XZ-plane.\"\"\"\n\n def key_for_bucket(self, key):\n \"\"\"Partition keys into chunk-sized buckets.\"\"\"\n <|body_0|>\n\n def keys_near(self, key, radius):\n \"\"\"Get all bucket keys \"near\" this key. 
This method may return a generator.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return (int(key[0] // 16), int(key[1] // 16), int(key[2] // 16))\n except ValueError:\n return KeyError(\"Key %s isn't usable here!\" % repr(key))\n<|end_body_0|>\n\n<|body_start_1|>\n minx, innerx = divmod(key[0], 16)\n miny, innery = divmod(key[1], 16)\n minz, innerz = divmod(key[2], 16)\n minx = int(minx)\n miny = int(miny)\n minz = int(minz)\n maxx = minx + 1\n maxy = miny + 1\n maxz = minz + 1\n if innerx <= radius:\n minx -= 1\n if innery <= radius:\n miny -= 1\n if innerz <= radius:\n minz -= 1\n if innerx + radius >= 16:\n maxx += 1\n if innery + radius >= 16:\n maxy += 1\n if innerz + radius >= 16:\n maxz += 1\n expand = int(radius // 16)\n minx -= expand\n miny -= expand\n minz -= expand\n maxx += expand\n maxy += expand\n maxz += expand\n return product(xrange(minx, maxx), xrange(miny, maxy), xrange(minz, maxz))\n<|end_body_1|>\n", "revision_id": "7be5d792871a8447499911fa1502c6a7c1437dc3", "skeleton": "<|skeleton|>\nclass Block3DSpatialDict:\n \"\"\"Class for tracking blocks in the XZ-plane.\"\"\"\n\n def key_for_bucket(self, key):\n \"\"\"Partition keys into chunk-sized buckets.\"\"\"\n <|body_0|>\n\n def keys_near(self, key, radius):\n \"\"\"Get all bucket keys \"near\" this key. This method may return a generator.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Block3DSpatialDict:\n \"\"\"Class for tracking blocks in the XZ-plane.\"\"\"\n\n def key_for_bucket(self, key):\n \"\"\"Partition keys into chunk-sized buckets.\"\"\"\n try:\n return (int(key[0] // 16), int(key[1] // 16), int(key[2] // 16))\n except ValueError:\n return KeyError(\"Key %s isn't usable here!\" % repr(key))\n\n def keys_near(self, key, radius):\n \"\"\"Get all bucket keys \"near\" this key. 
This method may return a generator.\"\"\"\n minx, innerx = divmod(key[0], 16)\n miny, innery = divmod(key[1], 16)\n minz, innerz = divmod(key[2], 16)\n minx = int(minx)\n miny = int(miny)\n minz = int(minz)\n maxx = minx + 1\n maxy = miny + 1\n maxz = minz + 1\n if innerx <= radius:\n minx -= 1\n if innery <= radius:\n miny -= 1\n if innerz <= radius:\n minz -= 1\n if innerx + radius >= 16:\n maxx += 1\n if innery + radius >= 16:\n maxy += 1\n if innerz + radius >= 16:\n maxz += 1\n expand = int(radius // 16)\n minx -= expand\n miny -= expand\n minz -= expand\n maxx += expand\n maxy += expand\n maxz += expand\n return product(xrange(minx, maxx), xrange(miny, maxy), xrange(minz, maxz))\n", "source": "the_stack_v2_python_sparse", "source_path": "bravo/utilities/spatial.py", "source_repo": "CyberFlameGO/bravo", "split": "val", "star_events_count": 0} {"blob_id": "b317920e0921b5cc8e78bd74714dde79cd8a7871", "bodies": ["self.teacher_model = teacher_model\nif 'args' in kwargs:\n assert isinstance(kwargs['args'], DistillerTrainingArguments), '`args` should be an instance of `DistillerTrainingArguments`.'\nelse:\n kwargs['args'] = DistillerTrainingArguments('tmp')\nsuper().__init__(**kwargs)", "student_outputs = model(**inputs)\nstudent_loss = student_outputs['loss']\nstudent_logits = student_outputs['logits']\nwith torch.no_grad():\n teacher_outputs = self.teacher_model(**inputs)\n teacher_logits = teacher_outputs['logits']\nkl_loss = nn.KLDivLoss(reduction='batchmean')\nkl_divergence = kl_loss(F.log_softmax(student_logits / self.args.temperature, dim=-1), F.softmax(teacher_logits / self.args.temperature, dim=-1))\nkd_loss = self.args.temperature ** 2 * kl_divergence\nloss = self.args.alpha * student_loss + (1 - self.args.alpha) * kd_loss\nreturn (loss, student_outputs) if return_outputs else loss"], "bodies_text": "<|body_start_0|>\n self.teacher_model = teacher_model\n if 'args' in kwargs:\n assert isinstance(kwargs['args'], DistillerTrainingArguments), '`args` should be an instance of `DistillerTrainingArguments`.'\n else:\n kwargs['args'] = DistillerTrainingArguments('tmp')\n super().__init__(**kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n student_outputs = model(**inputs)\n student_loss = student_outputs['loss']\n student_logits = student_outputs['logits']\n with torch.no_grad():\n teacher_outputs = self.teacher_model(**inputs)\n teacher_logits = teacher_outputs['logits']\n kl_loss = nn.KLDivLoss(reduction='batchmean')\n kl_divergence = kl_loss(F.log_softmax(student_logits / self.args.temperature, dim=-1), F.softmax(teacher_logits / self.args.temperature, dim=-1))\n kd_loss = self.args.temperature ** 2 * kl_divergence\n loss = self.args.alpha * student_loss + (1 - self.args.alpha) * kd_loss\n return (loss, student_outputs) if return_outputs else loss\n<|end_body_1|>\n", "class_docstring": "Hugging Face distillation-based trainer.", "class_name": "HfDistillerTrainer", "detected_licenses": ["MIT", "LicenseRef-scancode-free-unknown", "LGPL-2.1-or-later", "Apache-2.0", "LicenseRef-scancode-generic-cla"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HfDistillerTrainer:\n \"\"\"Hugging Face distillation-based trainer.\"\"\"\n\n def __init__(self, teacher_model: torch.nn.Module, **kwargs) -> None:\n \"\"\"Initialize Hugging Face distillation-based trainer. 
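The `Block3DSpatialDict` record carries two genuine bugs worth noting: `key_for_bucket` *returns* a `KeyError` instead of raising it, so a bad coordinate hands callers an exception object as a bucket key, and `keys_near` uses the Python 2 builtin `xrange` (with `product` presumably imported elsewhere in the original module). The Python 3 sketch below corrects both; the semantics otherwise match the record, down to the `// 16` chunk partitioning and the radius-based neighbor expansion.

from itertools import product


def key_for_bucket(key):
    """Partition a block coordinate into its 16x16x16 chunk bucket."""
    try:
        return (int(key[0] // 16), int(key[1] // 16), int(key[2] // 16))
    except (TypeError, ValueError):
        raise KeyError("Key %r isn't usable here!" % (key,))


def keys_near(key, radius):
    """Return an iterator over all bucket keys within `radius` of `key`."""
    lo, inner = [], []
    for coord in key:
        q, r = divmod(coord, 16)
        lo.append(int(q))
        inner.append(r)
    hi = [v + 1 for v in lo]
    for axis in range(3):
        if inner[axis] <= radius:
            lo[axis] -= 1          # reach into the neighbor bucket below
        if inner[axis] + radius >= 16:
            hi[axis] += 1          # reach into the neighbor bucket above
    expand = int(radius // 16)     # radii that span whole chunks
    return product(*(range(a - expand, b + expand) for a, b in zip(lo, hi)))


assert key_for_bucket((17, 3, -1)) == (1, 0, -1)
assert len(list(keys_near((0, 0, 0), 1))) == 8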
Args: teacher_model: Pre-trained teacher model.\"\"\"\n <|body_0|>\n\n def compute_loss(self, model: torch.nn.Module, inputs: Dict[str, torch.Tensor], return_outputs: Optional[bool]=False) -> Tuple[torch.Tensor, ...]:\n \"\"\"Override the computation of the loss function. The loss is a weighted sum of the student's loss, as computed by the original `HfTrainer`, and the KL divergence between the student and teacher models. Args: model: Student model. inputs: Input tensors. return_outputs: Whether outputs should be returned. Returns: (loss, outputs) or the loss tensor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.teacher_model = teacher_model\n if 'args' in kwargs:\n assert isinstance(kwargs['args'], DistillerTrainingArguments), '`args` should be an instance of `DistillerTrainingArguments`.'\n else:\n kwargs['args'] = DistillerTrainingArguments('tmp')\n super().__init__(**kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n student_outputs = model(**inputs)\n student_loss = student_outputs['loss']\n student_logits = student_outputs['logits']\n with torch.no_grad():\n teacher_outputs = self.teacher_model(**inputs)\n teacher_logits = teacher_outputs['logits']\n kl_loss = nn.KLDivLoss(reduction='batchmean')\n kl_divergence = kl_loss(F.log_softmax(student_logits / self.args.temperature, dim=-1), F.softmax(teacher_logits / self.args.temperature, dim=-1))\n kd_loss = self.args.temperature ** 2 * kl_divergence\n loss = self.args.alpha * student_loss + (1 - self.args.alpha) * kd_loss\n return (loss, student_outputs) if return_outputs else loss\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000367", "length_bytes": 4544, "license_type": "permissive", "methods": [{"docstring": "Initialize Hugging Face distillation-based trainer. Args: teacher_model: Pre-trained teacher model.", "name": "__init__", "signature": "def __init__(self, teacher_model: torch.nn.Module, **kwargs) -> None"}, {"docstring": "Override the computation of the loss function. The loss is a weighted sum of the student's loss, as computed by the original `HfTrainer`, and the KL divergence between the student and teacher models. Args: model: Student model. inputs: Input tensors. return_outputs: Whether outputs should be returned. Returns: (loss, outputs) or the loss tensor.", "name": "compute_loss", "signature": "def compute_loss(self, model: torch.nn.Module, inputs: Dict[str, torch.Tensor], return_outputs: Optional[bool]=False) -> Tuple[torch.Tensor, ...]"}], "n_methods": 2, "prompt": "Implement the Python class `HfDistillerTrainer` described below.\n\nClass description:\nHugging Face distillation-based trainer.\n\nMethod signatures and docstrings:\n- def __init__(self, teacher_model: torch.nn.Module, **kwargs) -> None: Initialize Hugging Face distillation-based trainer. Args: teacher_model: Pre-trained teacher model.\n- def compute_loss(self, model: torch.nn.Module, inputs: Dict[str, torch.Tensor], return_outputs: Optional[bool]=False) -> Tuple[torch.Tensor, ...]: Override the computation of the loss function. The loss is a weighted sum of the student's loss, as computed by the original `HfTrainer`, and the KL divergence between the student and teacher models. Args: model: Student model. inputs: Input tensors. return_outputs: Whether outputs should be returned. 
Returns: (loss, outputs) or the loss tensor.", "prompted_full_text": "Implement the Python class `HfDistillerTrainer` described below.\n\nClass description:\nHugging Face distillation-based trainer.\n\nMethod signatures and docstrings:\n- def __init__(self, teacher_model: torch.nn.Module, **kwargs) -> None: Initialize Hugging Face distillation-based trainer. Args: teacher_model: Pre-trained teacher model.\n- def compute_loss(self, model: torch.nn.Module, inputs: Dict[str, torch.Tensor], return_outputs: Optional[bool]=False) -> Tuple[torch.Tensor, ...]: Override the computation of the loss function. The loss is a weighted sum of the student's loss, as computed by the original `HfTrainer`, and the KL divergence between the student and teacher models. Args: model: Student model. inputs: Input tensors. return_outputs: Whether outputs should be returned. Returns: (loss, outputs) or the loss tensor.\n\n<|skeleton|>\nclass HfDistillerTrainer:\n \"\"\"Hugging Face distillation-based trainer.\"\"\"\n\n def __init__(self, teacher_model: torch.nn.Module, **kwargs) -> None:\n \"\"\"Initialize Hugging Face distillation-based trainer. Args: teacher_model: Pre-trained teacher model.\"\"\"\n <|body_0|>\n\n def compute_loss(self, model: torch.nn.Module, inputs: Dict[str, torch.Tensor], return_outputs: Optional[bool]=False) -> Tuple[torch.Tensor, ...]:\n \"\"\"Override the computation of the loss function. The loss is a weighted sum of the student's loss, as computed by the original `HfTrainer`, and the KL divergence between the student and teacher models. Args: model: Student model. inputs: Input tensors. return_outputs: Whether outputs should be returned. Returns: (loss, outputs) or the loss tensor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.teacher_model = teacher_model\n if 'args' in kwargs:\n assert isinstance(kwargs['args'], DistillerTrainingArguments), '`args` should be an instance of `DistillerTrainingArguments`.'\n else:\n kwargs['args'] = DistillerTrainingArguments('tmp')\n super().__init__(**kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n student_outputs = model(**inputs)\n student_loss = student_outputs['loss']\n student_logits = student_outputs['logits']\n with torch.no_grad():\n teacher_outputs = self.teacher_model(**inputs)\n teacher_logits = teacher_outputs['logits']\n kl_loss = nn.KLDivLoss(reduction='batchmean')\n kl_divergence = kl_loss(F.log_softmax(student_logits / self.args.temperature, dim=-1), F.softmax(teacher_logits / self.args.temperature, dim=-1))\n kd_loss = self.args.temperature ** 2 * kl_divergence\n loss = self.args.alpha * student_loss + (1 - self.args.alpha) * kd_loss\n return (loss, student_outputs) if return_outputs else loss\n<|end_body_1|>\n", "revision_id": "95d6e19a1523a701b3fbc249dd1a7d1e7ba44aee", "skeleton": "<|skeleton|>\nclass HfDistillerTrainer:\n \"\"\"Hugging Face distillation-based trainer.\"\"\"\n\n def __init__(self, teacher_model: torch.nn.Module, **kwargs) -> None:\n \"\"\"Initialize Hugging Face distillation-based trainer. Args: teacher_model: Pre-trained teacher model.\"\"\"\n <|body_0|>\n\n def compute_loss(self, model: torch.nn.Module, inputs: Dict[str, torch.Tensor], return_outputs: Optional[bool]=False) -> Tuple[torch.Tensor, ...]:\n \"\"\"Override the computation of the loss function. The loss is a weighted sum of the student's loss, as computed by the original `HfTrainer`, and the KL divergence between the student and teacher models. Args: model: Student model. inputs: Input tensors. 
return_outputs: Whether outputs should be returned. Returns: (loss, outputs) or the loss tensor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class HfDistillerTrainer:\n \"\"\"Hugging Face distillation-based trainer.\"\"\"\n\n def __init__(self, teacher_model: torch.nn.Module, **kwargs) -> None:\n \"\"\"Initialize Hugging Face distillation-based trainer. Args: teacher_model: Pre-trained teacher model.\"\"\"\n self.teacher_model = teacher_model\n if 'args' in kwargs:\n assert isinstance(kwargs['args'], DistillerTrainingArguments), '`args` should be an instance of `DistillerTrainingArguments`.'\n else:\n kwargs['args'] = DistillerTrainingArguments('tmp')\n super().__init__(**kwargs)\n\n def compute_loss(self, model: torch.nn.Module, inputs: Dict[str, torch.Tensor], return_outputs: Optional[bool]=False) -> Tuple[torch.Tensor, ...]:\n \"\"\"Override the computation of the loss function. The loss is a weighted sum of the student's loss, as computed by the original `HfTrainer`, and the KL divergence between the student and teacher models. Args: model: Student model. inputs: Input tensors. return_outputs: Whether outputs should be returned. Returns: (loss, outputs) or the loss tensor.\"\"\"\n student_outputs = model(**inputs)\n student_loss = student_outputs['loss']\n student_logits = student_outputs['logits']\n with torch.no_grad():\n teacher_outputs = self.teacher_model(**inputs)\n teacher_logits = teacher_outputs['logits']\n kl_loss = nn.KLDivLoss(reduction='batchmean')\n kl_divergence = kl_loss(F.log_softmax(student_logits / self.args.temperature, dim=-1), F.softmax(teacher_logits / self.args.temperature, dim=-1))\n kd_loss = self.args.temperature ** 2 * kl_divergence\n loss = self.args.alpha * student_loss + (1 - self.args.alpha) * kd_loss\n return (loss, student_outputs) if return_outputs else loss\n", "source": "the_stack_v2_python_sparse", "source_path": "archai/trainers/nlp/hf_trainer.py", "source_repo": "microsoft/archai", "split": "val", "star_events_count": 439} {"blob_id": "83a70ed2d61353c68c47156f851f49936d5fc15c", "bodies": ["self.job_id = job_id\nself.num_machines_failed = num_machines_failed\nself.num_machines_passed = num_machines_passed\nself.num_machines_total = num_machines_total\nself.registering_app = registering_app\nself.state = state", "if dictionary is None:\n return None\njob_id = dictionary.get('jobId')\nnum_machines_failed = dictionary.get('numMachinesFailed')\nnum_machines_passed = dictionary.get('numMachinesPassed')\nnum_machines_total = dictionary.get('numMachinesTotal')\nregistering_app = dictionary.get('registeringApp')\nstate = dictionary.get('state')\nreturn cls(job_id, num_machines_failed, num_machines_passed, num_machines_total, registering_app, state)"], "bodies_text": "<|body_start_0|>\n self.job_id = job_id\n self.num_machines_failed = num_machines_failed\n self.num_machines_passed = num_machines_passed\n self.num_machines_total = num_machines_total\n self.registering_app = registering_app\n self.state = state\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n job_id = dictionary.get('jobId')\n num_machines_failed = dictionary.get('numMachinesFailed')\n num_machines_passed = dictionary.get('numMachinesPassed')\n num_machines_total = dictionary.get('numMachinesTotal')\n registering_app = dictionary.get('registeringApp')\n state = dictionary.get('state')\n return cls(job_id, num_machines_failed, num_machines_passed, 
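The `compute_loss` override in the `HfDistillerTrainer` record above is standard Hinton-style knowledge distillation: a convex combination of the student's hard task loss and a temperature-scaled KL divergence to a frozen teacher, with the `T**2` factor restoring gradient magnitude after the softmax is softened. A self-contained sketch of just that arithmetic follows; `alpha` and `temperature` mirror the `DistillerTrainingArguments` fields the record assumes, and the teacher logits are taken as given (the record computes them under `torch.no_grad()`).

import torch
import torch.nn.functional as F


def distillation_loss(student_loss, student_logits, teacher_logits,
                      alpha=0.5, temperature=2.0):
    """alpha-weighted sum of the hard task loss and the soft KD term."""
    kl = F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
        reduction='batchmean')
    # The T**2 factor keeps soft-target gradients on the same scale as the
    # hard loss when the temperature is raised (Hinton et al., 2015).
    return alpha * student_loss + (1 - alpha) * temperature ** 2 * kl


loss = distillation_loss(torch.tensor(1.3),
                         torch.randn(8, 100), torch.randn(8, 100))
assert loss.ndim == 0   # a scalar, ready for backward()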
num_machines_total, registering_app, state)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'BulkInstallAppTaskInfo' model. Parameters for a bulk install app task. Attributes: job_id (string): Job id of the task. num_machines_failed (int): Number of machines on which task is started. num_machines_passed (int): Number of machines on which task is started. num_machines_total (int): Number of machines on which task is started. registering_app (RegisteringAppEnum): Application being registered. This param is used to indicate the app for which the job is created. 'oracle' indicates that the job was created for oracle app. 'msSql' indicates that the job was created for msSql app. 'physical' indicates that the job was created for physical machine. state (StateBulkIns", "class_name": "BulkInstallAppTaskInfo", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BulkInstallAppTaskInfo:\n \"\"\"Implementation of the 'BulkInstallAppTaskInfo' model. Parameters for a bulk install app task. Attributes: job_id (string): Job id of the task. num_machines_failed (int): Number of machines on which task is started. num_machines_passed (int): Number of machines on which task is started. num_machines_total (int): Number of machines on which task is started. registering_app (RegisteringAppEnum): Application being registered. This param is used to indicate the app for which the job is created. 'oracle' indicates that the job was created for oracle app. 'msSql' indicates that the job was created for msSql app. 'physical' indicates that the job was created for physical machine. state (StateBulkIns\"\"\"\n\n def __init__(self, job_id=None, num_machines_failed=None, num_machines_passed=None, num_machines_total=None, registering_app=None, state=None):\n \"\"\"Constructor for the BulkInstallAppTaskInfo class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.job_id = job_id\n self.num_machines_failed = num_machines_failed\n self.num_machines_passed = num_machines_passed\n self.num_machines_total = num_machines_total\n self.registering_app = registering_app\n self.state = state\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n job_id = dictionary.get('jobId')\n num_machines_failed = dictionary.get('numMachinesFailed')\n num_machines_passed = dictionary.get('numMachinesPassed')\n num_machines_total = dictionary.get('numMachinesTotal')\n registering_app = dictionary.get('registeringApp')\n state = dictionary.get('state')\n return cls(job_id, num_machines_failed, num_machines_passed, num_machines_total, registering_app, state)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000368", "length_bytes": 3321, "license_type": "permissive", "methods": [{"docstring": "Constructor for the BulkInstallAppTaskInfo class", "name": "__init__", "signature": "def __init__(self, job_id=None, num_machines_failed=None, num_machines_passed=None, num_machines_total=None, registering_app=None, state=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `BulkInstallAppTaskInfo` described below.\n\nClass description:\nImplementation of the 'BulkInstallAppTaskInfo' model. Parameters for a bulk install app task. Attributes: job_id (string): Job id of the task. num_machines_failed (int): Number of machines on which task is started. num_machines_passed (int): Number of machines on which task is started. num_machines_total (int): Number of machines on which task is started. registering_app (RegisteringAppEnum): Application being registered. This param is used to indicate the app for which the job is created. 'oracle' indicates that the job was created for oracle app. 'msSql' indicates that the job was created for msSql app. 'physical' indicates that the job was created for physical machine. state (StateBulkIns\n\nMethod signatures and docstrings:\n- def __init__(self, job_id=None, num_machines_failed=None, num_machines_passed=None, num_machines_total=None, registering_app=None, state=None): Constructor for the BulkInstallAppTaskInfo class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `BulkInstallAppTaskInfo` described below.\n\nClass description:\nImplementation of the 'BulkInstallAppTaskInfo' model. Parameters for a bulk install app task. Attributes: job_id (string): Job id of the task. num_machines_failed (int): Number of machines on which task is started. num_machines_passed (int): Number of machines on which task is started. num_machines_total (int): Number of machines on which task is started. registering_app (RegisteringAppEnum): Application being registered. 
This param is used to indicate the app for which the job is created. 'oracle' indicates that the job was created for oracle app. 'msSql' indicates that the job was created for msSql app. 'physical' indicates that the job was created for physical machine. state (StateBulkIns\n\nMethod signatures and docstrings:\n- def __init__(self, job_id=None, num_machines_failed=None, num_machines_passed=None, num_machines_total=None, registering_app=None, state=None): Constructor for the BulkInstallAppTaskInfo class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass BulkInstallAppTaskInfo:\n \"\"\"Implementation of the 'BulkInstallAppTaskInfo' model. Parameters for a bulk install app task. Attributes: job_id (string): Job id of the task. num_machines_failed (int): Number of machines on which task is started. num_machines_passed (int): Number of machines on which task is started. num_machines_total (int): Number of machines on which task is started. registering_app (RegisteringAppEnum): Application being registered. This param is used to indicate the app for which the job is created. 'oracle' indicates that the job was created for oracle app. 'msSql' indicates that the job was created for msSql app. 'physical' indicates that the job was created for physical machine. state (StateBulkIns\"\"\"\n\n def __init__(self, job_id=None, num_machines_failed=None, num_machines_passed=None, num_machines_total=None, registering_app=None, state=None):\n \"\"\"Constructor for the BulkInstallAppTaskInfo class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.job_id = job_id\n self.num_machines_failed = num_machines_failed\n self.num_machines_passed = num_machines_passed\n self.num_machines_total = num_machines_total\n self.registering_app = registering_app\n self.state = state\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n job_id = dictionary.get('jobId')\n num_machines_failed = dictionary.get('numMachinesFailed')\n num_machines_passed = dictionary.get('numMachinesPassed')\n num_machines_total = dictionary.get('numMachinesTotal')\n registering_app = dictionary.get('registeringApp')\n state = dictionary.get('state')\n return cls(job_id, num_machines_failed, num_machines_passed, num_machines_total, registering_app, state)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass BulkInstallAppTaskInfo:\n \"\"\"Implementation of the 'BulkInstallAppTaskInfo' model. Parameters for a bulk install app task. Attributes: job_id (string): Job id of the task. num_machines_failed (int): Number of machines on which task is started. num_machines_passed (int): Number of machines on which task is started. num_machines_total (int): Number of machines on which task is started. registering_app (RegisteringAppEnum): Application being registered. 
This param is used to indicate the app for which the job is created. 'oracle' indicates that the job was created for oracle app. 'msSql' indicates that the job was created for msSql app. 'physical' indicates that the job was created for physical machine. state (StateBulkIns\"\"\"\n\n def __init__(self, job_id=None, num_machines_failed=None, num_machines_passed=None, num_machines_total=None, registering_app=None, state=None):\n \"\"\"Constructor for the BulkInstallAppTaskInfo class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BulkInstallAppTaskInfo:\n \"\"\"Implementation of the 'BulkInstallAppTaskInfo' model. Parameters for a bulk install app task. Attributes: job_id (string): Job id of the task. num_machines_failed (int): Number of machines on which task is started. num_machines_passed (int): Number of machines on which task is started. num_machines_total (int): Number of machines on which task is started. registering_app (RegisteringAppEnum): Application being registered. This param is used to indicate the app for which the job is created. 'oracle' indicates that the job was created for oracle app. 'msSql' indicates that the job was created for msSql app. 'physical' indicates that the job was created for physical machine. state (StateBulkIns\"\"\"\n\n def __init__(self, job_id=None, num_machines_failed=None, num_machines_passed=None, num_machines_total=None, registering_app=None, state=None):\n \"\"\"Constructor for the BulkInstallAppTaskInfo class\"\"\"\n self.job_id = job_id\n self.num_machines_failed = num_machines_failed\n self.num_machines_passed = num_machines_passed\n self.num_machines_total = num_machines_total\n self.registering_app = registering_app\n self.state = state\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n job_id = dictionary.get('jobId')\n num_machines_failed = dictionary.get('numMachinesFailed')\n num_machines_passed = dictionary.get('numMachinesPassed')\n num_machines_total = dictionary.get('numMachinesTotal')\n registering_app = dictionary.get('registeringApp')\n state = dictionary.get('state')\n return cls(job_id, num_machines_failed, num_machines_passed, num_machines_total, registering_app, state)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/bulk_install_app_task_info.py", "source_repo": "cohesity/management-sdk-python", "split": "val", "star_events_count": 24} {"blob_id": "55ae62165f757289721c3796b3aaa2206ff3ad2a", "bodies": ["self.is_categorical = is_categorical\nself.is_binary = len(unique_values) == 2\nself.unique_values = unique_values\nself.min = min(unique_values)\nself.max = max(unique_values)", "mean = stats.mean(unique_values)\nstdev = stats.stdev(unique_values)\nreturn [mean - stdev, mean + stdev]", "min_max_diff = self.max - self.min\nbound_diff = upper_bound - lower_bound\nreturn (value - self.min) / min_max_diff * bound_diff + lower_bound", "encoded = []\nif self.is_categorical:\n if self.is_binary:\n encoded.append(0 if value == self.unique_values[0] else 1)\n else:\n for index in range(len(self.unique_values)):\n unique = self.unique_values[index]\n encoded.append(1 if value == unique else 0)\nelse:\n normalized = self.__normalize(value, -1, 1)\n encoded.append(normalized)\nreturn encoded"], "bodies_text": "<|body_start_0|>\n self.is_categorical = is_categorical\n self.is_binary = len(unique_values) == 2\n self.unique_values = unique_values\n self.min = min(unique_values)\n self.max = max(unique_values)\n<|end_body_0|>\n\n<|body_start_1|>\n mean = stats.mean(unique_values)\n stdev = stats.stdev(unique_values)\n return [mean - stdev, mean + stdev]\n<|end_body_1|>\n\n<|body_start_2|>\n min_max_diff = self.max - self.min\n bound_diff = upper_bound - lower_bound\n return (value - self.min) / min_max_diff * bound_diff + lower_bound\n<|end_body_2|>\n\n<|body_start_3|>\n encoded = []\n if self.is_categorical:\n if self.is_binary:\n encoded.append(0 if value == self.unique_values[0] else 1)\n else:\n for index in range(len(self.unique_values)):\n unique = self.unique_values[index]\n encoded.append(1 if value == unique else 0)\n else:\n normalized = self.__normalize(value, -1, 1)\n encoded.append(normalized)\n return encoded\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Encoder", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Encoder:\n\n def __init__(self, unique_values, is_categorical):\n \"\"\"Constructor of an Encoder using one-hot-encoding\"\"\"\n <|body_0|>\n\n def __get_stdev_band(self, unique_values):\n \"\"\"Get the lower bound and upper bound for the standard deviation band for continuous value.\"\"\"\n <|body_1|>\n\n def __normalize(self, value, lower_bound, upper_bound):\n \"\"\"Normalize the value to the lower bound and upper bound by the max & min\"\"\"\n <|body_2|>\n\n def encode(self, value):\n \"\"\"Get one-hot encoding for a value based on the unique values in this encoder. 
Return a list of 0s except 1 at the index that matches the unique value index.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.is_categorical = is_categorical\n self.is_binary = len(unique_values) == 2\n self.unique_values = unique_values\n self.min = min(unique_values)\n self.max = max(unique_values)\n<|end_body_0|>\n\n<|body_start_1|>\n mean = stats.mean(unique_values)\n stdev = stats.stdev(unique_values)\n return [mean - stdev, mean + stdev]\n<|end_body_1|>\n\n<|body_start_2|>\n min_max_diff = self.max - self.min\n bound_diff = upper_bound - lower_bound\n return (value - self.min) / min_max_diff * bound_diff + lower_bound\n<|end_body_2|>\n\n<|body_start_3|>\n encoded = []\n if self.is_categorical:\n if self.is_binary:\n encoded.append(0 if value == self.unique_values[0] else 1)\n else:\n for index in range(len(self.unique_values)):\n unique = self.unique_values[index]\n encoded.append(1 if value == unique else 0)\n else:\n normalized = self.__normalize(value, -1, 1)\n encoded.append(normalized)\n return encoded\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000369", "length_bytes": 1820, "license_type": "no_license", "methods": [{"docstring": "Constructor of an Encoder using one-hot-encoding", "name": "__init__", "signature": "def __init__(self, unique_values, is_categorical)"}, {"docstring": "Get the lower bound and upper bound for the standard deviation band for continuous value.", "name": "__get_stdev_band", "signature": "def __get_stdev_band(self, unique_values)"}, {"docstring": "Normalize the value to the lower bound and upper bound by the max & min", "name": "__normalize", "signature": "def __normalize(self, value, lower_bound, upper_bound)"}, {"docstring": "Get one-hot encoding for a value based on the unique values in this encoder. Return a list of 0s except 1 at the index that matches the unique value index.", "name": "encode", "signature": "def encode(self, value)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_002793", "prompt": "Implement the Python class `Encoder` described below.\n\nClass description:\nImplement the Encoder class.\n\nMethod signatures and docstrings:\n- def __init__(self, unique_values, is_categorical): Constructor of an Encoder using one-hot-encoding\n- def __get_stdev_band(self, unique_values): Get the lower bound and upper bound for the standard deviation band for continuous value.\n- def __normalize(self, value, lower_bound, upper_bound): Normalize the value to the lower bound and upper bound by the max & min\n- def encode(self, value): Get one-hot encoding for a value based on the unique values in this encoder. Return a list of 0s except 1 at the index that matches the unique value index.", "prompted_full_text": "Implement the Python class `Encoder` described below.\n\nClass description:\nImplement the Encoder class.\n\nMethod signatures and docstrings:\n- def __init__(self, unique_values, is_categorical): Constructor of an Encoder using one-hot-encoding\n- def __get_stdev_band(self, unique_values): Get the lower bound and upper bound for the standard deviation band for continuous value.\n- def __normalize(self, value, lower_bound, upper_bound): Normalize the value to the lower bound and upper bound by the max & min\n- def encode(self, value): Get one-hot encoding for a value based on the unique values in this encoder. 
Return a list of 0s except 1 at the index that matches the unique value index.\n\n<|skeleton|>\nclass Encoder:\n\n def __init__(self, unique_values, is_categorical):\n \"\"\"Constructor of an Encoder using one-hot-encoding\"\"\"\n <|body_0|>\n\n def __get_stdev_band(self, unique_values):\n \"\"\"Get the lower bound and upper bound for the standard deviation band for continuous value.\"\"\"\n <|body_1|>\n\n def __normalize(self, value, lower_bound, upper_bound):\n \"\"\"Normalize the value to the lower bound and upper bound by the max & min\"\"\"\n <|body_2|>\n\n def encode(self, value):\n \"\"\"Get one-hot encoding for a value based on the unique values in this encoder. Return a list of 0s except 1 at the index that matches the unique value index.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.is_categorical = is_categorical\n self.is_binary = len(unique_values) == 2\n self.unique_values = unique_values\n self.min = min(unique_values)\n self.max = max(unique_values)\n<|end_body_0|>\n\n<|body_start_1|>\n mean = stats.mean(unique_values)\n stdev = stats.stdev(unique_values)\n return [mean - stdev, mean + stdev]\n<|end_body_1|>\n\n<|body_start_2|>\n min_max_diff = self.max - self.min\n bound_diff = upper_bound - lower_bound\n return (value - self.min) / min_max_diff * bound_diff + lower_bound\n<|end_body_2|>\n\n<|body_start_3|>\n encoded = []\n if self.is_categorical:\n if self.is_binary:\n encoded.append(0 if value == self.unique_values[0] else 1)\n else:\n for index in range(len(self.unique_values)):\n unique = self.unique_values[index]\n encoded.append(1 if value == unique else 0)\n else:\n normalized = self.__normalize(value, -1, 1)\n encoded.append(normalized)\n return encoded\n<|end_body_3|>\n", "revision_id": "9ae339f81fc7134ba9058fe975dec9ac7e3aaba4", "skeleton": "<|skeleton|>\nclass Encoder:\n\n def __init__(self, unique_values, is_categorical):\n \"\"\"Constructor of an Encoder using one-hot-encoding\"\"\"\n <|body_0|>\n\n def __get_stdev_band(self, unique_values):\n \"\"\"Get the lower bound and upper bound for the standard deviation band for continuous value.\"\"\"\n <|body_1|>\n\n def __normalize(self, value, lower_bound, upper_bound):\n \"\"\"Normalize the value to the lower bound and upper bound by the max & min\"\"\"\n <|body_2|>\n\n def encode(self, value):\n \"\"\"Get one-hot encoding for a value based on the unique values in this encoder. 
Return a list of 0s except 1 at the index that matches the unique value index.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Encoder:\n def __init__(self, unique_values, is_categorical):\n \"\"\"Constructor of an Encoder using one-hot-encoding\"\"\"\n self.is_categorical = is_categorical\n self.is_binary = len(unique_values) == 2\n self.unique_values = unique_values\n self.min = min(unique_values)\n self.max = max(unique_values)\n\n def __get_stdev_band(self, unique_values):\n \"\"\"Get the lower bound and upper bound for the standard deviation band for continuous value.\"\"\"\n mean = stats.mean(unique_values)\n stdev = stats.stdev(unique_values)\n return [mean - stdev, mean + stdev]\n\n def __normalize(self, value, lower_bound, upper_bound):\n \"\"\"Normalize the value to the lower bound and upper bound by the max & min\"\"\"\n min_max_diff = self.max - self.min\n bound_diff = upper_bound - lower_bound\n return (value - self.min) / min_max_diff * bound_diff + lower_bound\n\n def encode(self, value):\n \"\"\"Get one-hot encoding for a value based on the unique values in this encoder. Return a list of 0s except 1 at the index that matches the unique value index.\"\"\"\n encoded = []\n if self.is_categorical:\n if self.is_binary:\n encoded.append(0 if value == self.unique_values[0] else 1)\n else:\n for index in range(len(self.unique_values)):\n unique = self.unique_values[index]\n encoded.append(1 if value == unique else 0)\n else:\n normalized = self.__normalize(value, -1, 1)\n encoded.append(normalized)\n return encoded\n", "source": "the_stack_v2_python_sparse", "source_path": "Project6/encoding.py", "source_repo": "vincy0320/School_Intro_to_ML", "split": "val", "star_events_count": 0} {"blob_id": "d283d7777f0a055648dbbd3de345dcc0b327d241", "bodies": ["login_page.LoginPage(self.driver).login()\nsleep(2)\nlandlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()\nsleep(2)\nlandlord_nav_page.LandlordNavPage(self.driver).close_weiChat()\nsleep(2)\nlandlord_nav_page.LandlordNavPage(self.driver).activitymanager()\nsleep(1)\nlandlord_nav_page.LandlordNavPage(self.driver).close_weiChat()\npo = landlord_activity_page.LandlordActivity(self.driver)\nprint(po.text())\nsleep(2)\npo.active_good()\nsleep(2)\nfunction.insert_img(self.driver, 'activity_good.png')\npo.img_close()", "landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()\nsleep(2)\nlandlord_nav_page.LandlordNavPage(self.driver).activitymanager()\nsleep(1)\npo = landlord_activity_page.LandlordActivity(self.driver)\nsleep(2)\npo.regular_desc()\nfunction.insert_img(self.driver, 'regular_desc.png')\nprint(po.regular_desc_text())\npo.regular_desc_close()"], "bodies_text": "<|body_start_0|>\n login_page.LoginPage(self.driver).login()\n sleep(2)\n landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()\n sleep(2)\n landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()\n sleep(2)\n landlord_nav_page.LandlordNavPage(self.driver).activitymanager()\n sleep(1)\n landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()\n po = landlord_activity_page.LandlordActivity(self.driver)\n print(po.text())\n sleep(2)\n po.active_good()\n sleep(2)\n function.insert_img(self.driver, 'activity_good.png')\n po.img_close()\n<|end_body_0|>\n\n<|body_start_1|>\n landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()\n sleep(2)\n landlord_nav_page.LandlordNavPage(self.driver).activitymanager()\n sleep(1)\n po = 
landlord_activity_page.LandlordActivity(self.driver)\n sleep(2)\n po.regular_desc()\n function.insert_img(self.driver, 'regular_desc.png')\n print(po.regular_desc_text())\n po.regular_desc_close()\n<|end_body_1|>\n", "class_docstring": "Activity settings", "class_name": "TestActivity", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestActivity:\n \"\"\"Activity settings\"\"\"\n\n def test_active_good(self):\n \"\"\"Activity benefits\"\"\"\n <|body_0|>\n\n def test_regular_desc(self):\n \"\"\"Activity rules\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n login_page.LoginPage(self.driver).login()\n sleep(2)\n landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()\n sleep(2)\n landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()\n sleep(2)\n landlord_nav_page.LandlordNavPage(self.driver).activitymanager()\n sleep(1)\n landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()\n po = landlord_activity_page.LandlordActivity(self.driver)\n print(po.text())\n sleep(2)\n po.active_good()\n sleep(2)\n function.insert_img(self.driver, 'activity_good.png')\n po.img_close()\n<|end_body_0|>\n\n<|body_start_1|>\n landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()\n sleep(2)\n landlord_nav_page.LandlordNavPage(self.driver).activitymanager()\n sleep(1)\n po = landlord_activity_page.LandlordActivity(self.driver)\n sleep(2)\n po.regular_desc()\n function.insert_img(self.driver, 'regular_desc.png')\n print(po.regular_desc_text())\n po.regular_desc_close()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000370", "length_bytes": 1807, "license_type": "permissive", "methods": [{"docstring": "Activity benefits", "name": "test_active_good", "signature": "def test_active_good(self)"}, {"docstring": "Activity rules", "name": "test_regular_desc", "signature": "def test_regular_desc(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000087", "prompt": "Implement the Python class `TestActivity` described below.\n\nClass description:\nActivity settings\n\nMethod signatures and docstrings:\n- def test_active_good(self): Activity benefits\n- def test_regular_desc(self): Activity rules", "prompted_full_text": "Implement the Python class `TestActivity` described below.\n\nClass description:\nActivity settings\n\nMethod signatures and docstrings:\n- def test_active_good(self): Activity benefits\n- def test_regular_desc(self): Activity rules\n\n<|skeleton|>\nclass TestActivity:\n \"\"\"Activity settings\"\"\"\n\n def test_active_good(self):\n \"\"\"Activity benefits\"\"\"\n <|body_0|>\n\n def test_regular_desc(self):\n \"\"\"Activity rules\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n login_page.LoginPage(self.driver).login()\n sleep(2)\n landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()\n sleep(2)\n landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()\n sleep(2)\n landlord_nav_page.LandlordNavPage(self.driver).activitymanager()\n sleep(1)\n landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()\n po = landlord_activity_page.LandlordActivity(self.driver)\n print(po.text())\n sleep(2)\n po.active_good()\n sleep(2)\n function.insert_img(self.driver, 'activity_good.png')\n po.img_close()\n<|end_body_0|>\n\n<|body_start_1|>\n landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()\n sleep(2)\n landlord_nav_page.LandlordNavPage(self.driver).activitymanager()\n sleep(1)\n po = landlord_activity_page.LandlordActivity(self.driver)\n sleep(2)\n po.regular_desc()\n function.insert_img(self.driver, 'regular_desc.png')\n print(po.regular_desc_text())\n po.regular_desc_close()\n<|end_body_1|>\n", "revision_id": 
"192c70c49a8e9e072b9d0d0136f02c653c589410", "skeleton": "<|skeleton|>\nclass TestActivity:\n \"\"\"Activity settings\"\"\"\n\n def test_active_good(self):\n \"\"\"Activity benefits\"\"\"\n <|body_0|>\n\n def test_regular_desc(self):\n \"\"\"Activity rules\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestActivity:\n \"\"\"Activity settings\"\"\"\n\n def test_active_good(self):\n \"\"\"Activity benefits\"\"\"\n login_page.LoginPage(self.driver).login()\n sleep(2)\n landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()\n sleep(2)\n landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()\n sleep(2)\n landlord_nav_page.LandlordNavPage(self.driver).activitymanager()\n sleep(1)\n landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()\n po = landlord_activity_page.LandlordActivity(self.driver)\n print(po.text())\n sleep(2)\n po.active_good()\n sleep(2)\n function.insert_img(self.driver, 'activity_good.png')\n po.img_close()\n\n def test_regular_desc(self):\n \"\"\"Activity rules\"\"\"\n landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()\n sleep(2)\n landlord_nav_page.LandlordNavPage(self.driver).activitymanager()\n sleep(1)\n po = landlord_activity_page.LandlordActivity(self.driver)\n sleep(2)\n po.regular_desc()\n function.insert_img(self.driver, 'regular_desc.png')\n print(po.regular_desc_text())\n po.regular_desc_close()\n", "source": "the_stack_v2_python_sparse", "source_path": "mayi/test_case/test_landlord_activity.py", "source_repo": "18701016443/mayi", "split": "val", "star_events_count": 0} {"blob_id": "60302b984384069445ac8e17abe3149213fa76b6", "bodies": ["parser.add_argument('--current', action='store_true', help='Version stored in database')\nparser.add_argument('--target', action='store_true', help='Version stored in settings')\nparser.add_argument('--update', action='store_true', help='Update database version')", "site = models.SiteSettings.objects.get()\ncurrent = site.version or '0.0.1'\ntarget = VERSION\nif options.get('current'):\n print(current)\n return\nif options.get('target'):\n print(target)\n return\nif options.get('update'):\n site.version = target\n site.save()\n return\nif current != target:\n print(f'{current}/{target}')\nelse:\n print(current)"], "bodies_text": "<|body_start_0|>\n parser.add_argument('--current', action='store_true', 
help='Version stored in database')\n parser.add_argument('--target', action='store_true', help='Version stored in settings')\n parser.add_argument('--update', action='store_true', help='Update database version')\n<|end_body_0|>\n\n<|body_start_1|>\n site = models.SiteSettings.objects.get()\n current = site.version or '0.0.1'\n target = VERSION\n if options.get('current'):\n print(current)\n return\n if options.get('target'):\n print(target)\n return\n if options.get('update'):\n site.version = target\n site.save()\n return\n if current != target:\n print(f'{current}/{target}')\n else:\n print(current)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000371", "length_bytes": 1408, "license_type": "no_license", "methods": [{"docstring": "specify which function to run", "name": "add_arguments", "signature": "def add_arguments(self, parser)"}, {"docstring": "execute init", "name": "handle", "signature": "def handle(self, *args, **options)"}], "n_methods": 2, "prompt": "Implement the Python class `Command` described below.\n\nClass description:\ncommand-line options\n\nMethod signatures and docstrings:\n- def add_arguments(self, parser): specify which function to run\n- def handle(self, *args, **options): execute init", "prompted_full_text": "Implement the Python class `Command` described below.\n\nClass description:\ncommand-line options\n\nMethod signatures and docstrings:\n- def add_arguments(self, parser): specify which function to run\n- def handle(self, *args, **options): execute init\n\n<|skeleton|>\nclass Command:\n \"\"\"command-line options\"\"\"\n\n def add_arguments(self, parser):\n \"\"\"specify which function to run\"\"\"\n <|body_0|>\n\n def handle(self, *args, **options):\n \"\"\"execute init\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n parser.add_argument('--current', action='store_true', help='Version stored in database')\n parser.add_argument('--target', action='store_true', help='Version stored in settings')\n parser.add_argument('--update', action='store_true', help='Update database version')\n<|end_body_0|>\n\n<|body_start_1|>\n site = models.SiteSettings.objects.get()\n current = site.version or '0.0.1'\n target = VERSION\n if options.get('current'):\n print(current)\n return\n if options.get('target'):\n print(target)\n return\n if options.get('update'):\n site.version = target\n site.save()\n return\n if current != target:\n print(f'{current}/{target}')\n else:\n print(current)\n<|end_body_1|>\n", "revision_id": "0f8da5b738047f3c34d60d93f59bdedd8f797224", "skeleton": "<|skeleton|>\nclass Command:\n \"\"\"command-line options\"\"\"\n\n def add_arguments(self, parser):\n \"\"\"specify which function to run\"\"\"\n <|body_0|>\n\n def handle(self, *args, **options):\n \"\"\"execute init\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Command:\n \"\"\"command-line options\"\"\"\n\n def add_arguments(self, parser):\n \"\"\"specify which function to run\"\"\"\n parser.add_argument('--current', action='store_true', help='Version stored in database')\n parser.add_argument('--target', action='store_true', help='Version stored in settings')\n parser.add_argument('--update', action='store_true', help='Update database version')\n\n def handle(self, *args, **options):\n \"\"\"execute init\"\"\"\n site = models.SiteSettings.objects.get()\n current = site.version or '0.0.1'\n target = VERSION\n if options.get('current'):\n print(current)\n 
return\n if options.get('target'):\n print(target)\n return\n if options.get('update'):\n site.version = target\n site.save()\n return\n if current != target:\n print(f'{current}/{target}')\n else:\n print(current)\n", "source": "the_stack_v2_python_sparse", "source_path": "bookwyrm/management/commands/instance_version.py", "source_repo": "bookwyrm-social/bookwyrm", "split": "val", "star_events_count": 1398} {"blob_id": "af788f1b33b24a6c2c3e80cb974c69b73c94106a", "bodies": ["try:\n login_options = OrgService.get_login_options_for_org(org_id, allowed_roles=ALL_ALLOWED_ROLES)\n response, status = (jsonify({'loginOption': login_options.login_source if login_options else None}), http_status.HTTP_200_OK)\nexcept BusinessException as exception:\n response, status = ({'code': exception.code, 'message': exception.message}, exception.status_code)\nreturn (response, status)", "request_json = request.get_json()\nlogin_option_val = request_json.get('loginOption')\ntry:\n login_option = OrgService.add_login_option(org_id, login_option_val)\n response, status = (jsonify({'login_option': login_option.login_source}), http_status.HTTP_201_CREATED)\nexcept BusinessException as exception:\n response, status = ({'code': exception.code, 'message': exception.message}, exception.status_code)\nreturn (response, status)", "request_json = request.get_json()\nlogin_option_val = request_json.get('loginOption')\ntry:\n login_option = OrgService.update_login_option(org_id, login_option_val)\n response, status = (jsonify({'login_option': login_option.login_source}), http_status.HTTP_201_CREATED)\nexcept BusinessException as exception:\n response, status = ({'code': exception.code, 'message': exception.message}, exception.status_code)\nreturn (response, status)"], "bodies_text": "<|body_start_0|>\n try:\n login_options = OrgService.get_login_options_for_org(org_id, allowed_roles=ALL_ALLOWED_ROLES)\n response, status = (jsonify({'loginOption': login_options.login_source if login_options else None}), http_status.HTTP_200_OK)\n except BusinessException as exception:\n response, status = ({'code': exception.code, 'message': exception.message}, exception.status_code)\n return (response, status)\n<|end_body_0|>\n\n<|body_start_1|>\n request_json = request.get_json()\n login_option_val = request_json.get('loginOption')\n try:\n login_option = OrgService.add_login_option(org_id, login_option_val)\n response, status = (jsonify({'login_option': login_option.login_source}), http_status.HTTP_201_CREATED)\n except BusinessException as exception:\n response, status = ({'code': exception.code, 'message': exception.message}, exception.status_code)\n return (response, status)\n<|end_body_1|>\n\n<|body_start_2|>\n request_json = request.get_json()\n login_option_val = request_json.get('loginOption')\n try:\n login_option = OrgService.update_login_option(org_id, login_option_val)\n response, status = (jsonify({'login_option': login_option.login_source}), http_status.HTTP_201_CREATED)\n except BusinessException as exception:\n response, status = ({'code': exception.code, 'message': exception.message}, exception.status_code)\n return (response, status)\n<|end_body_2|>\n", "class_docstring": "Resource for managing org login options.", "class_name": "OrgLoginOptions", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OrgLoginOptions:\n \"\"\"Resource for managing org login options.\"\"\"\n\n def get(org_id):\n \"\"\"Retrieve the login option associated with 
the specified org.\"\"\"\n <|body_0|>\n\n def post(org_id):\n \"\"\"Create a new login type for the specified org.\"\"\"\n <|body_1|>\n\n def put(org_id):\n \"\"\"Update the login type for the specified org.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n login_options = OrgService.get_login_options_for_org(org_id, allowed_roles=ALL_ALLOWED_ROLES)\n response, status = (jsonify({'loginOption': login_options.login_source if login_options else None}), http_status.HTTP_200_OK)\n except BusinessException as exception:\n response, status = ({'code': exception.code, 'message': exception.message}, exception.status_code)\n return (response, status)\n<|end_body_0|>\n\n<|body_start_1|>\n request_json = request.get_json()\n login_option_val = request_json.get('loginOption')\n try:\n login_option = OrgService.add_login_option(org_id, login_option_val)\n response, status = (jsonify({'login_option': login_option.login_source}), http_status.HTTP_201_CREATED)\n except BusinessException as exception:\n response, status = ({'code': exception.code, 'message': exception.message}, exception.status_code)\n return (response, status)\n<|end_body_1|>\n\n<|body_start_2|>\n request_json = request.get_json()\n login_option_val = request_json.get('loginOption')\n try:\n login_option = OrgService.update_login_option(org_id, login_option_val)\n response, status = (jsonify({'login_option': login_option.login_source}), http_status.HTTP_201_CREATED)\n except BusinessException as exception:\n response, status = ({'code': exception.code, 'message': exception.message}, exception.status_code)\n return (response, status)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000372", "length_bytes": 30185, "license_type": "permissive", "methods": [{"docstring": "Retrieve the login option associated with the specified org.", "name": "get", "signature": "def get(org_id)"}, {"docstring": "Create a new login type for the specified org.", "name": "post", "signature": "def post(org_id)"}, {"docstring": "Update the login type for the specified org.", "name": "put", "signature": "def put(org_id)"}], "n_methods": 3, "prompt": "Implement the Python class `OrgLoginOptions` described below.\n\nClass description:\nResource for managing org login options.\n\nMethod signatures and docstrings:\n- def get(org_id): Retrieve the login option associated with the specified org.\n- def post(org_id): Create a new login type for the specified org.\n- def put(org_id): Update the login type for the specified org.", "prompted_full_text": "Implement the Python class `OrgLoginOptions` described below.\n\nClass description:\nResource for managing org login options.\n\nMethod signatures and docstrings:\n- def get(org_id): Retrieve the login option associated with the specified org.\n- def post(org_id): Create a new login type for the specified org.\n- def put(org_id): Update the login type for the specified org.\n\n<|skeleton|>\nclass OrgLoginOptions:\n \"\"\"Resource for managing org login options.\"\"\"\n\n def get(org_id):\n \"\"\"Retrieve the login option associated with the specified org.\"\"\"\n <|body_0|>\n\n def post(org_id):\n \"\"\"Create a new login type for the specified org.\"\"\"\n <|body_1|>\n\n def put(org_id):\n \"\"\"Update the login type for the specified org.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n login_options = OrgService.get_login_options_for_org(org_id, allowed_roles=ALL_ALLOWED_ROLES)\n response, status = (jsonify({'loginOption': 
login_options.login_source if login_options else None}), http_status.HTTP_200_OK)\n except BusinessException as exception:\n response, status = ({'code': exception.code, 'message': exception.message}, exception.status_code)\n return (response, status)\n<|end_body_0|>\n\n<|body_start_1|>\n request_json = request.get_json()\n login_option_val = request_json.get('loginOption')\n try:\n login_option = OrgService.add_login_option(org_id, login_option_val)\n response, status = (jsonify({'login_option': login_option.login_source}), http_status.HTTP_201_CREATED)\n except BusinessException as exception:\n response, status = ({'code': exception.code, 'message': exception.message}, exception.status_code)\n return (response, status)\n<|end_body_1|>\n\n<|body_start_2|>\n request_json = request.get_json()\n login_option_val = request_json.get('loginOption')\n try:\n login_option = OrgService.update_login_option(org_id, login_option_val)\n response, status = (jsonify({'login_option': login_option.login_source}), http_status.HTTP_201_CREATED)\n except BusinessException as exception:\n response, status = ({'code': exception.code, 'message': exception.message}, exception.status_code)\n return (response, status)\n<|end_body_2|>\n", "revision_id": "923cb8a3ee88dcbaf0fe800ca70022b3c13c1d01", "skeleton": "<|skeleton|>\nclass OrgLoginOptions:\n \"\"\"Resource for managing org login options.\"\"\"\n\n def get(org_id):\n \"\"\"Retrieve the login option associated with the specified org.\"\"\"\n <|body_0|>\n\n def post(org_id):\n \"\"\"Create a new login type for the specified org.\"\"\"\n <|body_1|>\n\n def put(org_id):\n \"\"\"Update the login type for the specified org.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class OrgLoginOptions:\n \"\"\"Resource for managing org login options.\"\"\"\n\n def get(org_id):\n \"\"\"Retrieve the login option associated with the specified org.\"\"\"\n try:\n login_options = OrgService.get_login_options_for_org(org_id, allowed_roles=ALL_ALLOWED_ROLES)\n response, status = (jsonify({'loginOption': login_options.login_source if login_options else None}), http_status.HTTP_200_OK)\n except BusinessException as exception:\n response, status = ({'code': exception.code, 'message': exception.message}, exception.status_code)\n return (response, status)\n\n def post(org_id):\n \"\"\"Create a new login type for the specified org.\"\"\"\n request_json = request.get_json()\n login_option_val = request_json.get('loginOption')\n try:\n login_option = OrgService.add_login_option(org_id, login_option_val)\n response, status = (jsonify({'login_option': login_option.login_source}), http_status.HTTP_201_CREATED)\n except BusinessException as exception:\n response, status = ({'code': exception.code, 'message': exception.message}, exception.status_code)\n return (response, status)\n\n def put(org_id):\n \"\"\"Update the login type for the specified org.\"\"\"\n request_json = request.get_json()\n login_option_val = request_json.get('loginOption')\n try:\n login_option = OrgService.update_login_option(org_id, login_option_val)\n response, status = (jsonify({'login_option': login_option.login_source}), http_status.HTTP_201_CREATED)\n except BusinessException as exception:\n response, status = ({'code': exception.code, 'message': exception.message}, exception.status_code)\n return (response, status)\n", "source": "the_stack_v2_python_sparse", "source_path": 
"auth-api/src/auth_api/resources/org.py", "source_repo": "bcgov/sbc-auth", "split": "val", "star_events_count": 13} {"blob_id": "57ac49ff2cbd10e51d5a864969d7dc8f7092bd18", "bodies": ["features = features.contiguous()\nindices = indices.contiguous()\nif features_batch_cnt is not None and indices_batch_cnt is not None:\n assert features_batch_cnt.dtype == torch.int\n assert indices_batch_cnt.dtype == torch.int\n M, nsample = indices.size()\n N, C = features.size()\n B = indices_batch_cnt.shape[0]\n output = features.new_zeros((M, C, nsample))\n ext_module.stack_group_points_forward(features, features_batch_cnt, indices, indices_batch_cnt, output, b=B, m=M, c=C, nsample=nsample)\n ctx.for_backwards = (B, N, indices, features_batch_cnt, indices_batch_cnt)\nelse:\n B, nfeatures, nsample = indices.size()\n _, C, N = features.size()\n output = features.new_zeros(B, C, nfeatures, nsample)\n ext_module.group_points_forward(features, indices, output, b=B, c=C, n=N, npoints=nfeatures, nsample=nsample)\n ctx.for_backwards = (indices, N)\nreturn output", "if len(ctx.for_backwards) != 5:\n idx, N = ctx.for_backwards\n B, C, npoint, nsample = grad_out.size()\n grad_features = grad_out.new_zeros(B, C, N)\n grad_out_data = grad_out.data.contiguous()\n ext_module.group_points_backward(grad_out_data, idx, grad_features.data, b=B, c=C, n=N, npoints=npoint, nsample=nsample)\n return (grad_features, None)\nelse:\n B, N, idx, features_batch_cnt, idx_batch_cnt = ctx.for_backwards\n M, C, nsample = grad_out.size()\n grad_features = grad_out.new_zeros(N, C)\n grad_out_data = grad_out.data.contiguous()\n ext_module.stack_group_points_backward(grad_out_data, idx, idx_batch_cnt, features_batch_cnt, grad_features.data, b=B, c=C, m=M, n=N, nsample=nsample)\n return (grad_features, None, None, None)"], "bodies_text": "<|body_start_0|>\n features = features.contiguous()\n indices = indices.contiguous()\n if features_batch_cnt is not None and indices_batch_cnt is not None:\n assert features_batch_cnt.dtype == torch.int\n assert indices_batch_cnt.dtype == torch.int\n M, nsample = indices.size()\n N, C = features.size()\n B = indices_batch_cnt.shape[0]\n output = features.new_zeros((M, C, nsample))\n ext_module.stack_group_points_forward(features, features_batch_cnt, indices, indices_batch_cnt, output, b=B, m=M, c=C, nsample=nsample)\n ctx.for_backwards = (B, N, indices, features_batch_cnt, indices_batch_cnt)\n else:\n B, nfeatures, nsample = indices.size()\n _, C, N = features.size()\n output = features.new_zeros(B, C, nfeatures, nsample)\n ext_module.group_points_forward(features, indices, output, b=B, c=C, n=N, npoints=nfeatures, nsample=nsample)\n ctx.for_backwards = (indices, N)\n return output\n<|end_body_0|>\n\n<|body_start_1|>\n if len(ctx.for_backwards) != 5:\n idx, N = ctx.for_backwards\n B, C, npoint, nsample = grad_out.size()\n grad_features = grad_out.new_zeros(B, C, N)\n grad_out_data = grad_out.data.contiguous()\n ext_module.group_points_backward(grad_out_data, idx, grad_features.data, b=B, c=C, n=N, npoints=npoint, nsample=nsample)\n return (grad_features, None)\n else:\n B, N, idx, features_batch_cnt, idx_batch_cnt = ctx.for_backwards\n M, C, nsample = grad_out.size()\n grad_features = grad_out.new_zeros(N, C)\n grad_out_data = grad_out.data.contiguous()\n ext_module.stack_group_points_backward(grad_out_data, idx, idx_batch_cnt, features_batch_cnt, grad_features.data, b=B, c=C, m=M, n=N, nsample=nsample)\n return (grad_features, None, None, None)\n<|end_body_1|>\n", "class_docstring": "Group feature with 
given index.", "class_name": "GroupingOperation", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GroupingOperation:\n \"\"\"Group feature with given index.\"\"\"\n\n def forward(ctx, features: torch.Tensor, indices: torch.Tensor, features_batch_cnt: Optional[torch.Tensor]=None, indices_batch_cnt: Optional[torch.Tensor]=None) -> torch.Tensor:\n \"\"\"Args: features (Tensor): Tensor of features to group, input shape is (B, C, N) or stacked inputs (N1 + N2 ..., C). indices (Tensor): The indices of features to group with, input shape is (B, npoint, nsample) or stacked inputs (M1 + M2 ..., nsample). features_batch_cnt (Tensor, optional): Input features nums in each batch, just like (N1, N2, ...). Defaults to None. New in version 1.7.0. indices_batch_cnt (Tensor, optional): Input indices nums in each batch, just like (M1, M2, ...). Defaults to None. New in version 1.7.0. Returns: Tensor: Grouped features, the shape is (B, C, npoint, nsample) or (M1 + M2 ..., C, nsample).\"\"\"\n <|body_0|>\n\n def backward(ctx, grad_out: torch.Tensor) -> Tuple:\n \"\"\"Args: grad_out (Tensor): (B, C, npoint, nsample) tensor of the gradients of the output from forward. Returns: Tensor: (B, C, N) gradient of the features.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n features = features.contiguous()\n indices = indices.contiguous()\n if features_batch_cnt is not None and indices_batch_cnt is not None:\n assert features_batch_cnt.dtype == torch.int\n assert indices_batch_cnt.dtype == torch.int\n M, nsample = indices.size()\n N, C = features.size()\n B = indices_batch_cnt.shape[0]\n output = features.new_zeros((M, C, nsample))\n ext_module.stack_group_points_forward(features, features_batch_cnt, indices, indices_batch_cnt, output, b=B, m=M, c=C, nsample=nsample)\n ctx.for_backwards = (B, N, indices, features_batch_cnt, indices_batch_cnt)\n else:\n B, nfeatures, nsample = indices.size()\n _, C, N = features.size()\n output = features.new_zeros(B, C, nfeatures, nsample)\n ext_module.group_points_forward(features, indices, output, b=B, c=C, n=N, npoints=nfeatures, nsample=nsample)\n ctx.for_backwards = (indices, N)\n return output\n<|end_body_0|>\n\n<|body_start_1|>\n if len(ctx.for_backwards) != 5:\n idx, N = ctx.for_backwards\n B, C, npoint, nsample = grad_out.size()\n grad_features = grad_out.new_zeros(B, C, N)\n grad_out_data = grad_out.data.contiguous()\n ext_module.group_points_backward(grad_out_data, idx, grad_features.data, b=B, c=C, n=N, npoints=npoint, nsample=nsample)\n return (grad_features, None)\n else:\n B, N, idx, features_batch_cnt, idx_batch_cnt = ctx.for_backwards\n M, C, nsample = grad_out.size()\n grad_features = grad_out.new_zeros(N, C)\n grad_out_data = grad_out.data.contiguous()\n ext_module.stack_group_points_backward(grad_out_data, idx, idx_batch_cnt, features_batch_cnt, grad_features.data, b=B, c=C, m=M, n=N, nsample=nsample)\n return (grad_features, None, None, None)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000373", "length_bytes": 10890, "license_type": "permissive", "methods": [{"docstring": "Args: features (Tensor): Tensor of features to group, input shape is (B, C, N) or stacked inputs (N1 + N2 ..., C). indices (Tensor): The indices of features to group with, input shape is (B, npoint, nsample) or stacked inputs (M1 + M2 ..., nsample). features_batch_cnt (Tensor, optional): Input features nums in each batch, just like (N1, N2, ...). Defaults to None. New in version 1.7.0. 
indices_batch_cnt (Tensor, optional): Input indices nums in each batch, just like (M1, M2, ...). Defaults to None. New in version 1.7.0. Returns: Tensor: Grouped features, the shape is (B, C, npoint, nsample) or (M1 + M2 ..., C, nsample).", "name": "forward", "signature": "def forward(ctx, features: torch.Tensor, indices: torch.Tensor, features_batch_cnt: Optional[torch.Tensor]=None, indices_batch_cnt: Optional[torch.Tensor]=None) -> torch.Tensor"}, {"docstring": "Args: grad_out (Tensor): (B, C, npoint, nsample) tensor of the gradients of the output from forward. Returns: Tensor: (B, C, N) gradient of the features.", "name": "backward", "signature": "def backward(ctx, grad_out: torch.Tensor) -> Tuple"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004350", "prompt": "Implement the Python class `GroupingOperation` described below.\n\nClass description:\nGroup feature with given index.\n\nMethod signatures and docstrings:\n- def forward(ctx, features: torch.Tensor, indices: torch.Tensor, features_batch_cnt: Optional[torch.Tensor]=None, indices_batch_cnt: Optional[torch.Tensor]=None) -> torch.Tensor: Args: features (Tensor): Tensor of features to group, input shape is (B, C, N) or stacked inputs (N1 + N2 ..., C). indices (Tensor): The indices of features to group with, input shape is (B, npoint, nsample) or stacked inputs (M1 + M2 ..., nsample). features_batch_cnt (Tensor, optional): Input features nums in each batch, just like (N1, N2, ...). Defaults to None. New in version 1.7.0. indices_batch_cnt (Tensor, optional): Input indices nums in each batch, just like (M1, M2, ...). Defaults to None. New in version 1.7.0. Returns: Tensor: Grouped features, the shape is (B, C, npoint, nsample) or (M1 + M2 ..., C, nsample).\n- def backward(ctx, grad_out: torch.Tensor) -> Tuple: Args: grad_out (Tensor): (B, C, npoint, nsample) tensor of the gradients of the output from forward. Returns: Tensor: (B, C, N) gradient of the features.", "prompted_full_text": "Implement the Python class `GroupingOperation` described below.\n\nClass description:\nGroup feature with given index.\n\nMethod signatures and docstrings:\n- def forward(ctx, features: torch.Tensor, indices: torch.Tensor, features_batch_cnt: Optional[torch.Tensor]=None, indices_batch_cnt: Optional[torch.Tensor]=None) -> torch.Tensor: Args: features (Tensor): Tensor of features to group, input shape is (B, C, N) or stacked inputs (N1 + N2 ..., C). indices (Tensor): The indices of features to group with, input shape is (B, npoint, nsample) or stacked inputs (M1 + M2 ..., nsample). features_batch_cnt (Tensor, optional): Input features nums in each batch, just like (N1, N2, ...). Defaults to None. New in version 1.7.0. indices_batch_cnt (Tensor, optional): Input indices nums in each batch, just like (M1, M2, ...). Defaults to None. New in version 1.7.0. Returns: Tensor: Grouped features, the shape is (B, C, npoint, nsample) or (M1 + M2 ..., C, nsample).\n- def backward(ctx, grad_out: torch.Tensor) -> Tuple: Args: grad_out (Tensor): (B, C, npoint, nsample) tensor of the gradients of the output from forward. 
Returns: Tensor: (B, C, N) gradient of the features.\n\n<|skeleton|>\nclass GroupingOperation:\n \"\"\"Group feature with given index.\"\"\"\n\n def forward(ctx, features: torch.Tensor, indices: torch.Tensor, features_batch_cnt: Optional[torch.Tensor]=None, indices_batch_cnt: Optional[torch.Tensor]=None) -> torch.Tensor:\n \"\"\"Args: features (Tensor): Tensor of features to group, input shape is (B, C, N) or stacked inputs (N1 + N2 ..., C). indices (Tensor): The indices of features to group with, input shape is (B, npoint, nsample) or stacked inputs (M1 + M2 ..., nsample). features_batch_cnt (Tensor, optional): Input features nums in each batch, just like (N1, N2, ...). Defaults to None. New in version 1.7.0. indices_batch_cnt (Tensor, optional): Input indices nums in each batch, just like (M1, M2, ...). Defaults to None. New in version 1.7.0. Returns: Tensor: Grouped features, the shape is (B, C, npoint, nsample) or (M1 + M2 ..., C, nsample).\"\"\"\n <|body_0|>\n\n def backward(ctx, grad_out: torch.Tensor) -> Tuple:\n \"\"\"Args: grad_out (Tensor): (B, C, npoint, nsample) tensor of the gradients of the output from forward. Returns: Tensor: (B, C, N) gradient of the features.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n features = features.contiguous()\n indices = indices.contiguous()\n if features_batch_cnt is not None and indices_batch_cnt is not None:\n assert features_batch_cnt.dtype == torch.int\n assert indices_batch_cnt.dtype == torch.int\n M, nsample = indices.size()\n N, C = features.size()\n B = indices_batch_cnt.shape[0]\n output = features.new_zeros((M, C, nsample))\n ext_module.stack_group_points_forward(features, features_batch_cnt, indices, indices_batch_cnt, output, b=B, m=M, c=C, nsample=nsample)\n ctx.for_backwards = (B, N, indices, features_batch_cnt, indices_batch_cnt)\n else:\n B, nfeatures, nsample = indices.size()\n _, C, N = features.size()\n output = features.new_zeros(B, C, nfeatures, nsample)\n ext_module.group_points_forward(features, indices, output, b=B, c=C, n=N, npoints=nfeatures, nsample=nsample)\n ctx.for_backwards = (indices, N)\n return output\n<|end_body_0|>\n\n<|body_start_1|>\n if len(ctx.for_backwards) != 5:\n idx, N = ctx.for_backwards\n B, C, npoint, nsample = grad_out.size()\n grad_features = grad_out.new_zeros(B, C, N)\n grad_out_data = grad_out.data.contiguous()\n ext_module.group_points_backward(grad_out_data, idx, grad_features.data, b=B, c=C, n=N, npoints=npoint, nsample=nsample)\n return (grad_features, None)\n else:\n B, N, idx, features_batch_cnt, idx_batch_cnt = ctx.for_backwards\n M, C, nsample = grad_out.size()\n grad_features = grad_out.new_zeros(N, C)\n grad_out_data = grad_out.data.contiguous()\n ext_module.stack_group_points_backward(grad_out_data, idx, idx_batch_cnt, features_batch_cnt, grad_features.data, b=B, c=C, m=M, n=N, nsample=nsample)\n return (grad_features, None, None, None)\n<|end_body_1|>\n", "revision_id": "6e9ee26718b22961d5c34caca4108413b1b7b3af", "skeleton": "<|skeleton|>\nclass GroupingOperation:\n \"\"\"Group feature with given index.\"\"\"\n\n def forward(ctx, features: torch.Tensor, indices: torch.Tensor, features_batch_cnt: Optional[torch.Tensor]=None, indices_batch_cnt: Optional[torch.Tensor]=None) -> torch.Tensor:\n \"\"\"Args: features (Tensor): Tensor of features to group, input shape is (B, C, N) or stacked inputs (N1 + N2 ..., C). indices (Tensor): The indices of features to group with, input shape is (B, npoint, nsample) or stacked inputs (M1 + M2 ..., nsample). 
features_batch_cnt (Tensor, optional): Input features nums in each batch, just like (N1, N2, ...). Defaults to None. New in version 1.7.0. indices_batch_cnt (Tensor, optional): Input indices nums in each batch, just like (M1, M2, ...). Defaults to None. New in version 1.7.0. Returns: Tensor: Grouped features, the shape is (B, C, npoint, nsample) or (M1 + M2 ..., C, nsample).\"\"\"\n <|body_0|>\n\n def backward(ctx, grad_out: torch.Tensor) -> Tuple:\n \"\"\"Args: grad_out (Tensor): (B, C, npoint, nsample) tensor of the gradients of the output from forward. Returns: Tensor: (B, C, N) gradient of the features.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GroupingOperation:\n \"\"\"Group feature with given index.\"\"\"\n\n def forward(ctx, features: torch.Tensor, indices: torch.Tensor, features_batch_cnt: Optional[torch.Tensor]=None, indices_batch_cnt: Optional[torch.Tensor]=None) -> torch.Tensor:\n \"\"\"Args: features (Tensor): Tensor of features to group, input shape is (B, C, N) or stacked inputs (N1 + N2 ..., C). indices (Tensor): The indices of features to group with, input shape is (B, npoint, nsample) or stacked inputs (M1 + M2 ..., nsample). features_batch_cnt (Tensor, optional): Input features nums in each batch, just like (N1, N2, ...). Defaults to None. New in version 1.7.0. indices_batch_cnt (Tensor, optional): Input indices nums in each batch, just like (M1, M2, ...). Defaults to None. New in version 1.7.0. Returns: Tensor: Grouped features, the shape is (B, C, npoint, nsample) or (M1 + M2 ..., C, nsample).\"\"\"\n features = features.contiguous()\n indices = indices.contiguous()\n if features_batch_cnt is not None and indices_batch_cnt is not None:\n assert features_batch_cnt.dtype == torch.int\n assert indices_batch_cnt.dtype == torch.int\n M, nsample = indices.size()\n N, C = features.size()\n B = indices_batch_cnt.shape[0]\n output = features.new_zeros((M, C, nsample))\n ext_module.stack_group_points_forward(features, features_batch_cnt, indices, indices_batch_cnt, output, b=B, m=M, c=C, nsample=nsample)\n ctx.for_backwards = (B, N, indices, features_batch_cnt, indices_batch_cnt)\n else:\n B, nfeatures, nsample = indices.size()\n _, C, N = features.size()\n output = features.new_zeros(B, C, nfeatures, nsample)\n ext_module.group_points_forward(features, indices, output, b=B, c=C, n=N, npoints=nfeatures, nsample=nsample)\n ctx.for_backwards = (indices, N)\n return output\n\n def backward(ctx, grad_out: torch.Tensor) -> Tuple:\n \"\"\"Args: grad_out (Tensor): (B, C, npoint, nsample) tensor of the gradients of the output from forward. 
Returns: Tensor: (B, C, N) gradient of the features.\"\"\"\n if len(ctx.for_backwards) != 5:\n idx, N = ctx.for_backwards\n B, C, npoint, nsample = grad_out.size()\n grad_features = grad_out.new_zeros(B, C, N)\n grad_out_data = grad_out.data.contiguous()\n ext_module.group_points_backward(grad_out_data, idx, grad_features.data, b=B, c=C, n=N, npoints=npoint, nsample=nsample)\n return (grad_features, None)\n else:\n B, N, idx, features_batch_cnt, idx_batch_cnt = ctx.for_backwards\n M, C, nsample = grad_out.size()\n grad_features = grad_out.new_zeros(N, C)\n grad_out_data = grad_out.data.contiguous()\n ext_module.stack_group_points_backward(grad_out_data, idx, idx_batch_cnt, features_batch_cnt, grad_features.data, b=B, c=C, m=M, n=N, nsample=nsample)\n return (grad_features, None, None, None)\n", "source": "the_stack_v2_python_sparse", "source_path": "mmcv/ops/group_points.py", "source_repo": "open-mmlab/mmcv", "split": "val", "star_events_count": 5319} {"blob_id": "3accda0b5b82073651a328298b202670814179ec", "bodies": ["self.robot = robot\nself.relative_phase = relative_phase\nself.v = v\nself.a = a\nself.R = R\nself.amp_offset = amp_offset\nself.phase_offset = phase_offset\nself.phase_biases = self.generate_biases(relative_phase)", "phase_biases = np.zeros((self.robot.n_oscillators, self.robot.n_oscillators))\nfor i in range(self.robot.n_legs):\n phase_biases[i][i + self.robot.n_legs] = -self.phase_offset[0][i]\n phase_biases[i][i + 2 * self.robot.n_legs] = -self.phase_offset[1][i]\n phase_biases[i + self.robot.n_legs][i] = self.phase_offset[0][i]\n phase_biases[i + 2 * self.robot.n_legs][i] = self.phase_offset[1][i]\n for j in range(self.robot.n_legs):\n phase_biases[i][j] = relative_phase[i][j]\nreturn phase_biases"], "bodies_text": "<|body_start_0|>\n self.robot = robot\n self.relative_phase = relative_phase\n self.v = v\n self.a = a\n self.R = R\n self.amp_offset = amp_offset\n self.phase_offset = phase_offset\n self.phase_biases = self.generate_biases(relative_phase)\n<|end_body_0|>\n\n<|body_start_1|>\n phase_biases = np.zeros((self.robot.n_oscillators, self.robot.n_oscillators))\n for i in range(self.robot.n_legs):\n phase_biases[i][i + self.robot.n_legs] = -self.phase_offset[0][i]\n phase_biases[i][i + 2 * self.robot.n_legs] = -self.phase_offset[1][i]\n phase_biases[i + self.robot.n_legs][i] = self.phase_offset[0][i]\n phase_biases[i + 2 * self.robot.n_legs][i] = self.phase_offset[1][i]\n for j in range(self.robot.n_legs):\n phase_biases[i][j] = relative_phase[i][j]\n return phase_biases\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Gait", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Gait:\n\n def __init__(self, robot, relative_phase, v, a, R, amp_offset, phase_offset):\n \"\"\"Class containing default components of a gait. Formulation similar to Crespi2008 :param robot: Robot object :param relative_phase: phase difference between base joints :param v: frequency of each CPG - fixed and same right now :param R: amplitude of CPG - fixed and same right now :param a: positive constant :param phase_offset: Phase difference between first-second and second-third joint\"\"\"\n <|body_0|>\n\n def generate_biases(self, relative_phase):\n \"\"\"Generates the bias matrix depending on the gait. 
relative_phase defines the phase difference between the 6 base joints\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.robot = robot\n self.relative_phase = relative_phase\n self.v = v\n self.a = a\n self.R = R\n self.amp_offset = amp_offset\n self.phase_offset = phase_offset\n self.phase_biases = self.generate_biases(relative_phase)\n<|end_body_0|>\n\n<|body_start_1|>\n phase_biases = np.zeros((self.robot.n_oscillators, self.robot.n_oscillators))\n for i in range(self.robot.n_legs):\n phase_biases[i][i + self.robot.n_legs] = -self.phase_offset[0][i]\n phase_biases[i][i + 2 * self.robot.n_legs] = -self.phase_offset[1][i]\n phase_biases[i + self.robot.n_legs][i] = self.phase_offset[0][i]\n phase_biases[i + 2 * self.robot.n_legs][i] = self.phase_offset[1][i]\n for j in range(self.robot.n_legs):\n phase_biases[i][j] = relative_phase[i][j]\n return phase_biases\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000374", "length_bytes": 9855, "license_type": "no_license", "methods": [{"docstring": "Class containing default components of a gait. Formulation similar to Crespi2008 :param robot: Robot object :param relative_phase: phase difference between base joints :param v: frequency of each CPG - fixed and same right now :param R: amplitude of CPG - fixed and same right now :param a: positive constant :param phase_offset: Phase difference between first-second and second-third joint", "name": "__init__", "signature": "def __init__(self, robot, relative_phase, v, a, R, amp_offset, phase_offset)"}, {"docstring": "Generates the bias matrix depending on the gait. relative_phase defines the phase difference between the 6 base joints", "name": "generate_biases", "signature": "def generate_biases(self, relative_phase)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000401", "prompt": "Implement the Python class `Gait` described below.\n\nClass description:\nImplement the Gait class.\n\nMethod signatures and docstrings:\n- def __init__(self, robot, relative_phase, v, a, R, amp_offset, phase_offset): Class containing default components of a gait. Formulation similar to Crespi2008 :param robot: Robot object :param relative_phase: phase difference between base joints :param v: frequency of each CPG - fixed and same right now :param R: amplitude of CPG - fixed and same right now :param a: positive constant :param phase_offset: Phase difference between first-second and second-third joint\n- def generate_biases(self, relative_phase): Generates the bias matrix depending on the gait. relative_phase defines the phase difference between the 6 base joints", "prompted_full_text": "Implement the Python class `Gait` described below.\n\nClass description:\nImplement the Gait class.\n\nMethod signatures and docstrings:\n- def __init__(self, robot, relative_phase, v, a, R, amp_offset, phase_offset): Class containing default components of a gait. Formulation similar to Crespi2008 :param robot: Robot object :param relative_phase: phase difference between base joints :param v: frequency of each CPG - fixed and same right now :param R: amplitude of CPG - fixed and same right now :param a: positive constant :param phase_offset: Phase difference between first-second and second-third joint\n- def generate_biases(self, relative_phase): Generates the bias matrix depending on the gait. 
relative_phase defines the phase difference between the 6 base joints\n\n<|skeleton|>\nclass Gait:\n\n def __init__(self, robot, relative_phase, v, a, R, amp_offset, phase_offset):\n \"\"\"Class containing default components of a gait. Formulation similar to Crespi2008 :param robot: Robot object :param relative_phase: phase difference between base joints :param v: frequency of each CPG - fixed and same right now :param R: amplitude of CPG - fixed and same right now :param a: positive constant :param phase_offset: Phase difference between first-second and second-third joint\"\"\"\n <|body_0|>\n\n def generate_biases(self, relative_phase):\n \"\"\"Generates the bias matrix depending on the gait. relative_phase defines the phase difference between the 6 base joints\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.robot = robot\n self.relative_phase = relative_phase\n self.v = v\n self.a = a\n self.R = R\n self.amp_offset = amp_offset\n self.phase_offset = phase_offset\n self.phase_biases = self.generate_biases(relative_phase)\n<|end_body_0|>\n\n<|body_start_1|>\n phase_biases = np.zeros((self.robot.n_oscillators, self.robot.n_oscillators))\n for i in range(self.robot.n_legs):\n phase_biases[i][i + self.robot.n_legs] = -self.phase_offset[0][i]\n phase_biases[i][i + 2 * self.robot.n_legs] = -self.phase_offset[1][i]\n phase_biases[i + self.robot.n_legs][i] = self.phase_offset[0][i]\n phase_biases[i + 2 * self.robot.n_legs][i] = self.phase_offset[1][i]\n for j in range(self.robot.n_legs):\n phase_biases[i][j] = relative_phase[i][j]\n return phase_biases\n<|end_body_1|>\n", "revision_id": "463c5555a1b3c28c0d73bd05521e9758eef15e0e", "skeleton": "<|skeleton|>\nclass Gait:\n\n def __init__(self, robot, relative_phase, v, a, R, amp_offset, phase_offset):\n \"\"\"Class containing default components of a gait. Formulation similar to Crespi2008 :param robot: Robot object :param relative_phase: phase difference between base joints :param v: frequency of each CPG - fixed and same right now :param R: amplitude of CPG - fixed and same right now :param a: positive constant :param phase_offset: Phase difference between first-second and second-third joint\"\"\"\n <|body_0|>\n\n def generate_biases(self, relative_phase):\n \"\"\"Generates the bias matrix depending on the gait. relative_phase defines the phase difference between the 6 base joints\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Gait:\n def __init__(self, robot, relative_phase, v, a, R, amp_offset, phase_offset):\n \"\"\"Class containing default components of a gait. Formulation similar to Crespi2008 :param robot: Robot object :param relative_phase: phase difference between base joints :param v: frequency of each CPG - fixed and same right now :param R: amplitude of CPG - fixed and same right now :param a: positive constant :param phase_offset: Phase difference between first-second and second-third joint\"\"\"\n self.robot = robot\n self.relative_phase = relative_phase\n self.v = v\n self.a = a\n self.R = R\n self.amp_offset = amp_offset\n self.phase_offset = phase_offset\n self.phase_biases = self.generate_biases(relative_phase)\n\n def generate_biases(self, relative_phase):\n \"\"\"Generates the bias matrix depending on the gait. 
relative_phase defines the phase difference between the 6 base joints\"\"\"\n phase_biases = np.zeros((self.robot.n_oscillators, self.robot.n_oscillators))\n for i in range(self.robot.n_legs):\n phase_biases[i][i + self.robot.n_legs] = -self.phase_offset[0][i]\n phase_biases[i][i + 2 * self.robot.n_legs] = -self.phase_offset[1][i]\n phase_biases[i + self.robot.n_legs][i] = self.phase_offset[0][i]\n phase_biases[i + 2 * self.robot.n_legs][i] = self.phase_offset[1][i]\n for j in range(self.robot.n_legs):\n phase_biases[i][j] = relative_phase[i][j]\n return phase_biases\n", "source": "the_stack_v2_python_sparse", "source_path": "gym-daisy-custom/gym_daisy_custom/control/gaits.py", "source_repo": "contactrika/bo-svae-dc", "split": "val", "star_events_count": 6} {"blob_id": "fe1a15bdd485b7ce09a64646d007ae0154748c2d", "bodies": ["try:\n return base64.b64encode(pickle.dumps(obj)).decode()\nexcept pickle.PicklingError:\n pass", "try:\n return pickle.loads(base64.b64decode(obj_str.encode()))\nexcept pickle.UnpicklingError:\n pass"], "bodies_text": "<|body_start_0|>\n try:\n return base64.b64encode(pickle.dumps(obj)).decode()\n except pickle.PicklingError:\n pass\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return pickle.loads(base64.b64decode(obj_str.encode()))\n except pickle.UnpicklingError:\n pass\n<|end_body_1|>\n", "class_docstring": "transform object and string", "class_name": "ObjectTransform", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ObjectTransform:\n \"\"\"transform object and string\"\"\"\n\n def pickle_dumps_to_str(cls, obj):\n \"\"\"from object to str\"\"\"\n <|body_0|>\n\n def pickle_loads_from_str(cls, obj_str):\n \"\"\"from str to object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return base64.b64encode(pickle.dumps(obj)).decode()\n except pickle.PicklingError:\n pass\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return pickle.loads(base64.b64decode(obj_str.encode()))\n except pickle.UnpicklingError:\n pass\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000375", "length_bytes": 1183, "license_type": "permissive", "methods": [{"docstring": "from object to str", "name": "pickle_dumps_to_str", "signature": "def pickle_dumps_to_str(cls, obj)"}, {"docstring": "from str to object", "name": "pickle_loads_from_str", "signature": "def pickle_loads_from_str(cls, obj_str)"}], "n_methods": 2, "prompt": "Implement the Python class `ObjectTransform` described below.\n\nClass description:\ntransform object and string\n\nMethod signatures and docstrings:\n- def pickle_dumps_to_str(cls, obj): from object to str\n- def pickle_loads_from_str(cls, obj_str): from str to object", "prompted_full_text": "Implement the Python class `ObjectTransform` described below.\n\nClass description:\ntransform object and string\n\nMethod signatures and docstrings:\n- def pickle_dumps_to_str(cls, obj): from object to str\n- def pickle_loads_from_str(cls, obj_str): from str to object\n\n<|skeleton|>\nclass ObjectTransform:\n \"\"\"transform object and string\"\"\"\n\n def pickle_dumps_to_str(cls, obj):\n \"\"\"from object to str\"\"\"\n <|body_0|>\n\n def pickle_loads_from_str(cls, obj_str):\n \"\"\"from str to object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return base64.b64encode(pickle.dumps(obj)).decode()\n except pickle.PicklingError:\n pass\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return pickle.loads(base64.b64decode(obj_str.encode()))\n except 
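The bias matrix built by generate_biases is easier to read with concrete numbers. A self-contained sketch, assuming a stand-in hexapod with 6 legs and 3 oscillators per leg and made-up phase values (none of these numbers come from the source):

import numpy as np

n_legs, n_oscillators = 6, 18  # assumed: 3 oscillators per leg
# Assumed tripod-like base-joint phases: adjacent legs half a cycle apart.
relative_phase = np.pi * np.fromfunction(lambda i, j: (i + j) % 2, (n_legs, n_legs))
phase_offset = [np.full(n_legs, np.pi / 2), np.full(n_legs, np.pi / 2)]

phase_biases = np.zeros((n_oscillators, n_oscillators))
for i in range(n_legs):
    # Couplings between base joint i and its two distal joints are antisymmetric.
    phase_biases[i][i + n_legs] = -phase_offset[0][i]
    phase_biases[i][i + 2 * n_legs] = -phase_offset[1][i]
    phase_biases[i + n_legs][i] = phase_offset[0][i]
    phase_biases[i + 2 * n_legs][i] = phase_offset[1][i]
    for j in range(n_legs):
        phase_biases[i][j] = relative_phase[i][j]

assert phase_biases[0, 6] == -phase_biases[6, 0]

The antisymmetric within-leg couplings keep each leg's joints phase-locked at a fixed offset, while the base-joint block encodes the gait itself.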
pickle.UnpicklingError:\n pass\n<|end_body_1|>\n", "revision_id": "b8ec015fa9e16c0a879c619ee1f2aab8a393c7bd", "skeleton": "<|skeleton|>\nclass ObjectTransform:\n \"\"\"transform object and string\"\"\"\n\n def pickle_dumps_to_str(cls, obj):\n \"\"\"from object to str\"\"\"\n <|body_0|>\n\n def pickle_loads_from_str(cls, obj_str):\n \"\"\"from str to object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ObjectTransform:\n \"\"\"transform object and string\"\"\"\n\n def pickle_dumps_to_str(cls, obj):\n \"\"\"from object to str\"\"\"\n try:\n return base64.b64encode(pickle.dumps(obj)).decode()\n except pickle.PicklingError:\n pass\n\n def pickle_loads_from_str(cls, obj_str):\n \"\"\"from str to object\"\"\"\n try:\n return pickle.loads(base64.b64decode(obj_str.encode()))\n except pickle.UnpicklingError:\n pass\n", "source": "the_stack_v2_python_sparse", "source_path": "ST_DM/KDD2021-MSTPAC/code/MST-PAC/utils/object_transform.py", "source_repo": "sserdoubleh/Research", "split": "val", "star_events_count": 10} {"blob_id": "67ae5c40dee00aed03cd47bea2f3706735148b56", "bodies": ["nodes = [root]\nres = []\nwhile any(nodes):\n nodes = [node for node in nodes if node]\n res.append(max([node.val for node in nodes]))\n nodes = [n for node in nodes for n in (node.left, node.right)]\nreturn res", "if not root:\n return []\nnodes = [(root, 0)]\nhigh_level = 0\ndict = {}\nwhile nodes:\n cur, h = nodes.pop(0)\n if h not in dict:\n dict[h] = [cur.val]\n else:\n dict[h].append(cur.val)\n if h > high_level:\n high_level = h\n if cur.left:\n nodes.append((cur.left, h + 1))\n if cur.right:\n nodes.append((cur.right, h + 1))\nres = [max(value) for value in dict.values()]\nreturn res"], "bodies_text": "<|body_start_0|>\n nodes = [root]\n res = []\n while any(nodes):\n nodes = [node for node in nodes if node]\n res.append(max([node.val for node in nodes]))\n nodes = [n for node in nodes for n in (node.left, node.right)]\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return []\n nodes = [(root, 0)]\n high_level = 0\n dict = {}\n while nodes:\n cur, h = nodes.pop(0)\n if h not in dict:\n dict[h] = [cur.val]\n else:\n dict[h].append(cur.val)\n if h > high_level:\n high_level = h\n if cur.left:\n nodes.append((cur.left, h + 1))\n if cur.right:\n nodes.append((cur.right, h + 1))\n res = [max(value) for value in dict.values()]\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def largestValues(self, root):\n \"\"\":type root: TreeNode :rtype: List[int]\"\"\"\n <|body_0|>\n\n def largestValues2(self, root):\n \"\"\":type root: TreeNode :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n nodes = [root]\n res = []\n while any(nodes):\n nodes = [node for node in nodes if node]\n res.append(max([node.val for node in nodes]))\n nodes = [n for node in nodes for n in (node.left, node.right)]\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return []\n nodes = [(root, 0)]\n high_level = 0\n dict = {}\n while nodes:\n cur, h = nodes.pop(0)\n if h not in dict:\n dict[h] = [cur.val]\n else:\n dict[h].append(cur.val)\n if h > high_level:\n high_level = h\n if cur.left:\n nodes.append((cur.left, h + 1))\n if cur.right:\n nodes.append((cur.right, h + 1))\n res = [max(value) for value in 
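A quick round-trip makes the contract of the two ObjectTransform helpers concrete. One caveat worth noting: corrupt input typically surfaces as binascii.Error from b64decode rather than pickle.UnpicklingError, so the decode side of this sketch catches both; the standalone function names are illustrative:

import base64
import binascii
import pickle

def dumps_to_str(obj) -> str:
    # object -> pickle bytes -> base64 -> ASCII str
    return base64.b64encode(pickle.dumps(obj)).decode()

def loads_from_str(obj_str: str):
    # ASCII str -> base64 bytes -> object; None on malformed input
    try:
        return pickle.loads(base64.b64decode(obj_str.encode()))
    except (binascii.Error, pickle.UnpicklingError):
        return None

payload = {'ids': [1, 2, 3], 'ok': True}
assert loads_from_str(dumps_to_str(payload)) == payload
assert loads_from_str('not base64!!') is None

As with any pickle round-trip, this should only ever be fed trusted data, since unpickling can execute arbitrary code.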
dict.values()]\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000376", "length_bytes": 1901, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode :rtype: List[int]", "name": "largestValues", "signature": "def largestValues(self, root)"}, {"docstring": ":type root: TreeNode :rtype: List[int]", "name": "largestValues2", "signature": "def largestValues2(self, root)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005040", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def largestValues(self, root): :type root: TreeNode :rtype: List[int]\n- def largestValues2(self, root): :type root: TreeNode :rtype: List[int]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def largestValues(self, root): :type root: TreeNode :rtype: List[int]\n- def largestValues2(self, root): :type root: TreeNode :rtype: List[int]\n\n<|skeleton|>\nclass Solution:\n\n def largestValues(self, root):\n \"\"\":type root: TreeNode :rtype: List[int]\"\"\"\n <|body_0|>\n\n def largestValues2(self, root):\n \"\"\":type root: TreeNode :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n nodes = [root]\n res = []\n while any(nodes):\n nodes = [node for node in nodes if node]\n res.append(max([node.val for node in nodes]))\n nodes = [n for node in nodes for n in (node.left, node.right)]\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return []\n nodes = [(root, 0)]\n high_level = 0\n dict = {}\n while nodes:\n cur, h = nodes.pop(0)\n if h not in dict:\n dict[h] = [cur.val]\n else:\n dict[h].append(cur.val)\n if h > high_level:\n high_level = h\n if cur.left:\n nodes.append((cur.left, h + 1))\n if cur.right:\n nodes.append((cur.right, h + 1))\n res = [max(value) for value in dict.values()]\n return res\n<|end_body_1|>\n", "revision_id": "0fc4c7af59246e3064db41989a45d9db413a624b", "skeleton": "<|skeleton|>\nclass Solution:\n\n def largestValues(self, root):\n \"\"\":type root: TreeNode :rtype: List[int]\"\"\"\n <|body_0|>\n\n def largestValues2(self, root):\n \"\"\":type root: TreeNode :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def largestValues(self, root):\n \"\"\":type root: TreeNode :rtype: List[int]\"\"\"\n nodes = [root]\n res = []\n while any(nodes):\n nodes = [node for node in nodes if node]\n res.append(max([node.val for node in nodes]))\n nodes = [n for node in nodes for n in (node.left, node.right)]\n return res\n\n def largestValues2(self, root):\n \"\"\":type root: TreeNode :rtype: List[int]\"\"\"\n if not root:\n return []\n nodes = [(root, 0)]\n high_level = 0\n dict = {}\n while nodes:\n cur, h = nodes.pop(0)\n if h not in dict:\n dict[h] = [cur.val]\n else:\n dict[h].append(cur.val)\n if h > high_level:\n high_level = h\n if cur.left:\n nodes.append((cur.left, h + 1))\n if cur.right:\n nodes.append((cur.right, h + 1))\n res = [max(value) for value in dict.values()]\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "515. 
Find Largest Value in Each Tree Row/largest.py", "source_repo": "Macielyoung/LeetCode", "split": "val", "star_events_count": 1} {"blob_id": "5ac302d461e2c079be025e725c736f632e870d54", "bodies": ["self.commander_window = commander_window\nself.font = QtGui.QFont(self)\nself.font.setBold(True)\nself.font.setWeight(75)\nself.setup_footer_panel()", "self.create_footer_push_button('F3 View', 'F3')\nself.create_footer_push_button('F4 Edit', 'F4')\nself.create_footer_push_button('F5 Copy', 'F5')\nself.create_footer_push_button('F6 move', 'F6')\nself.create_footer_push_button('F7 New Folder', 'F7')\nself.create_footer_push_button('F8 Delete', 'F8')\nself.create_footer_push_button('ALT+F4 Exit')", "button = QtGui.QPushButton(self.commander_window.footer_container)\nbutton.setFont(self.font)\nbutton.setText(text)\nif shortcut is not None:\n button.setShortcut(shortcut)\nif connection is not None:\n button.triggered.connect(connection)\nself.commander_window.footer_layout.addWidget(button)\nreturn button"], "bodies_text": "<|body_start_0|>\n self.commander_window = commander_window\n self.font = QtGui.QFont(self)\n self.font.setBold(True)\n self.font.setWeight(75)\n self.setup_footer_panel()\n<|end_body_0|>\n\n<|body_start_1|>\n self.create_footer_push_button('F3 View', 'F3')\n self.create_footer_push_button('F4 Edit', 'F4')\n self.create_footer_push_button('F5 Copy', 'F5')\n self.create_footer_push_button('F6 move', 'F6')\n self.create_footer_push_button('F7 New Folder', 'F7')\n self.create_footer_push_button('F8 Delete', 'F8')\n self.create_footer_push_button('ALT+F4 Exit')\n<|end_body_1|>\n\n<|body_start_2|>\n button = QtGui.QPushButton(self.commander_window.footer_container)\n button.setFont(self.font)\n button.setText(text)\n if shortcut is not None:\n button.setShortcut(shortcut)\n if connection is not None:\n button.triggered.connect(connection)\n self.commander_window.footer_layout.addWidget(button)\n return button\n<|end_body_2|>\n", "class_docstring": "", "class_name": "WindowFooterPanel", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WindowFooterPanel:\n\n def __init__(self, commander_window):\n \"\"\"constructor initialize all footer panel elements Keyword arguments: :param commander_window: an initialized instance (parent main window) of CommanderWindow class\"\"\"\n <|body_0|>\n\n def setup_footer_panel(self):\n \"\"\"This method is meant to create all the footer elements on this class used only from constructor\"\"\"\n <|body_1|>\n\n def create_footer_push_button(self, text, shortcut=None, connection=None):\n \"\"\"This method will create a button into footer layout Keyword arguments: text -- text to be set in the action (ie. \"View\") :param shortcut: string sequence of keys or single key for shortcut (ie. 
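Both largestValues variants are easy to sanity-check on a small tree. A self-contained sketch, defining the TreeNode the record assumes from its LeetCode scaffolding:

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def largest_values(root):
    # Level sweep: keep one list of nodes per depth and record its max.
    nodes, res = [root], []
    while any(nodes):
        nodes = [n for n in nodes if n]
        res.append(max(n.val for n in nodes))
        nodes = [child for n in nodes for child in (n.left, n.right)]
    return res

#       1
#      / \
#     3   2
#    / \    \
#    5   3   9
root = TreeNode(1, TreeNode(3, TreeNode(5), TreeNode(3)), TreeNode(2, None, TreeNode(9)))
assert largest_values(root) == [1, 3, 9]
assert largest_values(None) == []

The any(nodes) guard doubles as the empty-tree check: any([None]) is False, so a None root falls straight through to [].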
\"F3\", default None) :param connection: connection method that will be triggered when Action is clicked (default: None)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.commander_window = commander_window\n self.font = QtGui.QFont(self)\n self.font.setBold(True)\n self.font.setWeight(75)\n self.setup_footer_panel()\n<|end_body_0|>\n\n<|body_start_1|>\n self.create_footer_push_button('F3 View', 'F3')\n self.create_footer_push_button('F4 Edit', 'F4')\n self.create_footer_push_button('F5 Copy', 'F5')\n self.create_footer_push_button('F6 move', 'F6')\n self.create_footer_push_button('F7 New Folder', 'F7')\n self.create_footer_push_button('F8 Delete', 'F8')\n self.create_footer_push_button('ALT+F4 Exit')\n<|end_body_1|>\n\n<|body_start_2|>\n button = QtGui.QPushButton(self.commander_window.footer_container)\n button.setFont(self.font)\n button.setText(text)\n if shortcut is not None:\n button.setShortcut(shortcut)\n if connection is not None:\n button.triggered.connect(connection)\n self.commander_window.footer_layout.addWidget(button)\n return button\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000377", "length_bytes": 2089, "license_type": "no_license", "methods": [{"docstring": "constructor initialize all footer panel elements Keyword arguments: :param commander_window: an initialized instance (parent main window) of CommanderWindow class", "name": "__init__", "signature": "def __init__(self, commander_window)"}, {"docstring": "This method is meant to create all the footer elements on this class used only from constructor", "name": "setup_footer_panel", "signature": "def setup_footer_panel(self)"}, {"docstring": "This method will create a button into footer layout Keyword arguments: text -- text to be set in the action (ie. \"View\") :param shortcut: string sequence of keys or single key for shortcut (ie. \"F3\", default None) :param connection: connection method that will be triggered when Action is clicked (default: None)", "name": "create_footer_push_button", "signature": "def create_footer_push_button(self, text, shortcut=None, connection=None)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006824", "prompt": "Implement the Python class `WindowFooterPanel` described below.\n\nClass description:\nImplement the WindowFooterPanel class.\n\nMethod signatures and docstrings:\n- def __init__(self, commander_window): constructor initialize all footer panel elements Keyword arguments: :param commander_window: an initialized instance (parent main window) of CommanderWindow class\n- def setup_footer_panel(self): This method is meant to create all the footer elements on this class used only from constructor\n- def create_footer_push_button(self, text, shortcut=None, connection=None): This method will create a button into footer layout Keyword arguments: text -- text to be set in the action (ie. \"View\") :param shortcut: string sequence of keys or single key for shortcut (ie. 
\"F3\", default None) :param connection: connection method that will be triggered when Action is clicked (default: None)", "prompted_full_text": "Implement the Python class `WindowFooterPanel` described below.\n\nClass description:\nImplement the WindowFooterPanel class.\n\nMethod signatures and docstrings:\n- def __init__(self, commander_window): constructor initialize all footer panel elements Keyword arguments: :param commander_window: an initialized instance (parent main window) of CommanderWindow class\n- def setup_footer_panel(self): This method is meant to create all the footer elements on this class used only from constructor\n- def create_footer_push_button(self, text, shortcut=None, connection=None): This method will create a button into footer layout Keyword arguments: text -- text to be set in the action (ie. \"View\") :param shortcut: string sequence of keys or single key for shortcut (ie. \"F3\", default None) :param connection: connection method that will be triggered when Action is clicked (default: None)\n\n<|skeleton|>\nclass WindowFooterPanel:\n\n def __init__(self, commander_window):\n \"\"\"constructor initialize all footer panel elements Keyword arguments: :param commander_window: an initialized instance (parent main window) of CommanderWindow class\"\"\"\n <|body_0|>\n\n def setup_footer_panel(self):\n \"\"\"This method is meant to create all the footer elements on this class used only from constructor\"\"\"\n <|body_1|>\n\n def create_footer_push_button(self, text, shortcut=None, connection=None):\n \"\"\"This method will create a button into footer layout Keyword arguments: text -- text to be set in the action (ie. \"View\") :param shortcut: string sequence of keys or single key for shortcut (ie. \"F3\", default None) :param connection: connection method that will be triggered when Action is clicked (default: None)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.commander_window = commander_window\n self.font = QtGui.QFont(self)\n self.font.setBold(True)\n self.font.setWeight(75)\n self.setup_footer_panel()\n<|end_body_0|>\n\n<|body_start_1|>\n self.create_footer_push_button('F3 View', 'F3')\n self.create_footer_push_button('F4 Edit', 'F4')\n self.create_footer_push_button('F5 Copy', 'F5')\n self.create_footer_push_button('F6 move', 'F6')\n self.create_footer_push_button('F7 New Folder', 'F7')\n self.create_footer_push_button('F8 Delete', 'F8')\n self.create_footer_push_button('ALT+F4 Exit')\n<|end_body_1|>\n\n<|body_start_2|>\n button = QtGui.QPushButton(self.commander_window.footer_container)\n button.setFont(self.font)\n button.setText(text)\n if shortcut is not None:\n button.setShortcut(shortcut)\n if connection is not None:\n button.triggered.connect(connection)\n self.commander_window.footer_layout.addWidget(button)\n return button\n<|end_body_2|>\n", "revision_id": "5f7ab5b39c1dc7d8d2182048c5d8eaff04de3d06", "skeleton": "<|skeleton|>\nclass WindowFooterPanel:\n\n def __init__(self, commander_window):\n \"\"\"constructor initialize all footer panel elements Keyword arguments: :param commander_window: an initialized instance (parent main window) of CommanderWindow class\"\"\"\n <|body_0|>\n\n def setup_footer_panel(self):\n \"\"\"This method is meant to create all the footer elements on this class used only from constructor\"\"\"\n <|body_1|>\n\n def create_footer_push_button(self, text, shortcut=None, connection=None):\n \"\"\"This method will create a button into footer layout Keyword arguments: text -- text to be set in the action (ie. 
\"View\") :param shortcut: string sequence of keys or single key for shortcut (ie. \"F3\", default None) :param connection: connection method that will be triggered when Action is clicked (default: None)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class WindowFooterPanel:\n def __init__(self, commander_window):\n \"\"\"constructor initialize all footer panel elements Keyword arguments: :param commander_window: an initialized instance (parent main window) of CommanderWindow class\"\"\"\n self.commander_window = commander_window\n self.font = QtGui.QFont(self)\n self.font.setBold(True)\n self.font.setWeight(75)\n self.setup_footer_panel()\n\n def setup_footer_panel(self):\n \"\"\"This method is meant to create all the footer elements on this class used only from constructor\"\"\"\n self.create_footer_push_button('F3 View', 'F3')\n self.create_footer_push_button('F4 Edit', 'F4')\n self.create_footer_push_button('F5 Copy', 'F5')\n self.create_footer_push_button('F6 move', 'F6')\n self.create_footer_push_button('F7 New Folder', 'F7')\n self.create_footer_push_button('F8 Delete', 'F8')\n self.create_footer_push_button('ALT+F4 Exit')\n\n def create_footer_push_button(self, text, shortcut=None, connection=None):\n \"\"\"This method will create a button into footer layout Keyword arguments: text -- text to be set in the action (ie. \"View\") :param shortcut: string sequence of keys or single key for shortcut (ie. \"F3\", default None) :param connection: connection method that will be triggered when Action is clicked (default: None)\"\"\"\n button = QtGui.QPushButton(self.commander_window.footer_container)\n button.setFont(self.font)\n button.setText(text)\n if shortcut is not None:\n button.setShortcut(shortcut)\n if connection is not None:\n button.triggered.connect(connection)\n self.commander_window.footer_layout.addWidget(button)\n return button\n", "source": "the_stack_v2_python_sparse", "source_path": "views/window/window_footer_panel.py", "source_repo": "jafi666/pyCommander", "split": "val", "star_events_count": 0} {"blob_id": "40321697c67d4f48ca700104bf730960e489037c", "bodies": ["result = 0\nfor index, value in enumerate(nums):\n if value == val:\n continue\n if result != index:\n nums[result] = value\n result += 1\nreturn result", "start, end = (0, len(nums) - 1)\nwhile start <= end:\n if nums[start] == val:\n nums[start], nums[end], end = (nums[end], nums[start], end - 1)\n else:\n start += 1\nreturn start"], "bodies_text": "<|body_start_0|>\n result = 0\n for index, value in enumerate(nums):\n if value == val:\n continue\n if result != index:\n nums[result] = value\n result += 1\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n start, end = (0, len(nums) - 1)\n while start <= end:\n if nums[start] == val:\n nums[start], nums[end], end = (nums[end], nums[start], end - 1)\n else:\n start += 1\n return start\n<|end_body_1|>\n", "class_docstring": "给出一个列表和一个值,从列表中删除给定的值,返回最终的数组长度 Given an array nums and a value val, remove all instances of that value in-place and return the new length. Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory. The order of elements can be changed. It doesn't matter what you leave beyond the new length. Example 1: Given nums = [3,2,2,3], val = 3, Your function should return length = 2, with the first two elements of nums being 2. 
It doesn't matter what you leave beyond the returned length. Example 2: Given nums = [0,1,2,2,3,0,4,2], val = 2, Your function should return length = 5, with the first five elements of nums containing 0, 1, 3, 0", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n \"\"\"给出一个列表和一个值,从列表中删除给定的值,返回最终的数组长度 Given an array nums and a value val, remove all instances of that value in-place and return the new length. Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory. The order of elements can be changed. It doesn't matter what you leave beyond the new length. Example 1: Given nums = [3,2,2,3], val = 3, Your function should return length = 2, with the first two elements of nums being 2. It doesn't matter what you leave beyond the returned length. Example 2: Given nums = [0,1,2,2,3,0,4,2], val = 2, Your function should return length = 5, with the first five elements of nums containing 0, 1, 3, 0\"\"\"\n\n def symb(self, nums, val):\n \"\"\":type nums: List[int] :type val: int :rtype: int\"\"\"\n <|body_0|>\n\n def removeElement(self, nums, val):\n \"\"\"从左开始往右移动,每碰到一个目标值,就将其交换到列表尾部 Starting from the left every time we find a value that is the target value we swap it out with an item starting from the right. We decrement end each time as we know that the final item is the target value and only increment start once we know the value is ok. Once start reaches end we know all items after that point are the target value so we can stop there.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = 0\n for index, value in enumerate(nums):\n if value == val:\n continue\n if result != index:\n nums[result] = value\n result += 1\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n start, end = (0, len(nums) - 1)\n while start <= end:\n if nums[start] == val:\n nums[start], nums[end], end = (nums[end], nums[start], end - 1)\n else:\n start += 1\n return start\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000378", "length_bytes": 3154, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :type val: int :rtype: int", "name": "symb", "signature": "def symb(self, nums, val)"}, {"docstring": "从左开始往右移动,每碰到一个目标值,就将其交换到列表尾部 Starting from the left every time we find a value that is the target value we swap it out with an item starting from the right. We decrement end each time as we know that the final item is the target value and only increment start once we know the value is ok. Once start reaches end we know all items after that point are the target value so we can stop there.", "name": "removeElement", "signature": "def removeElement(self, nums, val)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001795", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\n给出一个列表和一个值,从列表中删除给定的值,返回最终的数组长度 Given an array nums and a value val, remove all instances of that value in-place and return the new length. Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory. The order of elements can be changed. It doesn't matter what you leave beyond the new length. Example 1: Given nums = [3,2,2,3], val = 3, Your function should return length = 2, with the first two elements of nums being 2. It doesn't matter what you leave beyond the returned length. 
Example 2: Given nums = [0,1,2,2,3,0,4,2], val = 2, Your function should return length = 5, with the first five elements of nums containing 0, 1, 3, 0\n\nMethod signatures and docstrings:\n- def symb(self, nums, val): :type nums: List[int] :type val: int :rtype: int\n- def removeElement(self, nums, val): 从左开始往右移动,每碰到一个目标值,就将其交换到列表尾部 Starting from the left every time we find a value that is the target value we swap it out with an item starting from the right. We decrement end each time as we know that the final item is the target value and only increment start once we know the value is ok. Once start reaches end we know all items after that point are the target value so we can stop there.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\n给出一个列表和一个值,从列表中删除给定的值,返回最终的数组长度 Given an array nums and a value val, remove all instances of that value in-place and return the new length. Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory. The order of elements can be changed. It doesn't matter what you leave beyond the new length. Example 1: Given nums = [3,2,2,3], val = 3, Your function should return length = 2, with the first two elements of nums being 2. It doesn't matter what you leave beyond the returned length. Example 2: Given nums = [0,1,2,2,3,0,4,2], val = 2, Your function should return length = 5, with the first five elements of nums containing 0, 1, 3, 0\n\nMethod signatures and docstrings:\n- def symb(self, nums, val): :type nums: List[int] :type val: int :rtype: int\n- def removeElement(self, nums, val): 从左开始往右移动,每碰到一个目标值,就将其交换到列表尾部 Starting from the left every time we find a value that is the target value we swap it out with an item starting from the right. We decrement end each time as we know that the final item is the target value and only increment start once we know the value is ok. Once start reaches end we know all items after that point are the target value so we can stop there.\n\n<|skeleton|>\nclass Solution:\n \"\"\"给出一个列表和一个值,从列表中删除给定的值,返回最终的数组长度 Given an array nums and a value val, remove all instances of that value in-place and return the new length. Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory. The order of elements can be changed. It doesn't matter what you leave beyond the new length. Example 1: Given nums = [3,2,2,3], val = 3, Your function should return length = 2, with the first two elements of nums being 2. It doesn't matter what you leave beyond the returned length. Example 2: Given nums = [0,1,2,2,3,0,4,2], val = 2, Your function should return length = 5, with the first five elements of nums containing 0, 1, 3, 0\"\"\"\n\n def symb(self, nums, val):\n \"\"\":type nums: List[int] :type val: int :rtype: int\"\"\"\n <|body_0|>\n\n def removeElement(self, nums, val):\n \"\"\"从左开始往右移动,每碰到一个目标值,就将其交换到列表尾部 Starting from the left every time we find a value that is the target value we swap it out with an item starting from the right. We decrement end each time as we know that the final item is the target value and only increment start once we know the value is ok. 
Once start reaches end we know all items after that point are the target value so we can stop there.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = 0\n for index, value in enumerate(nums):\n if value == val:\n continue\n if result != index:\n nums[result] = value\n result += 1\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n start, end = (0, len(nums) - 1)\n while start <= end:\n if nums[start] == val:\n nums[start], nums[end], end = (nums[end], nums[start], end - 1)\n else:\n start += 1\n return start\n<|end_body_1|>\n", "revision_id": "7a6de1767eaabb6464ea4c90756606d59b868d7c", "skeleton": "<|skeleton|>\nclass Solution:\n \"\"\"给出一个列表和一个值,从列表中删除给定的值,返回最终的数组长度 Given an array nums and a value val, remove all instances of that value in-place and return the new length. Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory. The order of elements can be changed. It doesn't matter what you leave beyond the new length. Example 1: Given nums = [3,2,2,3], val = 3, Your function should return length = 2, with the first two elements of nums being 2. It doesn't matter what you leave beyond the returned length. Example 2: Given nums = [0,1,2,2,3,0,4,2], val = 2, Your function should return length = 5, with the first five elements of nums containing 0, 1, 3, 0\"\"\"\n\n def symb(self, nums, val):\n \"\"\":type nums: List[int] :type val: int :rtype: int\"\"\"\n <|body_0|>\n\n def removeElement(self, nums, val):\n \"\"\"从左开始往右移动,每碰到一个目标值,就将其交换到列表尾部 Starting from the left every time we find a value that is the target value we swap it out with an item starting from the right. We decrement end each time as we know that the final item is the target value and only increment start once we know the value is ok. Once start reaches end we know all items after that point are the target value so we can stop there.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n \"\"\"给出一个列表和一个值,从列表中删除给定的值,返回最终的数组长度 Given an array nums and a value val, remove all instances of that value in-place and return the new length. Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory. The order of elements can be changed. It doesn't matter what you leave beyond the new length. Example 1: Given nums = [3,2,2,3], val = 3, Your function should return length = 2, with the first two elements of nums being 2. It doesn't matter what you leave beyond the returned length. Example 2: Given nums = [0,1,2,2,3,0,4,2], val = 2, Your function should return length = 5, with the first five elements of nums containing 0, 1, 3, 0\"\"\"\n\n def symb(self, nums, val):\n \"\"\":type nums: List[int] :type val: int :rtype: int\"\"\"\n result = 0\n for index, value in enumerate(nums):\n if value == val:\n continue\n if result != index:\n nums[result] = value\n result += 1\n return result\n\n def removeElement(self, nums, val):\n \"\"\"从左开始往右移动,每碰到一个目标值,就将其交换到列表尾部 Starting from the left every time we find a value that is the target value we swap it out with an item starting from the right. We decrement end each time as we know that the final item is the target value and only increment start once we know the value is ok. 
Once start reaches end we know all items after that point are the target value so we can stop there.\"\"\"\n start, end = (0, len(nums) - 1)\n while start <= end:\n if nums[start] == val:\n nums[start], nums[end], end = (nums[end], nums[start], end - 1)\n else:\n start += 1\n return start\n", "source": "the_stack_v2_python_sparse", "source_path": "demo/27.RemoveElement.py", "source_repo": "symbooo/LeetCodeSymb", "split": "val", "star_events_count": 0} {"blob_id": "588f9dbc549cf45e8aa3ef02de3105314dbc53fa", "bodies": ["if ty == 'trades':\n reader = BinReader(filePath, '>QIIf', 100)\n self._ts = []\n self._pr = []\n while reader.hasNext():\n now = reader.next()\n self._ts.append(now[0])\n self._pr.append(now[3])\nelif ty == 'quotes':\n reader = BinReader(filePath, '>QIIfIf', 100)\n self._ts = []\n self._pr = []\n while reader.hasNext():\n now = reader.next()\n self._ts.append(now[0])\n self._pr.append((now[3] + now[5]) / 2)", "xInMiliSec = 60 * 1000 * x\nr = []\nfind_t = self._ts[0] + xInMiliSec\npre_t = find_t\npre_p = self._pr[0]\nbase_t = self._ts[0]\nbase_p = self._pr[0]\nfor i, p in enumerate(self._pr):\n if self._ts[i] > find_t:\n if base_t == pre_t:\n find_t = self._ts[i] + xInMiliSec\n else:\n r.append(pre_p / base_p - 1)\n find_t += xInMiliSec\n base_t = self._ts[i]\n base_p = p\n elif self._ts[i] == find_t:\n r.append(p / base_p - 1)\n base_p = p\n find_t += xInMiliSec\n pre_t = self._ts\n pre_p = p\nreturn r"], "bodies_text": "<|body_start_0|>\n if ty == 'trades':\n reader = BinReader(filePath, '>QIIf', 100)\n self._ts = []\n self._pr = []\n while reader.hasNext():\n now = reader.next()\n self._ts.append(now[0])\n self._pr.append(now[3])\n elif ty == 'quotes':\n reader = BinReader(filePath, '>QIIfIf', 100)\n self._ts = []\n self._pr = []\n while reader.hasNext():\n now = reader.next()\n self._ts.append(now[0])\n self._pr.append((now[3] + now[5]) / 2)\n<|end_body_0|>\n\n<|body_start_1|>\n xInMiliSec = 60 * 1000 * x\n r = []\n find_t = self._ts[0] + xInMiliSec\n pre_t = find_t\n pre_p = self._pr[0]\n base_t = self._ts[0]\n base_p = self._pr[0]\n for i, p in enumerate(self._pr):\n if self._ts[i] > find_t:\n if base_t == pre_t:\n find_t = self._ts[i] + xInMiliSec\n else:\n r.append(pre_p / base_p - 1)\n find_t += xInMiliSec\n base_t = self._ts[i]\n base_p = p\n elif self._ts[i] == find_t:\n r.append(p / base_p - 1)\n base_p = p\n find_t += xInMiliSec\n pre_t = self._ts\n pre_p = p\n return r\n<|end_body_1|>\n", "class_docstring": "A class to compute X-minute returns of trades and mid-quotes Attributes ------ _ts : list A list of time stamps from the data _pr : list A list of prices from the data Methods ------ computeReturn(x) Return the X-time lag return of the trade price/mid quote price for trade/quote data", "class_name": "Computer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Computer:\n \"\"\"A class to compute X-minute returns of trades and mid-quotes Attributes ------ _ts : list A list of time stamps from the data _pr : list A list of prices from the data Methods ------ computeReturn(x) Return the X-time lag return of the trade price/mid quote price for trade/quote data\"\"\"\n\n def __init__(self, filePath, ty):\n \"\"\"Parameters ------ filePath : str The file path of the data ty : str type of the data, either 'trades' or 'quotes'\"\"\"\n <|body_0|>\n\n def computeReturn(self, x):\n \"\"\"Compute the X-minute return Parameters ------ x : int Minute\"\"\"\n 
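The two removeElement strategies trade element order for write count: the compaction pass keeps relative order, while the swap-from-the-end pass does at most one swap per removed value. A self-contained check using the record's own Example 2 data:

def remove_element(nums, val):
    # Two pointers: swap target values behind `end`, shrink the window.
    start, end = 0, len(nums) - 1
    while start <= end:
        if nums[start] == val:
            nums[start], nums[end], end = nums[end], nums[start], end - 1
        else:
            start += 1
    return start

nums = [0, 1, 2, 2, 3, 0, 4, 2]
k = remove_element(nums, 2)
assert k == 5
assert sorted(nums[:k]) == [0, 0, 1, 3, 4]  # order not preserved, contents are

sorted() is the right assertion here precisely because this variant may reorder the surviving elements.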
<|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if ty == 'trades':\n reader = BinReader(filePath, '>QIIf', 100)\n self._ts = []\n self._pr = []\n while reader.hasNext():\n now = reader.next()\n self._ts.append(now[0])\n self._pr.append(now[3])\n elif ty == 'quotes':\n reader = BinReader(filePath, '>QIIfIf', 100)\n self._ts = []\n self._pr = []\n while reader.hasNext():\n now = reader.next()\n self._ts.append(now[0])\n self._pr.append((now[3] + now[5]) / 2)\n<|end_body_0|>\n\n<|body_start_1|>\n xInMiliSec = 60 * 1000 * x\n r = []\n find_t = self._ts[0] + xInMiliSec\n pre_t = find_t\n pre_p = self._pr[0]\n base_t = self._ts[0]\n base_p = self._pr[0]\n for i, p in enumerate(self._pr):\n if self._ts[i] > find_t:\n if base_t == pre_t:\n find_t = self._ts[i] + xInMiliSec\n else:\n r.append(pre_p / base_p - 1)\n find_t += xInMiliSec\n base_t = self._ts[i]\n base_p = p\n elif self._ts[i] == find_t:\n r.append(p / base_p - 1)\n base_p = p\n find_t += xInMiliSec\n pre_t = self._ts\n pre_p = p\n return r\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000379", "length_bytes": 2886, "license_type": "no_license", "methods": [{"docstring": "Parameters ------ filePath : str The file path of the data ty : str type of the data, either 'trades' or 'quotes'", "name": "__init__", "signature": "def __init__(self, filePath, ty)"}, {"docstring": "Compute the X-minute return Parameters ------ x : int Minute", "name": "computeReturn", "signature": "def computeReturn(self, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005427", "prompt": "Implement the Python class `Computer` described below.\n\nClass description:\nA class to compute X-minute returns of trades and mid-quotes Attributes ------ _ts : list A list of time stamps from the data _pr : list A list of prices from the data Methods ------ computeReturn(x) Return the X-time lag return of the trade price/mid quote price for trade/quote data\n\nMethod signatures and docstrings:\n- def __init__(self, filePath, ty): Parameters ------ filePath : str The file path of the data ty : str type of the data, either 'trades' or 'quotes'\n- def computeReturn(self, x): Compute the X-minute return Parameters ------ x : int Minute", "prompted_full_text": "Implement the Python class `Computer` described below.\n\nClass description:\nA class to compute X-minute returns of trades and mid-quotes Attributes ------ _ts : list A list of time stamps from the data _pr : list A list of prices from the data Methods ------ computeReturn(x) Return the X-time lag return of the trade price/mid quote price for trade/quote data\n\nMethod signatures and docstrings:\n- def __init__(self, filePath, ty): Parameters ------ filePath : str The file path of the data ty : str type of the data, either 'trades' or 'quotes'\n- def computeReturn(self, x): Compute the X-minute return Parameters ------ x : int Minute\n\n<|skeleton|>\nclass Computer:\n \"\"\"A class to compute X-minute returns of trades and mid-quotes Attributes ------ _ts : list A list of time stamps from the data _pr : list A list of prices from the data Methods ------ computeReturn(x) Return the X-time lag return of the trade price/mid quote price for trade/quote data\"\"\"\n\n def __init__(self, filePath, ty):\n \"\"\"Parameters ------ filePath : str The file path of the data ty : str type of the data, either 'trades' or 'quotes'\"\"\"\n <|body_0|>\n\n def computeReturn(self, x):\n \"\"\"Compute the X-minute return Parameters ------ x : int Minute\"\"\"\n 
<|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if ty == 'trades':\n reader = BinReader(filePath, '>QIIf', 100)\n self._ts = []\n self._pr = []\n while reader.hasNext():\n now = reader.next()\n self._ts.append(now[0])\n self._pr.append(now[3])\n elif ty == 'quotes':\n reader = BinReader(filePath, '>QIIfIf', 100)\n self._ts = []\n self._pr = []\n while reader.hasNext():\n now = reader.next()\n self._ts.append(now[0])\n self._pr.append((now[3] + now[5]) / 2)\n<|end_body_0|>\n\n<|body_start_1|>\n xInMiliSec = 60 * 1000 * x\n r = []\n find_t = self._ts[0] + xInMiliSec\n pre_t = find_t\n pre_p = self._pr[0]\n base_t = self._ts[0]\n base_p = self._pr[0]\n for i, p in enumerate(self._pr):\n if self._ts[i] > find_t:\n if base_t == pre_t:\n find_t = self._ts[i] + xInMiliSec\n else:\n r.append(pre_p / base_p - 1)\n find_t += xInMiliSec\n base_t = self._ts[i]\n base_p = p\n elif self._ts[i] == find_t:\n r.append(p / base_p - 1)\n base_p = p\n find_t += xInMiliSec\n pre_t = self._ts\n pre_p = p\n return r\n<|end_body_1|>\n", "revision_id": "4aabbb41b2e9ce18172e010527c59d53ffb95984", "skeleton": "<|skeleton|>\nclass Computer:\n \"\"\"A class to compute X-minute returns of trades and mid-quotes Attributes ------ _ts : list A list of time stamps from the data _pr : list A list of prices from the data Methods ------ computeReturn(x) Return the X-time lag return of the trade price/mid quote price for trade/quote data\"\"\"\n\n def __init__(self, filePath, ty):\n \"\"\"Parameters ------ filePath : str The file path of the data ty : str type of the data, either 'trades' or 'quotes'\"\"\"\n <|body_0|>\n\n def computeReturn(self, x):\n \"\"\"Compute the X-minute return Parameters ------ x : int Minute\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Computer:\n \"\"\"A class to compute X-minute returns of trades and mid-quotes Attributes ------ _ts : list A list of time stamps from the data _pr : list A list of prices from the data Methods ------ computeReturn(x) Return the X-time lag return of the trade price/mid quote price for trade/quote data\"\"\"\n\n def __init__(self, filePath, ty):\n \"\"\"Parameters ------ filePath : str The file path of the data ty : str type of the data, either 'trades' or 'quotes'\"\"\"\n if ty == 'trades':\n reader = BinReader(filePath, '>QIIf', 100)\n self._ts = []\n self._pr = []\n while reader.hasNext():\n now = reader.next()\n self._ts.append(now[0])\n self._pr.append(now[3])\n elif ty == 'quotes':\n reader = BinReader(filePath, '>QIIfIf', 100)\n self._ts = []\n self._pr = []\n while reader.hasNext():\n now = reader.next()\n self._ts.append(now[0])\n self._pr.append((now[3] + now[5]) / 2)\n\n def computeReturn(self, x):\n \"\"\"Compute the X-minute return Parameters ------ x : int Minute\"\"\"\n xInMiliSec = 60 * 1000 * x\n r = []\n find_t = self._ts[0] + xInMiliSec\n pre_t = find_t\n pre_p = self._pr[0]\n base_t = self._ts[0]\n base_p = self._pr[0]\n for i, p in enumerate(self._pr):\n if self._ts[i] > find_t:\n if base_t == pre_t:\n find_t = self._ts[i] + xInMiliSec\n else:\n r.append(pre_p / base_p - 1)\n find_t += xInMiliSec\n base_t = self._ts[i]\n base_p = p\n elif self._ts[i] == find_t:\n r.append(p / base_p - 1)\n base_p = p\n find_t += xInMiliSec\n pre_t = self._ts\n pre_p = p\n return r\n", "source": "the_stack_v2_python_sparse", "source_path": "Homework1/PartB/ReturnComputer.py", "source_repo": "nateehuang/AlgorTradingGithub", "split": "val", 
"star_events_count": 0} {"blob_id": "135ec78fe3799f98774f33ad18c15172e93e9447", "bodies": ["common_flags.consumer_service_flag(suffix='to disable').AddToParser(parser)\nbase.ASYNC_FLAG.AddToParser(parser)\nparser.add_argument('--force', action='store_true', help='If specified, the disable call will proceed even if there are enabled services which depend on the service to be disabled. Forcing the call means that the services which depend on the service to be disabled will also be disabled.')", "project = properties.VALUES.core.project.Get(required=True)\nfor service_name in args.service:\n service_name = arg_parsers.GetServiceNameFromArg(service_name)\n protected_msg = serviceusage.GetProtectedServiceWarning(service_name)\n if protected_msg:\n if args.IsSpecified('quiet'):\n raise console_io.RequiredPromptError()\n do_disable = console_io.PromptContinue(protected_msg, default=False, throw_if_unattended=True)\n if not do_disable:\n continue\n op = serviceusage.DisableApiCall(project, service_name, args.force)\n if op.done:\n return\n if args.async_:\n cmd = OP_WAIT_CMD.format(op.name)\n log.status.Print('Asynchronous operation is in progress... Use the following command to wait for its completion:\\n {0}'.format(cmd))\n return\n op = services_util.WaitOperation(op.name, serviceusage.GetOperation)\n services_util.PrintOperation(op)"], "bodies_text": "<|body_start_0|>\n common_flags.consumer_service_flag(suffix='to disable').AddToParser(parser)\n base.ASYNC_FLAG.AddToParser(parser)\n parser.add_argument('--force', action='store_true', help='If specified, the disable call will proceed even if there are enabled services which depend on the service to be disabled. Forcing the call means that the services which depend on the service to be disabled will also be disabled.')\n<|end_body_0|>\n\n<|body_start_1|>\n project = properties.VALUES.core.project.Get(required=True)\n for service_name in args.service:\n service_name = arg_parsers.GetServiceNameFromArg(service_name)\n protected_msg = serviceusage.GetProtectedServiceWarning(service_name)\n if protected_msg:\n if args.IsSpecified('quiet'):\n raise console_io.RequiredPromptError()\n do_disable = console_io.PromptContinue(protected_msg, default=False, throw_if_unattended=True)\n if not do_disable:\n continue\n op = serviceusage.DisableApiCall(project, service_name, args.force)\n if op.done:\n return\n if args.async_:\n cmd = OP_WAIT_CMD.format(op.name)\n log.status.Print('Asynchronous operation is in progress... Use the following command to wait for its completion:\\n {0}'.format(cmd))\n return\n op = services_util.WaitOperation(op.name, serviceusage.GetOperation)\n services_util.PrintOperation(op)\n<|end_body_1|>\n", "class_docstring": "Disable a service for consumption for a project. This command disables one or more previously-enabled services for consumption. 
To see a list of the enabled services for a project, run: $ {parent_command} list More information on listing services can be found at: https://cloud.google.com/service-usage/docs/list-services and on disabling a service at: https://cloud.google.com/service-usage/docs/enable-disable ## EXAMPLES To disable a service called `my-consumed-service` for the active project, run: $ {command} my-consumed-service To run the same command asynchronously (non-blocking), run: $ {command} my-consumed-service --async", "class_name": "Disable", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Disable:\n \"\"\"Disable a service for consumption for a project. This command disables one or more previously-enabled services for consumption. To see a list of the enabled services for a project, run: $ {parent_command} list More information on listing services can be found at: https://cloud.google.com/service-usage/docs/list-services and on disabling a service at: https://cloud.google.com/service-usage/docs/enable-disable ## EXAMPLES To disable a service called `my-consumed-service` for the active project, run: $ {command} my-consumed-service To run the same command asynchronously (non-blocking), run: $ {command} my-consumed-service --async\"\"\"\n\n def Args(parser):\n \"\"\"Args is called by calliope to gather arguments for this command. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"Run 'services disable'. Args: args: argparse.Namespace, The arguments that this command was invoked with. Returns: Nothing.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n common_flags.consumer_service_flag(suffix='to disable').AddToParser(parser)\n base.ASYNC_FLAG.AddToParser(parser)\n parser.add_argument('--force', action='store_true', help='If specified, the disable call will proceed even if there are enabled services which depend on the service to be disabled. Forcing the call means that the services which depend on the service to be disabled will also be disabled.')\n<|end_body_0|>\n\n<|body_start_1|>\n project = properties.VALUES.core.project.Get(required=True)\n for service_name in args.service:\n service_name = arg_parsers.GetServiceNameFromArg(service_name)\n protected_msg = serviceusage.GetProtectedServiceWarning(service_name)\n if protected_msg:\n if args.IsSpecified('quiet'):\n raise console_io.RequiredPromptError()\n do_disable = console_io.PromptContinue(protected_msg, default=False, throw_if_unattended=True)\n if not do_disable:\n continue\n op = serviceusage.DisableApiCall(project, service_name, args.force)\n if op.done:\n return\n if args.async_:\n cmd = OP_WAIT_CMD.format(op.name)\n log.status.Print('Asynchronous operation is in progress... Use the following command to wait for its completion:\\n {0}'.format(cmd))\n return\n op = services_util.WaitOperation(op.name, serviceusage.GetOperation)\n services_util.PrintOperation(op)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000380", "length_bytes": 4048, "license_type": "permissive", "methods": [{"docstring": "Args is called by calliope to gather arguments for this command. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. 
Positional arguments are allowed.", "name": "Args", "signature": "def Args(parser)"}, {"docstring": "Run 'services disable'. Args: args: argparse.Namespace, The arguments that this command was invoked with. Returns: Nothing.", "name": "Run", "signature": "def Run(self, args)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006035", "prompt": "Implement the Python class `Disable` described below.\n\nClass description:\nDisable a service for consumption for a project. This command disables one or more previously-enabled services for consumption. To see a list of the enabled services for a project, run: $ {parent_command} list More information on listing services can be found at: https://cloud.google.com/service-usage/docs/list-services and on disabling a service at: https://cloud.google.com/service-usage/docs/enable-disable ## EXAMPLES To disable a service called `my-consumed-service` for the active project, run: $ {command} my-consumed-service To run the same command asynchronously (non-blocking), run: $ {command} my-consumed-service --async\n\nMethod signatures and docstrings:\n- def Args(parser): Args is called by calliope to gather arguments for this command. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.\n- def Run(self, args): Run 'services disable'. Args: args: argparse.Namespace, The arguments that this command was invoked with. Returns: Nothing.", "prompted_full_text": "Implement the Python class `Disable` described below.\n\nClass description:\nDisable a service for consumption for a project. This command disables one or more previously-enabled services for consumption. To see a list of the enabled services for a project, run: $ {parent_command} list More information on listing services can be found at: https://cloud.google.com/service-usage/docs/list-services and on disabling a service at: https://cloud.google.com/service-usage/docs/enable-disable ## EXAMPLES To disable a service called `my-consumed-service` for the active project, run: $ {command} my-consumed-service To run the same command asynchronously (non-blocking), run: $ {command} my-consumed-service --async\n\nMethod signatures and docstrings:\n- def Args(parser): Args is called by calliope to gather arguments for this command. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.\n- def Run(self, args): Run 'services disable'. Args: args: argparse.Namespace, The arguments that this command was invoked with. Returns: Nothing.\n\n<|skeleton|>\nclass Disable:\n \"\"\"Disable a service for consumption for a project. This command disables one or more previously-enabled services for consumption. To see a list of the enabled services for a project, run: $ {parent_command} list More information on listing services can be found at: https://cloud.google.com/service-usage/docs/list-services and on disabling a service at: https://cloud.google.com/service-usage/docs/enable-disable ## EXAMPLES To disable a service called `my-consumed-service` for the active project, run: $ {command} my-consumed-service To run the same command asynchronously (non-blocking), run: $ {command} my-consumed-service --async\"\"\"\n\n def Args(parser):\n \"\"\"Args is called by calliope to gather arguments for this command. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. 
Positional arguments are allowed.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"Run 'services disable'. Args: args: argparse.Namespace, The arguments that this command was invoked with. Returns: Nothing.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n common_flags.consumer_service_flag(suffix='to disable').AddToParser(parser)\n base.ASYNC_FLAG.AddToParser(parser)\n parser.add_argument('--force', action='store_true', help='If specified, the disable call will proceed even if there are enabled services which depend on the service to be disabled. Forcing the call means that the services which depend on the service to be disabled will also be disabled.')\n<|end_body_0|>\n\n<|body_start_1|>\n project = properties.VALUES.core.project.Get(required=True)\n for service_name in args.service:\n service_name = arg_parsers.GetServiceNameFromArg(service_name)\n protected_msg = serviceusage.GetProtectedServiceWarning(service_name)\n if protected_msg:\n if args.IsSpecified('quiet'):\n raise console_io.RequiredPromptError()\n do_disable = console_io.PromptContinue(protected_msg, default=False, throw_if_unattended=True)\n if not do_disable:\n continue\n op = serviceusage.DisableApiCall(project, service_name, args.force)\n if op.done:\n return\n if args.async_:\n cmd = OP_WAIT_CMD.format(op.name)\n log.status.Print('Asynchronous operation is in progress... Use the following command to wait for its completion:\\n {0}'.format(cmd))\n return\n op = services_util.WaitOperation(op.name, serviceusage.GetOperation)\n services_util.PrintOperation(op)\n<|end_body_1|>\n", "revision_id": "85bb264e273568b5a0408f733b403c56373e2508", "skeleton": "<|skeleton|>\nclass Disable:\n \"\"\"Disable a service for consumption for a project. This command disables one or more previously-enabled services for consumption. To see a list of the enabled services for a project, run: $ {parent_command} list More information on listing services can be found at: https://cloud.google.com/service-usage/docs/list-services and on disabling a service at: https://cloud.google.com/service-usage/docs/enable-disable ## EXAMPLES To disable a service called `my-consumed-service` for the active project, run: $ {command} my-consumed-service To run the same command asynchronously (non-blocking), run: $ {command} my-consumed-service --async\"\"\"\n\n def Args(parser):\n \"\"\"Args is called by calliope to gather arguments for this command. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"Run 'services disable'. Args: args: argparse.Namespace, The arguments that this command was invoked with. Returns: Nothing.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Disable:\n \"\"\"Disable a service for consumption for a project. This command disables one or more previously-enabled services for consumption. 
To see a list of the enabled services for a project, run: $ {parent_command} list More information on listing services can be found at: https://cloud.google.com/service-usage/docs/list-services and on disabling a service at: https://cloud.google.com/service-usage/docs/enable-disable ## EXAMPLES To disable a service called `my-consumed-service` for the active project, run: $ {command} my-consumed-service To run the same command asynchronously (non-blocking), run: $ {command} my-consumed-service --async\"\"\"\n\n def Args(parser):\n \"\"\"Args is called by calliope to gather arguments for this command. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.\"\"\"\n common_flags.consumer_service_flag(suffix='to disable').AddToParser(parser)\n base.ASYNC_FLAG.AddToParser(parser)\n parser.add_argument('--force', action='store_true', help='If specified, the disable call will proceed even if there are enabled services which depend on the service to be disabled. Forcing the call means that the services which depend on the service to be disabled will also be disabled.')\n\n def Run(self, args):\n \"\"\"Run 'services disable'. Args: args: argparse.Namespace, The arguments that this command was invoked with. Returns: Nothing.\"\"\"\n project = properties.VALUES.core.project.Get(required=True)\n for service_name in args.service:\n service_name = arg_parsers.GetServiceNameFromArg(service_name)\n protected_msg = serviceusage.GetProtectedServiceWarning(service_name)\n if protected_msg:\n if args.IsSpecified('quiet'):\n raise console_io.RequiredPromptError()\n do_disable = console_io.PromptContinue(protected_msg, default=False, throw_if_unattended=True)\n if not do_disable:\n continue\n op = serviceusage.DisableApiCall(project, service_name, args.force)\n if op.done:\n return\n if args.async_:\n cmd = OP_WAIT_CMD.format(op.name)\n log.status.Print('Asynchronous operation is in progress... 
Use the following command to wait for its completion:\\n {0}'.format(cmd))\n return\n op = services_util.WaitOperation(op.name, serviceusage.GetOperation)\n services_util.PrintOperation(op)\n", "source": "the_stack_v2_python_sparse", "source_path": "google-cloud-sdk/lib/surface/services/disable.py", "source_repo": "bopopescu/socialliteapp", "split": "val", "star_events_count": 0} {"blob_id": "eccde246da6de1858ae7c1d888a31f1151cb821c", "bodies": ["super(TfdsInput, self).__init__(*args, **kwargs)\nself.dataset_name = dataset_name\nself.split = split\nself.data_dir = data_dir", "if 'image' in value:\n images = value['image']\nelif 'video' in value:\n images = value['video']\nelse:\n raise ValueError('No \"image\" or \"video\" key found in TFDS datum')\nreturn InputData(images=images, labels=value['label'])", "split = self.split\nif self.mode == enums.ModelMode.TRAIN and self.shard_per_host:\n split = tfds.even_splits(split, n=num_hosts)[current_host_index]\nreturn tfds.load(name=self.dataset_name, split=split, data_dir=self.data_dir, shuffle_files=False)"], "bodies_text": "<|body_start_0|>\n super(TfdsInput, self).__init__(*args, **kwargs)\n self.dataset_name = dataset_name\n self.split = split\n self.data_dir = data_dir\n<|end_body_0|>\n\n<|body_start_1|>\n if 'image' in value:\n images = value['image']\n elif 'video' in value:\n images = value['video']\n else:\n raise ValueError('No \"image\" or \"video\" key found in TFDS datum')\n return InputData(images=images, labels=value['label'])\n<|end_body_1|>\n\n<|body_start_2|>\n split = self.split\n if self.mode == enums.ModelMode.TRAIN and self.shard_per_host:\n split = tfds.even_splits(split, n=num_hosts)[current_host_index]\n return tfds.load(name=self.dataset_name, split=split, data_dir=self.data_dir, shuffle_files=False)\n<|end_body_2|>\n", "class_docstring": "Generates an input_fn that works with TFDS datasets.", "class_name": "TfdsInput", "detected_licenses": ["Apache-2.0", "CC-BY-4.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TfdsInput:\n \"\"\"Generates an input_fn that works with TFDS datasets.\"\"\"\n\n def __init__(self, dataset_name, split, *args, data_dir=None, **kwargs):\n \"\"\"Creates an input_fn for a TFDS dataset. Args: dataset_name: The name of the TFDS dataset, passed as the `name` argument of tfds.load(). split: The split name passed as the `split` argument to tfds.load(). *args: Arguments passed on to CommonInput. data_dir: The directory passed as the `data_dir` argument to tfds.load(). **kwargs: Keyword arguments passed on to CommonInput.\"\"\"\n <|body_0|>\n\n def dataset_parser(self, value):\n \"\"\"Parses a TFDS datum tuple into an InputData instance. Args: value: A dictionary with keys 'image' (or 'video') and 'label'. Returns: An InputData consisting of an image and label. Note: If `value` contains a 'video' key, then the returned images entry will be set to it.\"\"\"\n <|body_1|>\n\n def make_source_dataset(self, current_host_index, num_hosts):\n \"\"\"Makes a dataset of dictionaries of images and labels. Args: current_host_index: current host index. num_hosts: total number of hosts. Returns: A `tf.data.Dataset` object where each dataset element is a dictionary. For image classification datasets, the dictionary will contain an 'image' key with a decoded uint8 image (of shape [height, width, channels]) and a 'label' key with an int64 label. 
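The `Run` body in the `Disable` record just closed follows one pattern per service: look up a protected-service warning, prompt unless the run is unattended, call the disable API, and either return early for `--async` or block until the operation finishes. A self-contained sketch of that prompt-then-act loop, with plain-Python stand-ins for the SDK's `serviceusage` and `console_io` helpers (every name below is illustrative, not the real gcloud API):

    def disable_services(services, force=False, quiet=False,
                         get_warning=lambda name: None,
                         prompt_continue=lambda msg: False,
                         disable_call=lambda name, force: None):
        # Mirrors Disable.Run: warn, prompt, then disable each service.
        for name in services:
            warning = get_warning(name)
            if warning:
                if quiet:
                    # Unattended run: refuse to answer the prompt implicitly.
                    raise RuntimeError('confirmation required but --quiet set')
                if not prompt_continue(warning):
                    continue  # user declined; leave this service enabled
            disable_call(name, force)

    disable_services(['pubsub.googleapis.com'], prompt_continue=lambda m: True)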
For video classification datasets, the dictionary will contain a 'video' key with a decoded uint8 video (of shape [frames, height, width, channels]) and a 'label' key with an int64 label.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(TfdsInput, self).__init__(*args, **kwargs)\n self.dataset_name = dataset_name\n self.split = split\n self.data_dir = data_dir\n<|end_body_0|>\n\n<|body_start_1|>\n if 'image' in value:\n images = value['image']\n elif 'video' in value:\n images = value['video']\n else:\n raise ValueError('No \"image\" or \"video\" key found in TFDS datum')\n return InputData(images=images, labels=value['label'])\n<|end_body_1|>\n\n<|body_start_2|>\n split = self.split\n if self.mode == enums.ModelMode.TRAIN and self.shard_per_host:\n split = tfds.even_splits(split, n=num_hosts)[current_host_index]\n return tfds.load(name=self.dataset_name, split=split, data_dir=self.data_dir, shuffle_files=False)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000381", "length_bytes": 16226, "license_type": "permissive", "methods": [{"docstring": "Creates an input_fn for a TFDS dataset. Args: dataset_name: The name of the TFDS dataset, passed as the `name` argument of tfds.load(). split: The split name passed as the `split` argument to tfds.load(). *args: Arguments passed on to CommonInput. data_dir: The directory passed as the `data_dir` argument to tfds.load(). **kwargs: Keyword arguments passed on to CommonInput.", "name": "__init__", "signature": "def __init__(self, dataset_name, split, *args, data_dir=None, **kwargs)"}, {"docstring": "Parses a TFDS datum tuple into an InputData instance. Args: value: A dictionary with keys 'image' (or 'video') and 'label'. Returns: An InputData consisting of an image and label. Note: If `value` contains a 'video' key, then the returned images entry will be set to it.", "name": "dataset_parser", "signature": "def dataset_parser(self, value)"}, {"docstring": "Makes a dataset of dictionaries of images and labels. Args: current_host_index: current host index. num_hosts: total number of hosts. Returns: A `tf.data.Dataset` object where each dataset element is a dictionary. For image classification datasets, the dictionary will contain an 'image' key with a decoded uint8 image (of shape [height, width, channels]) and a 'label' key with an int64 label. For video classification datasets, the dictionary will contain a 'video' key with a decoded uint8 video (of shape [frames, height, width, channels]) and a 'label' key with an int64 label.", "name": "make_source_dataset", "signature": "def make_source_dataset(self, current_host_index, num_hosts)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000272", "prompt": "Implement the Python class `TfdsInput` described below.\n\nClass description:\nGenerates an input_fn that works with TFDS datasets.\n\nMethod signatures and docstrings:\n- def __init__(self, dataset_name, split, *args, data_dir=None, **kwargs): Creates an input_fn for a TFDS dataset. Args: dataset_name: The name of the TFDS dataset, passed as the `name` argument of tfds.load(). split: The split name passed as the `split` argument to tfds.load(). *args: Arguments passed on to CommonInput. data_dir: The directory passed as the `data_dir` argument to tfds.load(). **kwargs: Keyword arguments passed on to CommonInput.\n- def dataset_parser(self, value): Parses a TFDS datum tuple into an InputData instance. Args: value: A dictionary with keys 'image' (or 'video') and 'label'. 
Returns: An InputData consisting of an image and label. Note: If `value` contains a 'video' key, then the returned images entry will be set to it.\n- def make_source_dataset(self, current_host_index, num_hosts): Makes a dataset of dictionaries of images and labels. Args: current_host_index: current host index. num_hosts: total number of hosts. Returns: A `tf.data.Dataset` object where each dataset element is a dictionary. For image classification datasets, the dictionary will contain an 'image' key with a decoded uint8 image (of shape [height, width, channels]) and a 'label' key with an int64 label. For video classification datasets, the dictionary will contain a 'video' key with a decoded uint8 video (of shape [frames, height, width, channels]) and a 'label' key with an int64 label.", "prompted_full_text": "Implement the Python class `TfdsInput` described below.\n\nClass description:\nGenerates an input_fn that works with TFDS datasets.\n\nMethod signatures and docstrings:\n- def __init__(self, dataset_name, split, *args, data_dir=None, **kwargs): Creates an input_fn for a TFDS dataset. Args: dataset_name: The name of the TFDS dataset, passed as the `name` argument of tfds.load(). split: The split name passed as the `split` argument to tfds.load(). *args: Arguments passed on to CommonInput. data_dir: The directory passed as the `data_dir` argument to tfds.load(). **kwargs: Keyword arguments passed on to CommonInput.\n- def dataset_parser(self, value): Parses a TFDS datum tuple into an InputData instance. Args: value: A dictionary with keys 'image' (or 'video') and 'label'. Returns: An InputData consisting of an image and label. Note: If `value` contains a 'video' key, then the returned images entry will be set to it.\n- def make_source_dataset(self, current_host_index, num_hosts): Makes a dataset of dictionaries of images and labels. Args: current_host_index: current host index. num_hosts: total number of hosts. Returns: A `tf.data.Dataset` object where each dataset element is a dictionary. For image classification datasets, the dictionary will contain an 'image' key with a decoded uint8 image (of shape [height, width, channels]) and a 'label' key with an int64 label. For video classification datasets, the dictionary will contain a 'video' key with a decoded uint8 video (of shape [frames, height, width, channels]) and a 'label' key with an int64 label.\n\n<|skeleton|>\nclass TfdsInput:\n \"\"\"Generates an input_fn that works with TFDS datasets.\"\"\"\n\n def __init__(self, dataset_name, split, *args, data_dir=None, **kwargs):\n \"\"\"Creates an input_fn for a TFDS dataset. Args: dataset_name: The name of the TFDS dataset, passed as the `name` argument of tfds.load(). split: The split name passed as the `split` argument to tfds.load(). *args: Arguments passed on to CommonInput. data_dir: The directory passed as the `data_dir` argument to tfds.load(). **kwargs: Keyword arguments passed on to CommonInput.\"\"\"\n <|body_0|>\n\n def dataset_parser(self, value):\n \"\"\"Parses a TFDS datum tuple into an InputData instance. Args: value: A dictionary with keys 'image' (or 'video') and 'label'. Returns: An InputData consisting of an image and label. Note: If `value` contains a 'video' key, then the returned images entry will be set to it.\"\"\"\n <|body_1|>\n\n def make_source_dataset(self, current_host_index, num_hosts):\n \"\"\"Makes a dataset of dictionaries of images and labels. Args: current_host_index: current host index. num_hosts: total number of hosts. 
Returns: A `tf.data.Dataset` object where each dataset element is a dictionary. For image classification datasets, the dictionary will contain an 'image' key with a decoded uint8 image (of shape [height, width, channels]) and a 'label' key with an int64 label. For video classification datasets, the dictionary will contain a 'video' key with a decoded uint8 video (of shape [frames, height, width, channels]) and a 'label' key with an int64 label.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(TfdsInput, self).__init__(*args, **kwargs)\n self.dataset_name = dataset_name\n self.split = split\n self.data_dir = data_dir\n<|end_body_0|>\n\n<|body_start_1|>\n if 'image' in value:\n images = value['image']\n elif 'video' in value:\n images = value['video']\n else:\n raise ValueError('No \"image\" or \"video\" key found in TFDS datum')\n return InputData(images=images, labels=value['label'])\n<|end_body_1|>\n\n<|body_start_2|>\n split = self.split\n if self.mode == enums.ModelMode.TRAIN and self.shard_per_host:\n split = tfds.even_splits(split, n=num_hosts)[current_host_index]\n return tfds.load(name=self.dataset_name, split=split, data_dir=self.data_dir, shuffle_files=False)\n<|end_body_2|>\n", "revision_id": "5573d9c5822f4e866b6692769963ae819cb3f10d", "skeleton": "<|skeleton|>\nclass TfdsInput:\n \"\"\"Generates an input_fn that works with TFDS datasets.\"\"\"\n\n def __init__(self, dataset_name, split, *args, data_dir=None, **kwargs):\n \"\"\"Creates an input_fn for a TFDS dataset. Args: dataset_name: The name of the TFDS dataset, passed as the `name` argument of tfds.load(). split: The split name passed as the `split` argument to tfds.load(). *args: Arguments passed on to CommonInput. data_dir: The directory passed as the `data_dir` argument to tfds.load(). **kwargs: Keyword arguments passed on to CommonInput.\"\"\"\n <|body_0|>\n\n def dataset_parser(self, value):\n \"\"\"Parses a TFDS datum tuple into an InputData instance. Args: value: A dictionary with keys 'image' (or 'video') and 'label'. Returns: An InputData consisting of an image and label. Note: If `value` contains a 'video' key, then the returned images entry will be set to it.\"\"\"\n <|body_1|>\n\n def make_source_dataset(self, current_host_index, num_hosts):\n \"\"\"Makes a dataset of dictionaries of images and labels. Args: current_host_index: current host index. num_hosts: total number of hosts. Returns: A `tf.data.Dataset` object where each dataset element is a dictionary. For image classification datasets, the dictionary will contain an 'image' key with a decoded uint8 image (of shape [height, width, channels]) and a 'label' key with an int64 label. For video classification datasets, the dictionary will contain a 'video' key with a decoded uint8 video (of shape [frames, height, width, channels]) and a 'label' key with an int64 label.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TfdsInput:\n \"\"\"Generates an input_fn that works with TFDS datasets.\"\"\"\n\n def __init__(self, dataset_name, split, *args, data_dir=None, **kwargs):\n \"\"\"Creates an input_fn for a TFDS dataset. Args: dataset_name: The name of the TFDS dataset, passed as the `name` argument of tfds.load(). split: The split name passed as the `split` argument to tfds.load(). *args: Arguments passed on to CommonInput. data_dir: The directory passed as the `data_dir` argument to tfds.load(). 
**kwargs: Keyword arguments passed on to CommonInput.\"\"\"\n super(TfdsInput, self).__init__(*args, **kwargs)\n self.dataset_name = dataset_name\n self.split = split\n self.data_dir = data_dir\n\n def dataset_parser(self, value):\n \"\"\"Parses a TFDS datum tuple into an InputData instance. Args: value: A dictionary with keys 'image' (or 'video') and 'label'. Returns: An InputData consisting of an image and label. Note: If `value` contains a 'video' key, then the returned images entry will be set to it.\"\"\"\n if 'image' in value:\n images = value['image']\n elif 'video' in value:\n images = value['video']\n else:\n raise ValueError('No \"image\" or \"video\" key found in TFDS datum')\n return InputData(images=images, labels=value['label'])\n\n def make_source_dataset(self, current_host_index, num_hosts):\n \"\"\"Makes a dataset of dictionaries of images and labels. Args: current_host_index: current host index. num_hosts: total number of hosts. Returns: A `tf.data.Dataset` object where each dataset element is a dictionary. For image classification datasets, the dictionary will contain an 'image' key with a decoded uint8 image (of shape [height, width, channels]) and a 'label' key with an int64 label. For video classification datasets, the dictionary will contain a 'video' key with a decoded uint8 video (of shape [frames, height, width, channels]) and a 'label' key with an int64 label.\"\"\"\n split = self.split\n if self.mode == enums.ModelMode.TRAIN and self.shard_per_host:\n split = tfds.even_splits(split, n=num_hosts)[current_host_index]\n return tfds.load(name=self.dataset_name, split=split, data_dir=self.data_dir, shuffle_files=False)\n", "source": "the_stack_v2_python_sparse", "source_path": "supcon/inputs.py", "source_repo": "Jimmy-INL/google-research", "split": "val", "star_events_count": 1} {"blob_id": "9315fa507033fe98749a343224ffdaa28fe43dda", "bodies": ["table = self.tables[table_name]\nupdate_dict = _clean_dict(update_dict, table.schema)\ntb_func = getattr(self.tensorboard, 'add_%s' % summary_type)\nstep = step if step else table.nrows\nfor name, value in update_dict.items():\n tb_func('/'.join([table_name, name]), value, step)", "table = self.tables[table_name]\nupdate_dict = _clean_dict(update_dict, table.schema)\ntb_func = getattr(self.tensorboard, 'add_%s' % summary_type)\nstep = step if step else table.nrows\nfor name, value in update_dict.items():\n tb_func('/'.join([table_name, name]), value, step)\ntable.update_row(update_dict)", "if data_save_type == 'object':\n return self.tables[table].get_object(self.tables[table].df[key].iloc[iteration], **kwargs)\nelif data_save_type == 'state_dict':\n return self.tables[table].get_state_dict(self.tables[table].df[key].iloc[iteration], **kwargs)\nelif data_save_type == 'pickle':\n return self.tables[table].get_pickle(self.tables[table].df[key].iloc[iteration], **kwargs)\nelse:\n return self.tables[table].df[key].iloc[iteration]"], "bodies_text": "<|body_start_0|>\n table = self.tables[table_name]\n update_dict = _clean_dict(update_dict, table.schema)\n tb_func = getattr(self.tensorboard, 'add_%s' % summary_type)\n step = step if step else table.nrows\n for name, value in update_dict.items():\n tb_func('/'.join([table_name, name]), value, step)\n<|end_body_0|>\n\n<|body_start_1|>\n table = self.tables[table_name]\n update_dict = _clean_dict(update_dict, table.schema)\n tb_func = getattr(self.tensorboard, 'add_%s' % summary_type)\n step = step if step else table.nrows\n for name, value in update_dict.items():\n 
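The one subtle line in the `TfdsInput.make_source_dataset` body above is the sharding: `tfds.even_splits(split, n=num_hosts)` expands a single split name into `n` disjoint subsplit specs, and indexing by `current_host_index` gives each host its own slice of the files. In isolation the selection looks like this (assuming `tensorflow_datasets` is installed; 'mnist' is only a stand-in dataset name):

    import tensorflow_datasets as tfds

    num_hosts, current_host_index = 4, 1
    subsplits = tfds.even_splits('train', n=num_hosts)  # 4 disjoint specs
    split = subsplits[current_host_index]               # this host's shard
    ds = tfds.load(name='mnist', split=split, shuffle_files=False)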
tb_func('/'.join([table_name, name]), value, step)\n table.update_row(update_dict)\n<|end_body_1|>\n\n<|body_start_2|>\n if data_save_type == 'object':\n return self.tables[table].get_object(self.tables[table].df[key].iloc[iteration], **kwargs)\n elif data_save_type == 'state_dict':\n return self.tables[table].get_state_dict(self.tables[table].df[key].iloc[iteration], **kwargs)\n elif data_save_type == 'pickle':\n return self.tables[table].get_pickle(self.tables[table].df[key].iloc[iteration], **kwargs)\n else:\n return self.tables[table].df[key].iloc[iteration]\n<|end_body_2|>\n", "class_docstring": "The following class is derived from cox. https://github.com/MadryLab/cox/blob/master/cox/store.py Copyright (c) 2018 Andrew Ilyas, Logan Engstrom, licensed under the MIT license, cf. 3rd-party-licenses.txt file in the root directory of this source tree.", "class_name": "CustomStore", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CustomStore:\n \"\"\"The following class is derived from cox. https://github.com/MadryLab/cox/blob/master/cox/store.py Copyright (c) 2018 Andrew Ilyas, Logan Engstrom, licensed under the MIT license, cf. 3rd-party-licenses.txt file in the root directory of this source tree.\"\"\"\n\n def log_tb(self, table_name, update_dict, summary_type='scalar', step=None):\n \"\"\"Log to only tensorboard. Args: table_name (str) : which table to log to update_dict (dict) : values to log and store as a dictionary of column mapping to value. summary_type (str) : what type of summary to log to tensorboard as step: which step index to insert datapoint\"\"\"\n <|body_0|>\n\n def log_table_and_tb(self, table_name, update_dict, summary_type='scalar', step=None):\n \"\"\"Log to a table and also a tensorboard. Args: table_name (str) : which table to log to update_dict (dict) : values to log and store as a dictionary of column mapping to value. summary_type (str) : what type of summary to log to tensorboard as step: which step index to insert datapoint\"\"\"\n <|body_1|>\n\n def load(self, table: str, key: str, data_save_type: str, iteration: int=-1, **kwargs):\n \"\"\"Load data from store. table: name of table to load from key: key of value to load data_save_type: Type of the data to be loaded. One of 'object' 'state_dict', or 'pickle'. 
iteration: Iteration checkpoint to load kwargs: Returns:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n table = self.tables[table_name]\n update_dict = _clean_dict(update_dict, table.schema)\n tb_func = getattr(self.tensorboard, 'add_%s' % summary_type)\n step = step if step else table.nrows\n for name, value in update_dict.items():\n tb_func('/'.join([table_name, name]), value, step)\n<|end_body_0|>\n\n<|body_start_1|>\n table = self.tables[table_name]\n update_dict = _clean_dict(update_dict, table.schema)\n tb_func = getattr(self.tensorboard, 'add_%s' % summary_type)\n step = step if step else table.nrows\n for name, value in update_dict.items():\n tb_func('/'.join([table_name, name]), value, step)\n table.update_row(update_dict)\n<|end_body_1|>\n\n<|body_start_2|>\n if data_save_type == 'object':\n return self.tables[table].get_object(self.tables[table].df[key].iloc[iteration], **kwargs)\n elif data_save_type == 'state_dict':\n return self.tables[table].get_state_dict(self.tables[table].df[key].iloc[iteration], **kwargs)\n elif data_save_type == 'pickle':\n return self.tables[table].get_pickle(self.tables[table].df[key].iloc[iteration], **kwargs)\n else:\n return self.tables[table].df[key].iloc[iteration]\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000382", "length_bytes": 3794, "license_type": "no_license", "methods": [{"docstring": "Log to only tensorboard. Args: table_name (str) : which table to log to update_dict (dict) : values to log and store as a dictionary of column mapping to value. summary_type (str) : what type of summary to log to tensorboard as step: which step index to insert datapoint", "name": "log_tb", "signature": "def log_tb(self, table_name, update_dict, summary_type='scalar', step=None)"}, {"docstring": "Log to a table and also a tensorboard. Args: table_name (str) : which table to log to update_dict (dict) : values to log and store as a dictionary of column mapping to value. summary_type (str) : what type of summary to log to tensorboard as step: which step index to insert datapoint", "name": "log_table_and_tb", "signature": "def log_table_and_tb(self, table_name, update_dict, summary_type='scalar', step=None)"}, {"docstring": "Load data from store. table: name of table to load from key: key of value to load data_save_type: Type of the data to be loaded. One of 'object' 'state_dict', or 'pickle'. iteration: Iteration checkpoint to load kwargs: Returns:", "name": "load", "signature": "def load(self, table: str, key: str, data_save_type: str, iteration: int=-1, **kwargs)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_val_000406", "prompt": "Implement the Python class `CustomStore` described below.\n\nClass description:\nThe following class is derived from cox. https://github.com/MadryLab/cox/blob/master/cox/store.py Copyright (c) 2018 Andrew Ilyas, Logan Engstrom, licensed under the MIT license, cf. 3rd-party-licenses.txt file in the root directory of this source tree.\n\nMethod signatures and docstrings:\n- def log_tb(self, table_name, update_dict, summary_type='scalar', step=None): Log to only tensorboard. Args: table_name (str) : which table to log to update_dict (dict) : values to log and store as a dictionary of column mapping to value. summary_type (str) : what type of summary to log to tensorboard as step: which step index to insert datapoint\n- def log_table_and_tb(self, table_name, update_dict, summary_type='scalar', step=None): Log to a table and also a tensorboard. 
Args: table_name (str) : which table to log to update_dict (dict) : values to log and store as a dictionary of column mapping to value. summary_type (str) : what type of summary to log to tensorboard as step: which step index to insert datapoint\n- def load(self, table: str, key: str, data_save_type: str, iteration: int=-1, **kwargs): Load data from store. table: name of table to load from key: key of value to load data_save_type: Type of the data to be loaded. One of 'object' 'state_dict', or 'pickle'. iteration: Iteration checkpoint to load kwargs: Returns:", "prompted_full_text": "Implement the Python class `CustomStore` described below.\n\nClass description:\nThe following class is derived from cox. https://github.com/MadryLab/cox/blob/master/cox/store.py Copyright (c) 2018 Andrew Ilyas, Logan Engstrom, licensed under the MIT license, cf. 3rd-party-licenses.txt file in the root directory of this source tree.\n\nMethod signatures and docstrings:\n- def log_tb(self, table_name, update_dict, summary_type='scalar', step=None): Log to only tensorboard. Args: table_name (str) : which table to log to update_dict (dict) : values to log and store as a dictionary of column mapping to value. summary_type (str) : what type of summary to log to tensorboard as step: which step index to insert datapoint\n- def log_table_and_tb(self, table_name, update_dict, summary_type='scalar', step=None): Log to a table and also a tensorboard. Args: table_name (str) : which table to log to update_dict (dict) : values to log and store as a dictionary of column mapping to value. summary_type (str) : what type of summary to log to tensorboard as step: which step index to insert datapoint\n- def load(self, table: str, key: str, data_save_type: str, iteration: int=-1, **kwargs): Load data from store. table: name of table to load from key: key of value to load data_save_type: Type of the data to be loaded. One of 'object' 'state_dict', or 'pickle'. iteration: Iteration checkpoint to load kwargs: Returns:\n\n<|skeleton|>\nclass CustomStore:\n \"\"\"The following class is derived from cox. https://github.com/MadryLab/cox/blob/master/cox/store.py Copyright (c) 2018 Andrew Ilyas, Logan Engstrom, licensed under the MIT license, cf. 3rd-party-licenses.txt file in the root directory of this source tree.\"\"\"\n\n def log_tb(self, table_name, update_dict, summary_type='scalar', step=None):\n \"\"\"Log to only tensorboard. Args: table_name (str) : which table to log to update_dict (dict) : values to log and store as a dictionary of column mapping to value. summary_type (str) : what type of summary to log to tensorboard as step: which step index to insert datapoint\"\"\"\n <|body_0|>\n\n def log_table_and_tb(self, table_name, update_dict, summary_type='scalar', step=None):\n \"\"\"Log to a table and also a tensorboard. Args: table_name (str) : which table to log to update_dict (dict) : values to log and store as a dictionary of column mapping to value. summary_type (str) : what type of summary to log to tensorboard as step: which step index to insert datapoint\"\"\"\n <|body_1|>\n\n def load(self, table: str, key: str, data_save_type: str, iteration: int=-1, **kwargs):\n \"\"\"Load data from store. table: name of table to load from key: key of value to load data_save_type: Type of the data to be loaded. One of 'object' 'state_dict', or 'pickle'. 
iteration: Iteration checkpoint to load kwargs: Returns:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n table = self.tables[table_name]\n update_dict = _clean_dict(update_dict, table.schema)\n tb_func = getattr(self.tensorboard, 'add_%s' % summary_type)\n step = step if step else table.nrows\n for name, value in update_dict.items():\n tb_func('/'.join([table_name, name]), value, step)\n<|end_body_0|>\n\n<|body_start_1|>\n table = self.tables[table_name]\n update_dict = _clean_dict(update_dict, table.schema)\n tb_func = getattr(self.tensorboard, 'add_%s' % summary_type)\n step = step if step else table.nrows\n for name, value in update_dict.items():\n tb_func('/'.join([table_name, name]), value, step)\n table.update_row(update_dict)\n<|end_body_1|>\n\n<|body_start_2|>\n if data_save_type == 'object':\n return self.tables[table].get_object(self.tables[table].df[key].iloc[iteration], **kwargs)\n elif data_save_type == 'state_dict':\n return self.tables[table].get_state_dict(self.tables[table].df[key].iloc[iteration], **kwargs)\n elif data_save_type == 'pickle':\n return self.tables[table].get_pickle(self.tables[table].df[key].iloc[iteration], **kwargs)\n else:\n return self.tables[table].df[key].iloc[iteration]\n<|end_body_2|>\n", "revision_id": "978de314897904c9014209d479c03dc3509f7dc0", "skeleton": "<|skeleton|>\nclass CustomStore:\n \"\"\"The following class is derived from cox. https://github.com/MadryLab/cox/blob/master/cox/store.py Copyright (c) 2018 Andrew Ilyas, Logan Engstrom, licensed under the MIT license, cf. 3rd-party-licenses.txt file in the root directory of this source tree.\"\"\"\n\n def log_tb(self, table_name, update_dict, summary_type='scalar', step=None):\n \"\"\"Log to only tensorboard. Args: table_name (str) : which table to log to update_dict (dict) : values to log and store as a dictionary of column mapping to value. summary_type (str) : what type of summary to log to tensorboard as step: which step index to insert datapoint\"\"\"\n <|body_0|>\n\n def log_table_and_tb(self, table_name, update_dict, summary_type='scalar', step=None):\n \"\"\"Log to a table and also a tensorboard. Args: table_name (str) : which table to log to update_dict (dict) : values to log and store as a dictionary of column mapping to value. summary_type (str) : what type of summary to log to tensorboard as step: which step index to insert datapoint\"\"\"\n <|body_1|>\n\n def load(self, table: str, key: str, data_save_type: str, iteration: int=-1, **kwargs):\n \"\"\"Load data from store. table: name of table to load from key: key of value to load data_save_type: Type of the data to be loaded. One of 'object' 'state_dict', or 'pickle'. iteration: Iteration checkpoint to load kwargs: Returns:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CustomStore:\n \"\"\"The following class is derived from cox. https://github.com/MadryLab/cox/blob/master/cox/store.py Copyright (c) 2018 Andrew Ilyas, Logan Engstrom, licensed under the MIT license, cf. 3rd-party-licenses.txt file in the root directory of this source tree.\"\"\"\n\n def log_tb(self, table_name, update_dict, summary_type='scalar', step=None):\n \"\"\"Log to only tensorboard. Args: table_name (str) : which table to log to update_dict (dict) : values to log and store as a dictionary of column mapping to value. 
summary_type (str) : what type of summary to log to tensorboard as step: which step index to insert datapoint\"\"\"\n table = self.tables[table_name]\n update_dict = _clean_dict(update_dict, table.schema)\n tb_func = getattr(self.tensorboard, 'add_%s' % summary_type)\n step = step if step else table.nrows\n for name, value in update_dict.items():\n tb_func('/'.join([table_name, name]), value, step)\n\n def log_table_and_tb(self, table_name, update_dict, summary_type='scalar', step=None):\n \"\"\"Log to a table and also a tensorboard. Args: table_name (str) : which table to log to update_dict (dict) : values to log and store as a dictionary of column mapping to value. summary_type (str) : what type of summary to log to tensorboard as step: which step index to insert datapoint\"\"\"\n table = self.tables[table_name]\n update_dict = _clean_dict(update_dict, table.schema)\n tb_func = getattr(self.tensorboard, 'add_%s' % summary_type)\n step = step if step else table.nrows\n for name, value in update_dict.items():\n tb_func('/'.join([table_name, name]), value, step)\n table.update_row(update_dict)\n\n def load(self, table: str, key: str, data_save_type: str, iteration: int=-1, **kwargs):\n \"\"\"Load data from store. table: name of table to load from key: key of value to load data_save_type: Type of the data to be loaded. One of 'object' 'state_dict', or 'pickle'. iteration: Iteration checkpoint to load kwargs: Returns:\"\"\"\n if data_save_type == 'object':\n return self.tables[table].get_object(self.tables[table].df[key].iloc[iteration], **kwargs)\n elif data_save_type == 'state_dict':\n return self.tables[table].get_state_dict(self.tables[table].df[key].iloc[iteration], **kwargs)\n elif data_save_type == 'pickle':\n return self.tables[table].get_pickle(self.tables[table].df[key].iloc[iteration], **kwargs)\n else:\n return self.tables[table].df[key].iloc[iteration]\n", "source": "the_stack_v2_python_sparse", "source_path": "playground/trl/projections/custom_store.py", "source_repo": "NiklasFreymuth/bayesian-aggregation-for-swarm-reinforcement-learning", "split": "val", "star_events_count": 0} {"blob_id": "da22399d92212c0be8c22adfd67d186592213355", "bodies": ["format_convert = {'headsUp': ['invite', 'Heads Up'], 'available': ['invite', 'Available?'], 'leave': ['leave', 'Left?'], 'return': ['return', 'Returned?'], 'info': ['info', None], 'broadcast': ['broadcast', None], 'test': ['test', 'Test']}\ninitial = {}\ninitial['author'] = self.request.user.pk\ninitial['type'] = 'std_page'\ninitial['format'] = 'page'\nmembers = None\npage_format = self.request.GET.get('page_format', self.page_format)\nperiod_format = format_convert[page_format][0]\ninitial['period_format'] = period_format\nrsvp_name = format_convert[page_format][1]\nself.rsvp_template = None\nif rsvp_name is not None:\n try:\n self.rsvp_template = RsvpTemplate.objects.get(name=rsvp_name)\n initial['rsvp_template'] = self.rsvp_template\n except RsvpTemplate.DoesNotExist:\n logger.error('RsvpTemplate {} not found for format: {}'.format(rsvp_name, page_format))\nself.initial = initial\nreturn members", "context = super().get_context_data(**kwargs)\nif self.rsvp_template and self.initial['type'] != 'repage':\n self.initial['input'] = '{} {}'.format(self.initial['input'], self.rsvp_template.text)\ninstructions = {'invite': 'Page the team to invite them to the OP. Members already signed up still get a page.', 'info': 'Send an informational page to people signed up for the OP. 
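Both logging methods in the `CustomStore` record above hinge on one trick: `getattr(self.tensorboard, 'add_%s' % summary_type)` resolves the summary type to the matching writer method at call time, so a single helper covers `add_scalar`, `add_histogram`, `add_image`, and friends. A minimal standalone version against the PyTorch `SummaryWriter` (assuming `torch` with tensorboard support is installed; the tags and values are made up):

    from torch.utils.tensorboard import SummaryWriter

    writer = SummaryWriter('runs/demo')

    def log_tb(table_name, update_dict, summary_type='scalar', step=0):
        tb_func = getattr(writer, 'add_%s' % summary_type)  # e.g. add_scalar
        for name, value in update_dict.items():
            tb_func('/'.join([table_name, name]), value, step)

    log_tb('train', {'loss': 0.42, 'acc': 0.91}, step=10)
    writer.close()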
No response expected.', 'broadcast': 'Send an informational page to the whole team. No response expected.', 'leave': 'Transit page to people signed up for the event. Responses will mark the participant as departed.', 'return': 'Transit page to people signed up for the event. Responses will mark the participant as returned home.', 'test': 'Test page. DO NOT USE IN A REAL CALLOUT.'}\nself.initial['instructions'] = instructions.get(self.initial['period_format'], 'WARNING: Unknown period_format')\nreturn {**context, **self.initial}"], "bodies_text": "<|body_start_0|>\n format_convert = {'headsUp': ['invite', 'Heads Up'], 'available': ['invite', 'Available?'], 'leave': ['leave', 'Left?'], 'return': ['return', 'Returned?'], 'info': ['info', None], 'broadcast': ['broadcast', None], 'test': ['test', 'Test']}\n initial = {}\n initial['author'] = self.request.user.pk\n initial['type'] = 'std_page'\n initial['format'] = 'page'\n members = None\n page_format = self.request.GET.get('page_format', self.page_format)\n period_format = format_convert[page_format][0]\n initial['period_format'] = period_format\n rsvp_name = format_convert[page_format][1]\n self.rsvp_template = None\n if rsvp_name is not None:\n try:\n self.rsvp_template = RsvpTemplate.objects.get(name=rsvp_name)\n initial['rsvp_template'] = self.rsvp_template\n except RsvpTemplate.DoesNotExist:\n logger.error('RsvpTemplate {} not found for format: {}'.format(rsvp_name, page_format))\n self.initial = initial\n return members\n<|end_body_0|>\n\n<|body_start_1|>\n context = super().get_context_data(**kwargs)\n if self.rsvp_template and self.initial['type'] != 'repage':\n self.initial['input'] = '{} {}'.format(self.initial['input'], self.rsvp_template.text)\n instructions = {'invite': 'Page the team to invite them to the OP. Members already signed up still get a page.', 'info': 'Send an informational page to people signed up for the OP. No response expected.', 'broadcast': 'Send an informational page to the whole team. No response expected.', 'leave': 'Transit page to people signed up for the event. Responses will mark the participant as departed.', 'return': 'Transit page to people signed up for the event. Responses will mark the participant as returned home.', 'test': 'Test page. 
DO NOT USE IN A REAL CALLOUT.'}\n self.initial['instructions'] = instructions.get(self.initial['period_format'], 'WARNING: Unknown period_format')\n return {**context, **self.initial}\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MessageCreateBaseView", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MessageCreateBaseView:\n\n def get_queryset(self):\n \"\"\"Return context for standard paging.\"\"\"\n <|body_0|>\n\n def get_context_data(self, **kwargs):\n \"\"\"Add additional useful information.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n format_convert = {'headsUp': ['invite', 'Heads Up'], 'available': ['invite', 'Available?'], 'leave': ['leave', 'Left?'], 'return': ['return', 'Returned?'], 'info': ['info', None], 'broadcast': ['broadcast', None], 'test': ['test', 'Test']}\n initial = {}\n initial['author'] = self.request.user.pk\n initial['type'] = 'std_page'\n initial['format'] = 'page'\n members = None\n page_format = self.request.GET.get('page_format', self.page_format)\n period_format = format_convert[page_format][0]\n initial['period_format'] = period_format\n rsvp_name = format_convert[page_format][1]\n self.rsvp_template = None\n if rsvp_name is not None:\n try:\n self.rsvp_template = RsvpTemplate.objects.get(name=rsvp_name)\n initial['rsvp_template'] = self.rsvp_template\n except RsvpTemplate.DoesNotExist:\n logger.error('RsvpTemplate {} not found for format: {}'.format(rsvp_name, page_format))\n self.initial = initial\n return members\n<|end_body_0|>\n\n<|body_start_1|>\n context = super().get_context_data(**kwargs)\n if self.rsvp_template and self.initial['type'] != 'repage':\n self.initial['input'] = '{} {}'.format(self.initial['input'], self.rsvp_template.text)\n instructions = {'invite': 'Page the team to invite them to the OP. Members already signed up still get a page.', 'info': 'Send an informational page to people signed up for the OP. No response expected.', 'broadcast': 'Send an informational page to the whole team. No response expected.', 'leave': 'Transit page to people signed up for the event. Responses will mark the participant as departed.', 'return': 'Transit page to people signed up for the event. Responses will mark the participant as returned home.', 'test': 'Test page. 
DO NOT USE IN A REAL CALLOUT.'}\n self.initial['instructions'] = instructions.get(self.initial['period_format'], 'WARNING: Unknown period_format')\n return {**context, **self.initial}\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000383", "length_bytes": 19625, "license_type": "permissive", "methods": [{"docstring": "Return context for standard paging.", "name": "get_queryset", "signature": "def get_queryset(self)"}, {"docstring": "Add additional useful information.", "name": "get_context_data", "signature": "def get_context_data(self, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004911", "prompt": "Implement the Python class `MessageCreateBaseView` described below.\n\nClass description:\nImplement the MessageCreateBaseView class.\n\nMethod signatures and docstrings:\n- def get_queryset(self): Return context for standard paging.\n- def get_context_data(self, **kwargs): Add additional useful information.", "prompted_full_text": "Implement the Python class `MessageCreateBaseView` described below.\n\nClass description:\nImplement the MessageCreateBaseView class.\n\nMethod signatures and docstrings:\n- def get_queryset(self): Return context for standard paging.\n- def get_context_data(self, **kwargs): Add additional useful information.\n\n<|skeleton|>\nclass MessageCreateBaseView:\n\n def get_queryset(self):\n \"\"\"Return context for standard paging.\"\"\"\n <|body_0|>\n\n def get_context_data(self, **kwargs):\n \"\"\"Add additional useful information.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n format_convert = {'headsUp': ['invite', 'Heads Up'], 'available': ['invite', 'Available?'], 'leave': ['leave', 'Left?'], 'return': ['return', 'Returned?'], 'info': ['info', None], 'broadcast': ['broadcast', None], 'test': ['test', 'Test']}\n initial = {}\n initial['author'] = self.request.user.pk\n initial['type'] = 'std_page'\n initial['format'] = 'page'\n members = None\n page_format = self.request.GET.get('page_format', self.page_format)\n period_format = format_convert[page_format][0]\n initial['period_format'] = period_format\n rsvp_name = format_convert[page_format][1]\n self.rsvp_template = None\n if rsvp_name is not None:\n try:\n self.rsvp_template = RsvpTemplate.objects.get(name=rsvp_name)\n initial['rsvp_template'] = self.rsvp_template\n except RsvpTemplate.DoesNotExist:\n logger.error('RsvpTemplate {} not found for format: {}'.format(rsvp_name, page_format))\n self.initial = initial\n return members\n<|end_body_0|>\n\n<|body_start_1|>\n context = super().get_context_data(**kwargs)\n if self.rsvp_template and self.initial['type'] != 'repage':\n self.initial['input'] = '{} {}'.format(self.initial['input'], self.rsvp_template.text)\n instructions = {'invite': 'Page the team to invite them to the OP. Members already signed up still get a page.', 'info': 'Send an informational page to people signed up for the OP. No response expected.', 'broadcast': 'Send an informational page to the whole team. No response expected.', 'leave': 'Transit page to people signed up for the event. Responses will mark the participant as departed.', 'return': 'Transit page to people signed up for the event. Responses will mark the participant as returned home.', 'test': 'Test page. 
DO NOT USE IN A REAL CALLOUT.'}\n self.initial['instructions'] = instructions.get(self.initial['period_format'], 'WARNING: Unknown period_format')\n return {**context, **self.initial}\n<|end_body_1|>\n", "revision_id": "b988b6e41c786448c4a8a76c11397d195f802a26", "skeleton": "<|skeleton|>\nclass MessageCreateBaseView:\n\n def get_queryset(self):\n \"\"\"Return context for standard paging.\"\"\"\n <|body_0|>\n\n def get_context_data(self, **kwargs):\n \"\"\"Add additional useful information.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MessageCreateBaseView:\n def get_queryset(self):\n \"\"\"Return context for standard paging.\"\"\"\n format_convert = {'headsUp': ['invite', 'Heads Up'], 'available': ['invite', 'Available?'], 'leave': ['leave', 'Left?'], 'return': ['return', 'Returned?'], 'info': ['info', None], 'broadcast': ['broadcast', None], 'test': ['test', 'Test']}\n initial = {}\n initial['author'] = self.request.user.pk\n initial['type'] = 'std_page'\n initial['format'] = 'page'\n members = None\n page_format = self.request.GET.get('page_format', self.page_format)\n period_format = format_convert[page_format][0]\n initial['period_format'] = period_format\n rsvp_name = format_convert[page_format][1]\n self.rsvp_template = None\n if rsvp_name is not None:\n try:\n self.rsvp_template = RsvpTemplate.objects.get(name=rsvp_name)\n initial['rsvp_template'] = self.rsvp_template\n except RsvpTemplate.DoesNotExist:\n logger.error('RsvpTemplate {} not found for format: {}'.format(rsvp_name, page_format))\n self.initial = initial\n return members\n\n def get_context_data(self, **kwargs):\n \"\"\"Add additional useful information.\"\"\"\n context = super().get_context_data(**kwargs)\n if self.rsvp_template and self.initial['type'] != 'repage':\n self.initial['input'] = '{} {}'.format(self.initial['input'], self.rsvp_template.text)\n instructions = {'invite': 'Page the team to invite them to the OP. Members already signed up still get a page.', 'info': 'Send an informational page to people signed up for the OP. No response expected.', 'broadcast': 'Send an informational page to the whole team. No response expected.', 'leave': 'Transit page to people signed up for the event. Responses will mark the participant as departed.', 'return': 'Transit page to people signed up for the event. Responses will mark the participant as returned home.', 'test': 'Test page. DO NOT USE IN A REAL CALLOUT.'}\n self.initial['instructions'] = instructions.get(self.initial['period_format'], 'WARNING: Unknown period_format')\n return {**context, **self.initial}\n", "source": "the_stack_v2_python_sparse", "source_path": "main/views/message_views.py", "source_repo": "BAMRU-Tech/bamru_net", "split": "val", "star_events_count": 7} {"blob_id": "796cfb8e71990ec8a252dc775aaff0e21be06e15", "bodies": ["self.do_lower_case = do_lower_case\nself.never_split = never_split if never_split is not None else []\nself.normalize_text = normalize_text\nself.trim_whitespace = trim_whitespace\ntry:\n import rhoknp\nexcept ImportError:\n raise ImportError('You need to install rhoknp to use JumanppTokenizer. 
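In the `MessageCreateBaseView` record just closed, `get_queryset` is really doing form setup: it maps the requested `page_format` onto a `(period_format, rsvp_name)` pair and then looks up the RSVP template while tolerating a missing row. The same lookup-with-fallback shape without Django, using a dict where the ORM query would be (all data here is illustrative):

    import logging

    logger = logging.getLogger(__name__)

    FORMAT_CONVERT = {'headsUp': ('invite', 'Heads Up'),
                      'info': ('info', None)}
    RSVP_TEMPLATES = {'Heads Up': 'Reply YES when you have read this.'}

    def resolve_page_format(page_format):
        period_format, rsvp_name = FORMAT_CONVERT[page_format]
        template = None
        if rsvp_name is not None:
            template = RSVP_TEMPLATES.get(rsvp_name)  # ~ except DoesNotExist
            if template is None:
                logger.error('RsvpTemplate %s not found for format: %s',
                             rsvp_name, page_format)
        return period_format, template

    print(resolve_page_format('headsUp'))  # ('invite', 'Reply YES ...')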
See https://github.com/ku-nlp/rhoknp for installation.')\nself.juman = rhoknp.Jumanpp()", "if self.normalize_text:\n text = unicodedata.normalize('NFKC', text)\ntext = text.strip()\nnever_split = self.never_split + (never_split if never_split is not None else [])\ntokens = []\nfor mrph in self.juman.apply_to_sentence(text).morphemes:\n token = mrph.text\n if self.do_lower_case and token not in never_split:\n token = token.lower()\n if self.trim_whitespace:\n if token.strip() == '':\n continue\n else:\n token = token.strip()\n tokens.append(token)\nreturn tokens"], "bodies_text": "<|body_start_0|>\n self.do_lower_case = do_lower_case\n self.never_split = never_split if never_split is not None else []\n self.normalize_text = normalize_text\n self.trim_whitespace = trim_whitespace\n try:\n import rhoknp\n except ImportError:\n raise ImportError('You need to install rhoknp to use JumanppTokenizer. See https://github.com/ku-nlp/rhoknp for installation.')\n self.juman = rhoknp.Jumanpp()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.normalize_text:\n text = unicodedata.normalize('NFKC', text)\n text = text.strip()\n never_split = self.never_split + (never_split if never_split is not None else [])\n tokens = []\n for mrph in self.juman.apply_to_sentence(text).morphemes:\n token = mrph.text\n if self.do_lower_case and token not in never_split:\n token = token.lower()\n if self.trim_whitespace:\n if token.strip() == '':\n continue\n else:\n token = token.strip()\n tokens.append(token)\n return tokens\n<|end_body_1|>\n", "class_docstring": "Runs basic tokenization with jumanpp morphological parser.", "class_name": "JumanppTokenizer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass JumanppTokenizer:\n \"\"\"Runs basic tokenization with jumanpp morphological parser.\"\"\"\n\n def __init__(self, do_lower_case=False, never_split=None, normalize_text=True, trim_whitespace=False):\n \"\"\"Constructs a JumanppTokenizer. Args: **do_lower_case**: (*optional*) boolean (default True) Whether to lowercase the input. **never_split**: (*optional*) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of tokens not to split. **normalize_text**: (*optional*) boolean (default True) Whether to apply unicode normalization to text before tokenization. **trim_whitespace**: (*optional*) boolean (default False) Whether to trim all whitespace, tab, newline from tokens.\"\"\"\n <|body_0|>\n\n def tokenize(self, text, never_split=None, **kwargs):\n \"\"\"Tokenizes a piece of text.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.do_lower_case = do_lower_case\n self.never_split = never_split if never_split is not None else []\n self.normalize_text = normalize_text\n self.trim_whitespace = trim_whitespace\n try:\n import rhoknp\n except ImportError:\n raise ImportError('You need to install rhoknp to use JumanppTokenizer. 
See https://github.com/ku-nlp/rhoknp for installation.')\n self.juman = rhoknp.Jumanpp()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.normalize_text:\n text = unicodedata.normalize('NFKC', text)\n text = text.strip()\n never_split = self.never_split + (never_split if never_split is not None else [])\n tokens = []\n for mrph in self.juman.apply_to_sentence(text).morphemes:\n token = mrph.text\n if self.do_lower_case and token not in never_split:\n token = token.lower()\n if self.trim_whitespace:\n if token.strip() == '':\n continue\n else:\n token = token.strip()\n tokens.append(token)\n return tokens\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000384", "length_bytes": 40187, "license_type": "permissive", "methods": [{"docstring": "Constructs a JumanppTokenizer. Args: **do_lower_case**: (*optional*) boolean (default True) Whether to lowercase the input. **never_split**: (*optional*) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of tokens not to split. **normalize_text**: (*optional*) boolean (default True) Whether to apply unicode normalization to text before tokenization. **trim_whitespace**: (*optional*) boolean (default False) Whether to trim all whitespace, tab, newline from tokens.", "name": "__init__", "signature": "def __init__(self, do_lower_case=False, never_split=None, normalize_text=True, trim_whitespace=False)"}, {"docstring": "Tokenizes a piece of text.", "name": "tokenize", "signature": "def tokenize(self, text, never_split=None, **kwargs)"}], "n_methods": 2, "prompt": "Implement the Python class `JumanppTokenizer` described below.\n\nClass description:\nRuns basic tokenization with jumanpp morphological parser.\n\nMethod signatures and docstrings:\n- def __init__(self, do_lower_case=False, never_split=None, normalize_text=True, trim_whitespace=False): Constructs a JumanppTokenizer. Args: **do_lower_case**: (*optional*) boolean (default True) Whether to lowercase the input. **never_split**: (*optional*) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of tokens not to split. **normalize_text**: (*optional*) boolean (default True) Whether to apply unicode normalization to text before tokenization. **trim_whitespace**: (*optional*) boolean (default False) Whether to trim all whitespace, tab, newline from tokens.\n- def tokenize(self, text, never_split=None, **kwargs): Tokenizes a piece of text.", "prompted_full_text": "Implement the Python class `JumanppTokenizer` described below.\n\nClass description:\nRuns basic tokenization with jumanpp morphological parser.\n\nMethod signatures and docstrings:\n- def __init__(self, do_lower_case=False, never_split=None, normalize_text=True, trim_whitespace=False): Constructs a JumanppTokenizer. Args: **do_lower_case**: (*optional*) boolean (default True) Whether to lowercase the input. **never_split**: (*optional*) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of tokens not to split. **normalize_text**: (*optional*) boolean (default True) Whether to apply unicode normalization to text before tokenization. 
**trim_whitespace**: (*optional*) boolean (default False) Whether to trim all whitespace, tab, newline from tokens.\n- def tokenize(self, text, never_split=None, **kwargs): Tokenizes a piece of text.\n\n<|skeleton|>\nclass JumanppTokenizer:\n \"\"\"Runs basic tokenization with jumanpp morphological parser.\"\"\"\n\n def __init__(self, do_lower_case=False, never_split=None, normalize_text=True, trim_whitespace=False):\n \"\"\"Constructs a JumanppTokenizer. Args: **do_lower_case**: (*optional*) boolean (default True) Whether to lowercase the input. **never_split**: (*optional*) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of tokens not to split. **normalize_text**: (*optional*) boolean (default True) Whether to apply unicode normalization to text before tokenization. **trim_whitespace**: (*optional*) boolean (default False) Whether to trim all whitespace, tab, newline from tokens.\"\"\"\n <|body_0|>\n\n def tokenize(self, text, never_split=None, **kwargs):\n \"\"\"Tokenizes a piece of text.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.do_lower_case = do_lower_case\n self.never_split = never_split if never_split is not None else []\n self.normalize_text = normalize_text\n self.trim_whitespace = trim_whitespace\n try:\n import rhoknp\n except ImportError:\n raise ImportError('You need to install rhoknp to use JumanppTokenizer. See https://github.com/ku-nlp/rhoknp for installation.')\n self.juman = rhoknp.Jumanpp()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.normalize_text:\n text = unicodedata.normalize('NFKC', text)\n text = text.strip()\n never_split = self.never_split + (never_split if never_split is not None else [])\n tokens = []\n for mrph in self.juman.apply_to_sentence(text).morphemes:\n token = mrph.text\n if self.do_lower_case and token not in never_split:\n token = token.lower()\n if self.trim_whitespace:\n if token.strip() == '':\n continue\n else:\n token = token.strip()\n tokens.append(token)\n return tokens\n<|end_body_1|>\n", "revision_id": "4fa0aff21ee083d0197a898cdf17ff476fae2ac3", "skeleton": "<|skeleton|>\nclass JumanppTokenizer:\n \"\"\"Runs basic tokenization with jumanpp morphological parser.\"\"\"\n\n def __init__(self, do_lower_case=False, never_split=None, normalize_text=True, trim_whitespace=False):\n \"\"\"Constructs a JumanppTokenizer. Args: **do_lower_case**: (*optional*) boolean (default True) Whether to lowercase the input. **never_split**: (*optional*) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of tokens not to split. **normalize_text**: (*optional*) boolean (default True) Whether to apply unicode normalization to text before tokenization. **trim_whitespace**: (*optional*) boolean (default False) Whether to trim all whitespace, tab, newline from tokens.\"\"\"\n <|body_0|>\n\n def tokenize(self, text, never_split=None, **kwargs):\n \"\"\"Tokenizes a piece of text.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class JumanppTokenizer:\n \"\"\"Runs basic tokenization with jumanpp morphological parser.\"\"\"\n\n def __init__(self, do_lower_case=False, never_split=None, normalize_text=True, trim_whitespace=False):\n \"\"\"Constructs a JumanppTokenizer. 
Args: **do_lower_case**: (*optional*) boolean (default True) Whether to lowercase the input. **never_split**: (*optional*) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of tokens not to split. **normalize_text**: (*optional*) boolean (default True) Whether to apply unicode normalization to text before tokenization. **trim_whitespace**: (*optional*) boolean (default False) Whether to trim all whitespace, tab, newline from tokens.\"\"\"\n self.do_lower_case = do_lower_case\n self.never_split = never_split if never_split is not None else []\n self.normalize_text = normalize_text\n self.trim_whitespace = trim_whitespace\n try:\n import rhoknp\n except ImportError:\n raise ImportError('You need to install rhoknp to use JumanppTokenizer. See https://github.com/ku-nlp/rhoknp for installation.')\n self.juman = rhoknp.Jumanpp()\n\n def tokenize(self, text, never_split=None, **kwargs):\n \"\"\"Tokenizes a piece of text.\"\"\"\n if self.normalize_text:\n text = unicodedata.normalize('NFKC', text)\n text = text.strip()\n never_split = self.never_split + (never_split if never_split is not None else [])\n tokens = []\n for mrph in self.juman.apply_to_sentence(text).morphemes:\n token = mrph.text\n if self.do_lower_case and token not in never_split:\n token = token.lower()\n if self.trim_whitespace:\n if token.strip() == '':\n continue\n else:\n token = token.strip()\n tokens.append(token)\n return tokens\n", "source": "the_stack_v2_python_sparse", "source_path": "src/transformers/models/bert_japanese/tokenization_bert_japanese.py", "source_repo": "huggingface/transformers", "split": "val", "star_events_count": 102193} {"blob_id": "8680b6fcfc4989d7dc785e1e04da19704f09798f", "bodies": ["moveCnt = 0\nk = len(nums) - 1\nwhile k >= 0:\n if nums[k] == 0:\n nums.pop(k)\n nums.append(0)\n moveCnt = moveCnt + 1\n k = k - 1\nprint('Totally, {0} moves'.format(moveCnt))", "moveEnable = False\nmoveCnt = 0\nk = len(nums) - 1\nwhile k >= 0:\n if nums[k] != 0:\n moveEnable = True\n if nums[k] == 0 and moveEnable == True:\n nums.pop(k)\n nums.append(0)\n moveCnt = moveCnt + 1\n k = k - 1\nprint('Totally, {0} moves'.format(moveCnt))"], "bodies_text": "<|body_start_0|>\n moveCnt = 0\n k = len(nums) - 1\n while k >= 0:\n if nums[k] == 0:\n nums.pop(k)\n nums.append(0)\n moveCnt = moveCnt + 1\n k = k - 1\n print('Totally, {0} moves'.format(moveCnt))\n<|end_body_0|>\n\n<|body_start_1|>\n moveEnable = False\n moveCnt = 0\n k = len(nums) - 1\n while k >= 0:\n if nums[k] != 0:\n moveEnable = True\n if nums[k] == 0 and moveEnable == True:\n nums.pop(k)\n nums.append(0)\n moveCnt = moveCnt + 1\n k = k - 1\n print('Totally, {0} moves'.format(moveCnt))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def moveZeroes(self, nums) -> None:\n \"\"\"Do not return anything, modify nums in-place instead. 
NOTE: This solution is inefficient, in the sense that the move for the trailing zeros should be skipped.\"\"\"\n <|body_0|>\n\n def moveZeroesRefined(self, nums) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n moveCnt = 0\n k = len(nums) - 1\n while k >= 0:\n if nums[k] == 0:\n nums.pop(k)\n nums.append(0)\n moveCnt = moveCnt + 1\n k = k - 1\n print('Totally, {0} moves'.format(moveCnt))\n<|end_body_0|>\n\n<|body_start_1|>\n moveEnable = False\n moveCnt = 0\n k = len(nums) - 1\n while k >= 0:\n if nums[k] != 0:\n moveEnable = True\n if nums[k] == 0 and moveEnable == True:\n nums.pop(k)\n nums.append(0)\n moveCnt = moveCnt + 1\n k = k - 1\n print('Totally, {0} moves'.format(moveCnt))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000385", "length_bytes": 2031, "license_type": "no_license", "methods": [{"docstring": "Do not return anything, modify nums in-place instead. NOTE: This solution is inefficient, in the sense that the move for the trailing zeros should be skipped.", "name": "moveZeroes", "signature": "def moveZeroes(self, nums) -> None"}, {"docstring": "Do not return anything, modify nums in-place instead.", "name": "moveZeroesRefined", "signature": "def moveZeroesRefined(self, nums) -> None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000241", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def moveZeroes(self, nums) -> None: Do not return anything, modify nums in-place instead. NOTE: This solution is inefficient, in the sense that the move for the trailing zeros should be skipped.\n- def moveZeroesRefined(self, nums) -> None: Do not return anything, modify nums in-place instead.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def moveZeroes(self, nums) -> None: Do not return anything, modify nums in-place instead. NOTE: This solution is inefficient, in the sense that the move for the trailing zeros should be skipped.\n- def moveZeroesRefined(self, nums) -> None: Do not return anything, modify nums in-place instead.\n\n<|skeleton|>\nclass Solution:\n\n def moveZeroes(self, nums) -> None:\n \"\"\"Do not return anything, modify nums in-place instead. NOTE: This solution is inefficient, in the sense that the move for the trailing zeros should be skipped.\"\"\"\n <|body_0|>\n\n def moveZeroesRefined(self, nums) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n moveCnt = 0\n k = len(nums) - 1\n while k >= 0:\n if nums[k] == 0:\n nums.pop(k)\n nums.append(0)\n moveCnt = moveCnt + 1\n k = k - 1\n print('Totally, {0} moves'.format(moveCnt))\n<|end_body_0|>\n\n<|body_start_1|>\n moveEnable = False\n moveCnt = 0\n k = len(nums) - 1\n while k >= 0:\n if nums[k] != 0:\n moveEnable = True\n if nums[k] == 0 and moveEnable == True:\n nums.pop(k)\n nums.append(0)\n moveCnt = moveCnt + 1\n k = k - 1\n print('Totally, {0} moves'.format(moveCnt))\n<|end_body_1|>\n", "revision_id": "1007197ff0feda35001c0aaf13382af6869869b2", "skeleton": "<|skeleton|>\nclass Solution:\n\n def moveZeroes(self, nums) -> None:\n \"\"\"Do not return anything, modify nums in-place instead. 
NOTE: This solution is inefficient, in the sense that the move for the trailing zeros should be skipped.\"\"\"\n <|body_0|>\n\n def moveZeroesRefined(self, nums) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def moveZeroes(self, nums) -> None:\n \"\"\"Do not return anything, modify nums in-place instead. NOTE: This solution is inefficient, in the sense that the move for the trailing zeros should be skipped.\"\"\"\n moveCnt = 0\n k = len(nums) - 1\n while k >= 0:\n if nums[k] == 0:\n nums.pop(k)\n nums.append(0)\n moveCnt = moveCnt + 1\n k = k - 1\n print('Totally, {0} moves'.format(moveCnt))\n\n def moveZeroesRefined(self, nums) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n moveEnable = False\n moveCnt = 0\n k = len(nums) - 1\n while k >= 0:\n if nums[k] != 0:\n moveEnable = True\n if nums[k] == 0 and moveEnable == True:\n nums.pop(k)\n nums.append(0)\n moveCnt = moveCnt + 1\n k = k - 1\n print('Totally, {0} moves'.format(moveCnt))\n", "source": "the_stack_v2_python_sparse", "source_path": "array-and-string/moveZeroes.py", "source_repo": "chenxy3791/leetcode", "split": "val", "star_events_count": 0} {"blob_id": "3ebf0c731c063736fdb26e7dcab7af14c04c2f37", "bodies": ["directory_path = 'data'\nparallel.drop_data()\nlist1 = parallel.import_data(directory_path, 'products.csv', 'customers.csv')\nself.assertEqual(list1[0][0], 999)\nself.assertEqual(list1[0][1], 0)\nself.assertEqual(list1[0][2], 999)\nself.assertTrue(list1[0][3] > 0)\nself.assertEqual(list1[1][0], 999)\nself.assertEqual(list1[1][1], 0)\nself.assertEqual(list1[1][2], 999)\nself.assertTrue(list1[1][3] > 0)\nparallel.drop_data()\nlist2 = parallel.import_data(directory_path, 'products.csv', 'nothing.csv')\nself.assertEqual(list2[0][0], 999)\nself.assertEqual(list2[0][1], 0)\nself.assertEqual(list2[0][2], 999)\nself.assertTrue(list2[0][3] > 0)\nself.assertEqual(list2[1], None)", "directory_path = 'data'\nparallel.drop_data()\nlist1 = parallel.import_data_thread(directory_path, 'products.csv', 'customers.csv')\nself.assertEqual(list1[0][0], 999)\nself.assertEqual(list1[0][1], 0)\nself.assertEqual(list1[0][2], 999)\nself.assertTrue(list1[0][3] > 0)\nself.assertEqual(list1[1][0], 999)\nself.assertEqual(list1[1][1], 0)\nself.assertEqual(list1[1][2], 999)\nself.assertTrue(list1[1][3] > 0)\nparallel.drop_data()\nlist2 = parallel.import_data_thread(directory_path, 'products.csv', 'nothing.csv')\nself.assertEqual(list2[0][0], 999)\nself.assertEqual(list2[0][1], 0)\nself.assertEqual(list2[0][2], 999)\nself.assertTrue(list2[0][3] > 0)\nself.assertEqual(list2[1], None)"], "bodies_text": "<|body_start_0|>\n directory_path = 'data'\n parallel.drop_data()\n list1 = parallel.import_data(directory_path, 'products.csv', 'customers.csv')\n self.assertEqual(list1[0][0], 999)\n self.assertEqual(list1[0][1], 0)\n self.assertEqual(list1[0][2], 999)\n self.assertTrue(list1[0][3] > 0)\n self.assertEqual(list1[1][0], 999)\n self.assertEqual(list1[1][1], 0)\n self.assertEqual(list1[1][2], 999)\n self.assertTrue(list1[1][3] > 0)\n parallel.drop_data()\n list2 = parallel.import_data(directory_path, 'products.csv', 'nothing.csv')\n self.assertEqual(list2[0][0], 999)\n self.assertEqual(list2[0][1], 0)\n self.assertEqual(list2[0][2], 999)\n self.assertTrue(list2[0][3] > 0)\n self.assertEqual(list2[1], 
None)\n<|end_body_0|>\n\n<|body_start_1|>\n directory_path = 'data'\n parallel.drop_data()\n list1 = parallel.import_data_thread(directory_path, 'products.csv', 'customers.csv')\n self.assertEqual(list1[0][0], 999)\n self.assertEqual(list1[0][1], 0)\n self.assertEqual(list1[0][2], 999)\n self.assertTrue(list1[0][3] > 0)\n self.assertEqual(list1[1][0], 999)\n self.assertEqual(list1[1][1], 0)\n self.assertEqual(list1[1][2], 999)\n self.assertTrue(list1[1][3] > 0)\n parallel.drop_data()\n list2 = parallel.import_data_thread(directory_path, 'products.csv', 'nothing.csv')\n self.assertEqual(list2[0][0], 999)\n self.assertEqual(list2[0][1], 0)\n self.assertEqual(list2[0][2], 999)\n self.assertTrue(list2[0][3] > 0)\n self.assertEqual(list2[1], None)\n<|end_body_1|>\n", "class_docstring": "Tests for the database module", "class_name": "LinearTests", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LinearTests:\n \"\"\"Tests for the database module\"\"\"\n\n def test_import_data(self):\n \"\"\"Tests the import_data function\"\"\"\n <|body_0|>\n\n def test_import_data_thread(self):\n \"\"\"Tests the import_data function\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n directory_path = 'data'\n parallel.drop_data()\n list1 = parallel.import_data(directory_path, 'products.csv', 'customers.csv')\n self.assertEqual(list1[0][0], 999)\n self.assertEqual(list1[0][1], 0)\n self.assertEqual(list1[0][2], 999)\n self.assertTrue(list1[0][3] > 0)\n self.assertEqual(list1[1][0], 999)\n self.assertEqual(list1[1][1], 0)\n self.assertEqual(list1[1][2], 999)\n self.assertTrue(list1[1][3] > 0)\n parallel.drop_data()\n list2 = parallel.import_data(directory_path, 'products.csv', 'nothing.csv')\n self.assertEqual(list2[0][0], 999)\n self.assertEqual(list2[0][1], 0)\n self.assertEqual(list2[0][2], 999)\n self.assertTrue(list2[0][3] > 0)\n self.assertEqual(list2[1], None)\n<|end_body_0|>\n\n<|body_start_1|>\n directory_path = 'data'\n parallel.drop_data()\n list1 = parallel.import_data_thread(directory_path, 'products.csv', 'customers.csv')\n self.assertEqual(list1[0][0], 999)\n self.assertEqual(list1[0][1], 0)\n self.assertEqual(list1[0][2], 999)\n self.assertTrue(list1[0][3] > 0)\n self.assertEqual(list1[1][0], 999)\n self.assertEqual(list1[1][1], 0)\n self.assertEqual(list1[1][2], 999)\n self.assertTrue(list1[1][3] > 0)\n parallel.drop_data()\n list2 = parallel.import_data_thread(directory_path, 'products.csv', 'nothing.csv')\n self.assertEqual(list2[0][0], 999)\n self.assertEqual(list2[0][1], 0)\n self.assertEqual(list2[0][2], 999)\n self.assertTrue(list2[0][3] > 0)\n self.assertEqual(list2[1], None)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000386", "length_bytes": 2454, "license_type": "no_license", "methods": [{"docstring": "Tests the import_data function", "name": "test_import_data", "signature": "def test_import_data(self)"}, {"docstring": "Tests the import_data function", "name": "test_import_data_thread", "signature": "def test_import_data_thread(self)"}], "n_methods": 2, "prompt": "Implement the Python class `LinearTests` described below.\n\nClass description:\nTests for the database module\n\nMethod signatures and docstrings:\n- def test_import_data(self): Tests the import_data function\n- def test_import_data_thread(self): Tests the import_data function", "prompted_full_text": "Implement the Python class `LinearTests` described below.\n\nClass description:\nTests for the database module\n\nMethod signatures and 
docstrings:\n- def test_import_data(self): Tests the import_data function\n- def test_import_data_thread(self): Tests the import_data function\n\n<|skeleton|>\nclass LinearTests:\n \"\"\"Tests for the database module\"\"\"\n\n def test_import_data(self):\n \"\"\"Tests the import_data function\"\"\"\n <|body_0|>\n\n def test_import_data_thread(self):\n \"\"\"Tests the import_data function\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n directory_path = 'data'\n parallel.drop_data()\n list1 = parallel.import_data(directory_path, 'products.csv', 'customers.csv')\n self.assertEqual(list1[0][0], 999)\n self.assertEqual(list1[0][1], 0)\n self.assertEqual(list1[0][2], 999)\n self.assertTrue(list1[0][3] > 0)\n self.assertEqual(list1[1][0], 999)\n self.assertEqual(list1[1][1], 0)\n self.assertEqual(list1[1][2], 999)\n self.assertTrue(list1[1][3] > 0)\n parallel.drop_data()\n list2 = parallel.import_data(directory_path, 'products.csv', 'nothing.csv')\n self.assertEqual(list2[0][0], 999)\n self.assertEqual(list2[0][1], 0)\n self.assertEqual(list2[0][2], 999)\n self.assertTrue(list2[0][3] > 0)\n self.assertEqual(list2[1], None)\n<|end_body_0|>\n\n<|body_start_1|>\n directory_path = 'data'\n parallel.drop_data()\n list1 = parallel.import_data_thread(directory_path, 'products.csv', 'customers.csv')\n self.assertEqual(list1[0][0], 999)\n self.assertEqual(list1[0][1], 0)\n self.assertEqual(list1[0][2], 999)\n self.assertTrue(list1[0][3] > 0)\n self.assertEqual(list1[1][0], 999)\n self.assertEqual(list1[1][1], 0)\n self.assertEqual(list1[1][2], 999)\n self.assertTrue(list1[1][3] > 0)\n parallel.drop_data()\n list2 = parallel.import_data_thread(directory_path, 'products.csv', 'nothing.csv')\n self.assertEqual(list2[0][0], 999)\n self.assertEqual(list2[0][1], 0)\n self.assertEqual(list2[0][2], 999)\n self.assertTrue(list2[0][3] > 0)\n self.assertEqual(list2[1], None)\n<|end_body_1|>\n", "revision_id": "5dac60f39e3909ff05b26721d602ed20f14d6be3", "skeleton": "<|skeleton|>\nclass LinearTests:\n \"\"\"Tests for the database module\"\"\"\n\n def test_import_data(self):\n \"\"\"Tests the import_data function\"\"\"\n <|body_0|>\n\n def test_import_data_thread(self):\n \"\"\"Tests the import_data function\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LinearTests:\n \"\"\"Tests for the database module\"\"\"\n\n def test_import_data(self):\n \"\"\"Tests the import_data function\"\"\"\n directory_path = 'data'\n parallel.drop_data()\n list1 = parallel.import_data(directory_path, 'products.csv', 'customers.csv')\n self.assertEqual(list1[0][0], 999)\n self.assertEqual(list1[0][1], 0)\n self.assertEqual(list1[0][2], 999)\n self.assertTrue(list1[0][3] > 0)\n self.assertEqual(list1[1][0], 999)\n self.assertEqual(list1[1][1], 0)\n self.assertEqual(list1[1][2], 999)\n self.assertTrue(list1[1][3] > 0)\n parallel.drop_data()\n list2 = parallel.import_data(directory_path, 'products.csv', 'nothing.csv')\n self.assertEqual(list2[0][0], 999)\n self.assertEqual(list2[0][1], 0)\n self.assertEqual(list2[0][2], 999)\n self.assertTrue(list2[0][3] > 0)\n self.assertEqual(list2[1], None)\n\n def test_import_data_thread(self):\n \"\"\"Tests the import_data function\"\"\"\n directory_path = 'data'\n parallel.drop_data()\n list1 = parallel.import_data_thread(directory_path, 'products.csv', 'customers.csv')\n self.assertEqual(list1[0][0], 999)\n self.assertEqual(list1[0][1], 0)\n self.assertEqual(list1[0][2], 
999)\n self.assertTrue(list1[0][3] > 0)\n self.assertEqual(list1[1][0], 999)\n self.assertEqual(list1[1][1], 0)\n self.assertEqual(list1[1][2], 999)\n self.assertTrue(list1[1][3] > 0)\n parallel.drop_data()\n list2 = parallel.import_data_thread(directory_path, 'products.csv', 'nothing.csv')\n self.assertEqual(list2[0][0], 999)\n self.assertEqual(list2[0][1], 0)\n self.assertEqual(list2[0][2], 999)\n self.assertTrue(list2[0][3] > 0)\n self.assertEqual(list2[1], None)\n", "source": "the_stack_v2_python_sparse", "source_path": "students/amirg/lesson07/assignment/test_parallel.py", "source_repo": "JavaRod/SP_Python220B_2019", "split": "val", "star_events_count": 1} {"blob_id": "c4d3bb0ba74c8a302027dda80c5c671b7c59293b", "bodies": ["model = model if isinstance(model, SpaceForDialogIntent) else Model.from_pretrained(model)\nif preprocessor is None:\n preprocessor = DialogIntentPredictionPreprocessor(model.model_dir)\nself.model = model\nsuper().__init__(model=model, preprocessor=preprocessor, **kwargs)\nself.categories = preprocessor.categories", "import numpy as np\npred = inputs['pred']\npos = np.where(pred == np.max(pred))\nreturn {OutputKeys.OUTPUT: {OutputKeys.PREDICTION: pred, OutputKeys.LABEL_POS: pos[0], OutputKeys.LABEL: self.categories[pos[0][0]]}}"], "bodies_text": "<|body_start_0|>\n model = model if isinstance(model, SpaceForDialogIntent) else Model.from_pretrained(model)\n if preprocessor is None:\n preprocessor = DialogIntentPredictionPreprocessor(model.model_dir)\n self.model = model\n super().__init__(model=model, preprocessor=preprocessor, **kwargs)\n self.categories = preprocessor.categories\n<|end_body_0|>\n\n<|body_start_1|>\n import numpy as np\n pred = inputs['pred']\n pos = np.where(pred == np.max(pred))\n return {OutputKeys.OUTPUT: {OutputKeys.PREDICTION: pred, OutputKeys.LABEL_POS: pos[0], OutputKeys.LABEL: self.categories[pos[0][0]]}}\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DialogIntentPredictionPipeline", "detected_licenses": ["Apache-2.0", "BSD-3-Clause", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DialogIntentPredictionPipeline:\n\n def __init__(self, model: Union[SpaceForDialogIntent, str], preprocessor: DialogIntentPredictionPreprocessor=None, **kwargs):\n \"\"\"Use `model` and `preprocessor` to create a dialog intent prediction pipeline Args: model (str or SpaceForDialogIntent): Supply either a local model dir or a model id from the model hub, or a SpaceForDialogIntent instance. 
preprocessor (DialogIntentPredictionPreprocessor): An optional preprocessor instance.\"\"\"\n <|body_0|>\n\n def postprocess(self, inputs: Dict[str, Any]) -> Dict[str, str]:\n \"\"\"process the prediction results Args: inputs (Dict[str, Any]): _description_ Returns: Dict[str, str]: the prediction results\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n model = model if isinstance(model, SpaceForDialogIntent) else Model.from_pretrained(model)\n if preprocessor is None:\n preprocessor = DialogIntentPredictionPreprocessor(model.model_dir)\n self.model = model\n super().__init__(model=model, preprocessor=preprocessor, **kwargs)\n self.categories = preprocessor.categories\n<|end_body_0|>\n\n<|body_start_1|>\n import numpy as np\n pred = inputs['pred']\n pos = np.where(pred == np.max(pred))\n return {OutputKeys.OUTPUT: {OutputKeys.PREDICTION: pred, OutputKeys.LABEL_POS: pos[0], OutputKeys.LABEL: self.categories[pos[0][0]]}}\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000387", "length_bytes": 2215, "license_type": "permissive", "methods": [{"docstring": "Use `model` and `preprocessor` to create a dialog intent prediction pipeline Args: model (str or SpaceForDialogIntent): Supply either a local model dir or a model id from the model hub, or a SpaceForDialogIntent instance. preprocessor (DialogIntentPredictionPreprocessor): An optional preprocessor instance.", "name": "__init__", "signature": "def __init__(self, model: Union[SpaceForDialogIntent, str], preprocessor: DialogIntentPredictionPreprocessor=None, **kwargs)"}, {"docstring": "process the prediction results Args: inputs (Dict[str, Any]): _description_ Returns: Dict[str, str]: the prediction results", "name": "postprocess", "signature": "def postprocess(self, inputs: Dict[str, Any]) -> Dict[str, str]"}], "n_methods": 2, "prompt": "Implement the Python class `DialogIntentPredictionPipeline` described below.\n\nClass description:\nImplement the DialogIntentPredictionPipeline class.\n\nMethod signatures and docstrings:\n- def __init__(self, model: Union[SpaceForDialogIntent, str], preprocessor: DialogIntentPredictionPreprocessor=None, **kwargs): Use `model` and `preprocessor` to create a dialog intent prediction pipeline Args: model (str or SpaceForDialogIntent): Supply either a local model dir or a model id from the model hub, or a SpaceForDialogIntent instance. preprocessor (DialogIntentPredictionPreprocessor): An optional preprocessor instance.\n- def postprocess(self, inputs: Dict[str, Any]) -> Dict[str, str]: process the prediction results Args: inputs (Dict[str, Any]): _description_ Returns: Dict[str, str]: the prediction results", "prompted_full_text": "Implement the Python class `DialogIntentPredictionPipeline` described below.\n\nClass description:\nImplement the DialogIntentPredictionPipeline class.\n\nMethod signatures and docstrings:\n- def __init__(self, model: Union[SpaceForDialogIntent, str], preprocessor: DialogIntentPredictionPreprocessor=None, **kwargs): Use `model` and `preprocessor` to create a dialog intent prediction pipeline Args: model (str or SpaceForDialogIntent): Supply either a local model dir or a model id from the model hub, or a SpaceForDialogIntent instance. 
preprocessor (DialogIntentPredictionPreprocessor): An optional preprocessor instance.\n- def postprocess(self, inputs: Dict[str, Any]) -> Dict[str, str]: process the prediction results Args: inputs (Dict[str, Any]): _description_ Returns: Dict[str, str]: the prediction results\n\n<|skeleton|>\nclass DialogIntentPredictionPipeline:\n\n def __init__(self, model: Union[SpaceForDialogIntent, str], preprocessor: DialogIntentPredictionPreprocessor=None, **kwargs):\n \"\"\"Use `model` and `preprocessor` to create a dialog intent prediction pipeline Args: model (str or SpaceForDialogIntent): Supply either a local model dir or a model id from the model hub, or a SpaceForDialogIntent instance. preprocessor (DialogIntentPredictionPreprocessor): An optional preprocessor instance.\"\"\"\n <|body_0|>\n\n def postprocess(self, inputs: Dict[str, Any]) -> Dict[str, str]:\n \"\"\"process the prediction results Args: inputs (Dict[str, Any]): _description_ Returns: Dict[str, str]: the prediction results\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n model = model if isinstance(model, SpaceForDialogIntent) else Model.from_pretrained(model)\n if preprocessor is None:\n preprocessor = DialogIntentPredictionPreprocessor(model.model_dir)\n self.model = model\n super().__init__(model=model, preprocessor=preprocessor, **kwargs)\n self.categories = preprocessor.categories\n<|end_body_0|>\n\n<|body_start_1|>\n import numpy as np\n pred = inputs['pred']\n pos = np.where(pred == np.max(pred))\n return {OutputKeys.OUTPUT: {OutputKeys.PREDICTION: pred, OutputKeys.LABEL_POS: pos[0], OutputKeys.LABEL: self.categories[pos[0][0]]}}\n<|end_body_1|>\n", "revision_id": "8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6", "skeleton": "<|skeleton|>\nclass DialogIntentPredictionPipeline:\n\n def __init__(self, model: Union[SpaceForDialogIntent, str], preprocessor: DialogIntentPredictionPreprocessor=None, **kwargs):\n \"\"\"Use `model` and `preprocessor` to create a dialog intent prediction pipeline Args: model (str or SpaceForDialogIntent): Supply either a local model dir or a model id from the model hub, or a SpaceForDialogIntent instance. preprocessor (DialogIntentPredictionPreprocessor): An optional preprocessor instance.\"\"\"\n <|body_0|>\n\n def postprocess(self, inputs: Dict[str, Any]) -> Dict[str, str]:\n \"\"\"process the prediction results Args: inputs (Dict[str, Any]): _description_ Returns: Dict[str, str]: the prediction results\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DialogIntentPredictionPipeline:\n def __init__(self, model: Union[SpaceForDialogIntent, str], preprocessor: DialogIntentPredictionPreprocessor=None, **kwargs):\n \"\"\"Use `model` and `preprocessor` to create a dialog intent prediction pipeline Args: model (str or SpaceForDialogIntent): Supply either a local model dir or a model id from the model hub, or a SpaceForDialogIntent instance. 
preprocessor (DialogIntentPredictionPreprocessor): An optional preprocessor instance.\"\"\"\n model = model if isinstance(model, SpaceForDialogIntent) else Model.from_pretrained(model)\n if preprocessor is None:\n preprocessor = DialogIntentPredictionPreprocessor(model.model_dir)\n self.model = model\n super().__init__(model=model, preprocessor=preprocessor, **kwargs)\n self.categories = preprocessor.categories\n\n def postprocess(self, inputs: Dict[str, Any]) -> Dict[str, str]:\n \"\"\"process the prediction results Args: inputs (Dict[str, Any]): _description_ Returns: Dict[str, str]: the prediction results\"\"\"\n import numpy as np\n pred = inputs['pred']\n pos = np.where(pred == np.max(pred))\n return {OutputKeys.OUTPUT: {OutputKeys.PREDICTION: pred, OutputKeys.LABEL_POS: pos[0], OutputKeys.LABEL: self.categories[pos[0][0]]}}\n", "source": "the_stack_v2_python_sparse", "source_path": "ai/modelscope/modelscope/pipelines/nlp/dialog_intent_prediction_pipeline.py", "source_repo": "alldatacenter/alldata", "split": "val", "star_events_count": 774} {"blob_id": "946c0e46b7e3e6b2a39f7d7686a6a31e7051d7df", "bodies": ["super(LSTM, self).__init__()\nself.hidden_size = d_model\nself.lstm = nn.LSTM(input_size=input_size, hidden_size=d_model, num_layers=layers, batch_first=True)\nself.fc1 = nn.Linear(d_model, d_model)\nself.fc2 = nn.Linear(d_model, out_len)\nself.drop_out = nn.Dropout(dropout)\nself.device = device\nself.num_layers = layers", "h_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).to(self.device)\nc_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).to(self.device)\nout, (_, _) = self.lstm(x, (h_0, c_0))\nout = self.drop_out(out)\nout = out[:, -1, :]\nout = self.fc1(out)\noutput = self.fc2(out)\nreturn output.unsqueeze(-1)"], "bodies_text": "<|body_start_0|>\n super(LSTM, self).__init__()\n self.hidden_size = d_model\n self.lstm = nn.LSTM(input_size=input_size, hidden_size=d_model, num_layers=layers, batch_first=True)\n self.fc1 = nn.Linear(d_model, d_model)\n self.fc2 = nn.Linear(d_model, out_len)\n self.drop_out = nn.Dropout(dropout)\n self.device = device\n self.num_layers = layers\n<|end_body_0|>\n\n<|body_start_1|>\n h_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).to(self.device)\n c_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).to(self.device)\n out, (_, _) = self.lstm(x, (h_0, c_0))\n out = self.drop_out(out)\n out = out[:, -1, :]\n out = self.fc1(out)\n output = self.fc2(out)\n return output.unsqueeze(-1)\n<|end_body_1|>\n", "class_docstring": "An implementation of LSTM for forecasting.", "class_name": "LSTM", "detected_licenses": ["Apache-2.0", "CC-BY-4.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LSTM:\n \"\"\"An implementation of LSTM for forecasting.\"\"\"\n\n def __init__(self, input_size, out_len, d_model=512, layers=3, dropout=0.0, device=torch.device('cuda:0')):\n \"\"\"Initializes a LSTM instance. Args: input_size: Input features dimension out_len: Forecasting horizon d_model: Hidden layer dimension layers: Number of LSTM layers. dropout: Fraction of neurons affected by Dropout (default=0.0). device: Device used by the model\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forward pass for LSTM. 
Args: x: A tensor of shape `(batch_size, seqence_length, input_size)` Returns: output: The forecast, a tensor of shape `(batch_size, out_len, 1)`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(LSTM, self).__init__()\n self.hidden_size = d_model\n self.lstm = nn.LSTM(input_size=input_size, hidden_size=d_model, num_layers=layers, batch_first=True)\n self.fc1 = nn.Linear(d_model, d_model)\n self.fc2 = nn.Linear(d_model, out_len)\n self.drop_out = nn.Dropout(dropout)\n self.device = device\n self.num_layers = layers\n<|end_body_0|>\n\n<|body_start_1|>\n h_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).to(self.device)\n c_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).to(self.device)\n out, (_, _) = self.lstm(x, (h_0, c_0))\n out = self.drop_out(out)\n out = out[:, -1, :]\n out = self.fc1(out)\n output = self.fc2(out)\n return output.unsqueeze(-1)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000388", "length_bytes": 4262, "license_type": "permissive", "methods": [{"docstring": "Initializes a LSTM instance. Args: input_size: Input features dimension out_len: Forecasting horizon d_model: Hidden layer dimension layers: Number of LSTM layers. dropout: Fraction of neurons affected by Dropout (default=0.0). device: Device used by the model", "name": "__init__", "signature": "def __init__(self, input_size, out_len, d_model=512, layers=3, dropout=0.0, device=torch.device('cuda:0'))"}, {"docstring": "Forward pass for LSTM. Args: x: A tensor of shape `(batch_size, seqence_length, input_size)` Returns: output: The forecast, a tensor of shape `(batch_size, out_len, 1)`", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 2, "prompt": "Implement the Python class `LSTM` described below.\n\nClass description:\nAn implementation of LSTM for forecasting.\n\nMethod signatures and docstrings:\n- def __init__(self, input_size, out_len, d_model=512, layers=3, dropout=0.0, device=torch.device('cuda:0')): Initializes a LSTM instance. Args: input_size: Input features dimension out_len: Forecasting horizon d_model: Hidden layer dimension layers: Number of LSTM layers. dropout: Fraction of neurons affected by Dropout (default=0.0). device: Device used by the model\n- def forward(self, x): Forward pass for LSTM. Args: x: A tensor of shape `(batch_size, seqence_length, input_size)` Returns: output: The forecast, a tensor of shape `(batch_size, out_len, 1)`", "prompted_full_text": "Implement the Python class `LSTM` described below.\n\nClass description:\nAn implementation of LSTM for forecasting.\n\nMethod signatures and docstrings:\n- def __init__(self, input_size, out_len, d_model=512, layers=3, dropout=0.0, device=torch.device('cuda:0')): Initializes a LSTM instance. Args: input_size: Input features dimension out_len: Forecasting horizon d_model: Hidden layer dimension layers: Number of LSTM layers. dropout: Fraction of neurons affected by Dropout (default=0.0). device: Device used by the model\n- def forward(self, x): Forward pass for LSTM. Args: x: A tensor of shape `(batch_size, seqence_length, input_size)` Returns: output: The forecast, a tensor of shape `(batch_size, out_len, 1)`\n\n<|skeleton|>\nclass LSTM:\n \"\"\"An implementation of LSTM for forecasting.\"\"\"\n\n def __init__(self, input_size, out_len, d_model=512, layers=3, dropout=0.0, device=torch.device('cuda:0')):\n \"\"\"Initializes a LSTM instance. 
Args: input_size: Input features dimension out_len: Forecasting horizon d_model: Hidden layer dimension layers: Number of LSTM layers. dropout: Fraction of neurons affected by Dropout (default=0.0). device: Device used by the model\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forward pass for LSTM. Args: x: A tensor of shape `(batch_size, seqence_length, input_size)` Returns: output: The forecast, a tensor of shape `(batch_size, out_len, 1)`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(LSTM, self).__init__()\n self.hidden_size = d_model\n self.lstm = nn.LSTM(input_size=input_size, hidden_size=d_model, num_layers=layers, batch_first=True)\n self.fc1 = nn.Linear(d_model, d_model)\n self.fc2 = nn.Linear(d_model, out_len)\n self.drop_out = nn.Dropout(dropout)\n self.device = device\n self.num_layers = layers\n<|end_body_0|>\n\n<|body_start_1|>\n h_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).to(self.device)\n c_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).to(self.device)\n out, (_, _) = self.lstm(x, (h_0, c_0))\n out = self.drop_out(out)\n out = out[:, -1, :]\n out = self.fc1(out)\n output = self.fc2(out)\n return output.unsqueeze(-1)\n<|end_body_1|>\n", "revision_id": "5573d9c5822f4e866b6692769963ae819cb3f10d", "skeleton": "<|skeleton|>\nclass LSTM:\n \"\"\"An implementation of LSTM for forecasting.\"\"\"\n\n def __init__(self, input_size, out_len, d_model=512, layers=3, dropout=0.0, device=torch.device('cuda:0')):\n \"\"\"Initializes a LSTM instance. Args: input_size: Input features dimension out_len: Forecasting horizon d_model: Hidden layer dimension layers: Number of LSTM layers. dropout: Fraction of neurons affected by Dropout (default=0.0). device: Device used by the model\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forward pass for LSTM. Args: x: A tensor of shape `(batch_size, seqence_length, input_size)` Returns: output: The forecast, a tensor of shape `(batch_size, out_len, 1)`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LSTM:\n \"\"\"An implementation of LSTM for forecasting.\"\"\"\n\n def __init__(self, input_size, out_len, d_model=512, layers=3, dropout=0.0, device=torch.device('cuda:0')):\n \"\"\"Initializes a LSTM instance. Args: input_size: Input features dimension out_len: Forecasting horizon d_model: Hidden layer dimension layers: Number of LSTM layers. dropout: Fraction of neurons affected by Dropout (default=0.0). device: Device used by the model\"\"\"\n super(LSTM, self).__init__()\n self.hidden_size = d_model\n self.lstm = nn.LSTM(input_size=input_size, hidden_size=d_model, num_layers=layers, batch_first=True)\n self.fc1 = nn.Linear(d_model, d_model)\n self.fc2 = nn.Linear(d_model, out_len)\n self.drop_out = nn.Dropout(dropout)\n self.device = device\n self.num_layers = layers\n\n def forward(self, x):\n \"\"\"Forward pass for LSTM. 
Args: x: A tensor of shape `(batch_size, seqence_length, input_size)` Returns: output: The forecast, a tensor of shape `(batch_size, out_len, 1)`\"\"\"\n h_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).to(self.device)\n c_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).to(self.device)\n out, (_, _) = self.lstm(x, (h_0, c_0))\n out = self.drop_out(out)\n out = out[:, -1, :]\n out = self.fc1(out)\n output = self.fc2(out)\n return output.unsqueeze(-1)\n", "source": "the_stack_v2_python_sparse", "source_path": "ime/models/lstm.py", "source_repo": "Jimmy-INL/google-research", "split": "val", "star_events_count": 1} {"blob_id": "146fa6ee6cf70eba66fbb92a0411b127ca328b30", "bodies": ["ret = []\nnames = IdentitySearcher.get_batch_user_name(ProfileManager.get_profile_user_oids(profile_oid), channel_model)\nperm_dict = ProfileManager.get_user_permission_lv_dict(channel_model.id)\nremove_self = ProfilePermission.PRF_CONTROL_SELF in permissions\nremove_member = ProfilePermission.PRF_CONTROL_MEMBER in permissions\nis_default = channel_model.config.default_profile_oid == profile_oid\nuser_perm_lv = perm_dict.get(requester_oid, PermissionLevel.lowest())\nfor uid, name in sorted(names.items(), key=lambda item: item[1]):\n if not name:\n name = str(uid)\n controllable = False\n if not is_default and user_perm_lv >= perm_dict.get(uid, PermissionLevel.lowest()):\n controllable = remove_self if uid == requester_oid else remove_member\n ret.append(ProfileControlEntry(root_oid=uid, name=name, controllable=controllable))\nreturn ret", "ret = []\nprofs = list(ProfileManager.get_channel_profiles(channel_oid, partial_name))\nif not profs:\n return ret\nchannel_model = ChannelManager.get_channel_oid(channel_oid)\nif not channel_model:\n return ret\nuser_oids_dict = ProfileManager.get_profiles_user_oids([prof.id for prof in profs])\nuser_oids = []\nfor _, onplat_oids in user_oids_dict.items():\n user_oids.extend(onplat_oids)\nuser_names = IdentitySearcher.get_batch_user_name(user_oids, channel_model)\nfor prof in profs:\n uids = user_oids_dict.get(prof.id, [])\n ret.append(ChannelProfileEntry(prof, [user_names.get(uid) for uid in uids]))\nreturn ret"], "bodies_text": "<|body_start_0|>\n ret = []\n names = IdentitySearcher.get_batch_user_name(ProfileManager.get_profile_user_oids(profile_oid), channel_model)\n perm_dict = ProfileManager.get_user_permission_lv_dict(channel_model.id)\n remove_self = ProfilePermission.PRF_CONTROL_SELF in permissions\n remove_member = ProfilePermission.PRF_CONTROL_MEMBER in permissions\n is_default = channel_model.config.default_profile_oid == profile_oid\n user_perm_lv = perm_dict.get(requester_oid, PermissionLevel.lowest())\n for uid, name in sorted(names.items(), key=lambda item: item[1]):\n if not name:\n name = str(uid)\n controllable = False\n if not is_default and user_perm_lv >= perm_dict.get(uid, PermissionLevel.lowest()):\n controllable = remove_self if uid == requester_oid else remove_member\n ret.append(ProfileControlEntry(root_oid=uid, name=name, controllable=controllable))\n return ret\n<|end_body_0|>\n\n<|body_start_1|>\n ret = []\n profs = list(ProfileManager.get_channel_profiles(channel_oid, partial_name))\n if not profs:\n return ret\n channel_model = ChannelManager.get_channel_oid(channel_oid)\n if not channel_model:\n return ret\n user_oids_dict = ProfileManager.get_profiles_user_oids([prof.id for prof in profs])\n user_oids = []\n for _, onplat_oids in user_oids_dict.items():\n user_oids.extend(onplat_oids)\n user_names 
= IdentitySearcher.get_batch_user_name(user_oids, channel_model)\n for prof in profs:\n uids = user_oids_dict.get(prof.id, [])\n ret.append(ChannelProfileEntry(prof, [user_names.get(uid) for uid in uids]))\n return ret\n<|end_body_1|>\n", "class_docstring": "Helper to process the profile data.", "class_name": "ProfileHelper", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProfileHelper:\n \"\"\"Helper to process the profile data.\"\"\"\n\n def get_user_profile_controls(channel_model, profile_oid: ObjectId, requester_oid: ObjectId, permissions: Set[ProfilePermission]) -> List[ProfileControlEntry]:\n \"\"\"Check if the requester can perform certain actions on members who have the certain profile. The **certain actions** mentioned above currently are: - Control the profile attaching status Actions are unable to perform on the users who have a higher permission level. Actions also cannot be performed on default profile. .. note:: This function is expensive because it calls ``IdentitySearcher.get_batch_user_name()``. :param channel_model: channel data of the profile :param profile_oid: OID of the profile :param requester_oid: OID of the user who requested this check :param permissions: permissions that the requester has :return: list of `ProfileControlEntry` containing the check result\"\"\"\n <|body_0|>\n\n def get_channel_profiles(channel_oid: ObjectId, partial_name: Optional[str]=None) -> List[ChannelProfileEntry]:\n \"\"\"Get a list of the channel profiles in ``channel_oid``. ``partial_name`` can be a part of the profile name. :param channel_oid: channel to get the profiles :param partial_name: keyword to get the profiles :return: list of channel profiles\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ret = []\n names = IdentitySearcher.get_batch_user_name(ProfileManager.get_profile_user_oids(profile_oid), channel_model)\n perm_dict = ProfileManager.get_user_permission_lv_dict(channel_model.id)\n remove_self = ProfilePermission.PRF_CONTROL_SELF in permissions\n remove_member = ProfilePermission.PRF_CONTROL_MEMBER in permissions\n is_default = channel_model.config.default_profile_oid == profile_oid\n user_perm_lv = perm_dict.get(requester_oid, PermissionLevel.lowest())\n for uid, name in sorted(names.items(), key=lambda item: item[1]):\n if not name:\n name = str(uid)\n controllable = False\n if not is_default and user_perm_lv >= perm_dict.get(uid, PermissionLevel.lowest()):\n controllable = remove_self if uid == requester_oid else remove_member\n ret.append(ProfileControlEntry(root_oid=uid, name=name, controllable=controllable))\n return ret\n<|end_body_0|>\n\n<|body_start_1|>\n ret = []\n profs = list(ProfileManager.get_channel_profiles(channel_oid, partial_name))\n if not profs:\n return ret\n channel_model = ChannelManager.get_channel_oid(channel_oid)\n if not channel_model:\n return ret\n user_oids_dict = ProfileManager.get_profiles_user_oids([prof.id for prof in profs])\n user_oids = []\n for _, onplat_oids in user_oids_dict.items():\n user_oids.extend(onplat_oids)\n user_names = IdentitySearcher.get_batch_user_name(user_oids, channel_model)\n for prof in profs:\n uids = user_oids_dict.get(prof.id, [])\n ret.append(ChannelProfileEntry(prof, [user_names.get(uid) for uid in uids]))\n return ret\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000389", "length_bytes": 4237, "license_type": "permissive", "methods": [{"docstring": "Check if the requester can perform certain actions on members who 
have the certain profile. The **certain actions** mentioned above currently are: - Control the profile attaching status Actions are unable to perform on the users who have a higher permission level. Actions also cannot be performed on default profile. .. note:: This function is expensive because it calls ``IdentitySearcher.get_batch_user_name()``. :param channel_model: channel data of the profile :param profile_oid: OID of the profile :param requester_oid: OID of the user who requested this check :param permissions: permissions that the requester has :return: list of `ProfileControlEntry` containing the check result", "name": "get_user_profile_controls", "signature": "def get_user_profile_controls(channel_model, profile_oid: ObjectId, requester_oid: ObjectId, permissions: Set[ProfilePermission]) -> List[ProfileControlEntry]"}, {"docstring": "Get a list of the channel profiles in ``channel_oid``. ``partial_name`` can be a part of the profile name. :param channel_oid: channel to get the profiles :param partial_name: keyword to get the profiles :return: list of channel profiles", "name": "get_channel_profiles", "signature": "def get_channel_profiles(channel_oid: ObjectId, partial_name: Optional[str]=None) -> List[ChannelProfileEntry]"}], "n_methods": 2, "prompt": "Implement the Python class `ProfileHelper` described below.\n\nClass description:\nHelper to process the profile data.\n\nMethod signatures and docstrings:\n- def get_user_profile_controls(channel_model, profile_oid: ObjectId, requester_oid: ObjectId, permissions: Set[ProfilePermission]) -> List[ProfileControlEntry]: Check if the requester can perform certain actions on members who have the certain profile. The **certain actions** mentioned above currently are: - Control the profile attaching status Actions are unable to perform on the users who have a higher permission level. Actions also cannot be performed on default profile. .. note:: This function is expensive because it calls ``IdentitySearcher.get_batch_user_name()``. :param channel_model: channel data of the profile :param profile_oid: OID of the profile :param requester_oid: OID of the user who requested this check :param permissions: permissions that the requester has :return: list of `ProfileControlEntry` containing the check result\n- def get_channel_profiles(channel_oid: ObjectId, partial_name: Optional[str]=None) -> List[ChannelProfileEntry]: Get a list of the channel profiles in ``channel_oid``. ``partial_name`` can be a part of the profile name. :param channel_oid: channel to get the profiles :param partial_name: keyword to get the profiles :return: list of channel profiles", "prompted_full_text": "Implement the Python class `ProfileHelper` described below.\n\nClass description:\nHelper to process the profile data.\n\nMethod signatures and docstrings:\n- def get_user_profile_controls(channel_model, profile_oid: ObjectId, requester_oid: ObjectId, permissions: Set[ProfilePermission]) -> List[ProfileControlEntry]: Check if the requester can perform certain actions on members who have the certain profile. The **certain actions** mentioned above currently are: - Control the profile attaching status Actions are unable to perform on the users who have a higher permission level. Actions also cannot be performed on default profile. .. note:: This function is expensive because it calls ``IdentitySearcher.get_batch_user_name()``. 
:param channel_model: channel data of the profile :param profile_oid: OID of the profile :param requester_oid: OID of the user who requested this check :param permissions: permissions that the requester has :return: list of `ProfileControlEntry` containing the check result\n- def get_channel_profiles(channel_oid: ObjectId, partial_name: Optional[str]=None) -> List[ChannelProfileEntry]: Get a list of the channel profiles in ``channel_oid``. ``partial_name`` can be a part of the profile name. :param channel_oid: channel to get the profiles :param partial_name: keyword to get the profiles :return: list of channel profiles\n\n<|skeleton|>\nclass ProfileHelper:\n \"\"\"Helper to process the profile data.\"\"\"\n\n def get_user_profile_controls(channel_model, profile_oid: ObjectId, requester_oid: ObjectId, permissions: Set[ProfilePermission]) -> List[ProfileControlEntry]:\n \"\"\"Check if the requester can perform certain actions on members who have the certain profile. The **certain actions** mentioned above currently are: - Control the profile attaching status Actions are unable to perform on the users who have a higher permission level. Actions also cannot be performed on default profile. .. note:: This function is expensive because it calls ``IdentitySearcher.get_batch_user_name()``. :param channel_model: channel data of the profile :param profile_oid: OID of the profile :param requester_oid: OID of the user who requested this check :param permissions: permissions that the requester has :return: list of `ProfileControlEntry` containing the check result\"\"\"\n <|body_0|>\n\n def get_channel_profiles(channel_oid: ObjectId, partial_name: Optional[str]=None) -> List[ChannelProfileEntry]:\n \"\"\"Get a list of the channel profiles in ``channel_oid``. ``partial_name`` can be a part of the profile name. 
:param channel_oid: channel to get the profiles :param partial_name: keyword to get the profiles :return: list of channel profiles\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ret = []\n names = IdentitySearcher.get_batch_user_name(ProfileManager.get_profile_user_oids(profile_oid), channel_model)\n perm_dict = ProfileManager.get_user_permission_lv_dict(channel_model.id)\n remove_self = ProfilePermission.PRF_CONTROL_SELF in permissions\n remove_member = ProfilePermission.PRF_CONTROL_MEMBER in permissions\n is_default = channel_model.config.default_profile_oid == profile_oid\n user_perm_lv = perm_dict.get(requester_oid, PermissionLevel.lowest())\n for uid, name in sorted(names.items(), key=lambda item: item[1]):\n if not name:\n name = str(uid)\n controllable = False\n if not is_default and user_perm_lv >= perm_dict.get(uid, PermissionLevel.lowest()):\n controllable = remove_self if uid == requester_oid else remove_member\n ret.append(ProfileControlEntry(root_oid=uid, name=name, controllable=controllable))\n return ret\n<|end_body_0|>\n\n<|body_start_1|>\n ret = []\n profs = list(ProfileManager.get_channel_profiles(channel_oid, partial_name))\n if not profs:\n return ret\n channel_model = ChannelManager.get_channel_oid(channel_oid)\n if not channel_model:\n return ret\n user_oids_dict = ProfileManager.get_profiles_user_oids([prof.id for prof in profs])\n user_oids = []\n for _, onplat_oids in user_oids_dict.items():\n user_oids.extend(onplat_oids)\n user_names = IdentitySearcher.get_batch_user_name(user_oids, channel_model)\n for prof in profs:\n uids = user_oids_dict.get(prof.id, [])\n ret.append(ChannelProfileEntry(prof, [user_names.get(uid) for uid in uids]))\n return ret\n<|end_body_1|>\n", "revision_id": "c7da1e91783dce3a2b71b955b3a22b68db9056cf", "skeleton": "<|skeleton|>\nclass ProfileHelper:\n \"\"\"Helper to process the profile data.\"\"\"\n\n def get_user_profile_controls(channel_model, profile_oid: ObjectId, requester_oid: ObjectId, permissions: Set[ProfilePermission]) -> List[ProfileControlEntry]:\n \"\"\"Check if the requester can perform certain actions on members who have the certain profile. The **certain actions** mentioned above currently are: - Control the profile attaching status Actions are unable to perform on the users who have a higher permission level. Actions also cannot be performed on default profile. .. note:: This function is expensive because it calls ``IdentitySearcher.get_batch_user_name()``. :param channel_model: channel data of the profile :param profile_oid: OID of the profile :param requester_oid: OID of the user who requested this check :param permissions: permissions that the requester has :return: list of `ProfileControlEntry` containing the check result\"\"\"\n <|body_0|>\n\n def get_channel_profiles(channel_oid: ObjectId, partial_name: Optional[str]=None) -> List[ChannelProfileEntry]:\n \"\"\"Get a list of the channel profiles in ``channel_oid``. ``partial_name`` can be a part of the profile name. 
:param channel_oid: channel to get the profiles :param partial_name: keyword to get the profiles :return: list of channel profiles\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ProfileHelper:\n \"\"\"Helper to process the profile data.\"\"\"\n\n def get_user_profile_controls(channel_model, profile_oid: ObjectId, requester_oid: ObjectId, permissions: Set[ProfilePermission]) -> List[ProfileControlEntry]:\n \"\"\"Check if the requester can perform certain actions on members who have the certain profile. The **certain actions** mentioned above currently are: - Control the profile attaching status Actions are unable to perform on the users who have a higher permission level. Actions also cannot be performed on default profile. .. note:: This function is expensive because it calls ``IdentitySearcher.get_batch_user_name()``. :param channel_model: channel data of the profile :param profile_oid: OID of the profile :param requester_oid: OID of the user who requested this check :param permissions: permissions that the requester has :return: list of `ProfileControlEntry` containing the check result\"\"\"\n ret = []\n names = IdentitySearcher.get_batch_user_name(ProfileManager.get_profile_user_oids(profile_oid), channel_model)\n perm_dict = ProfileManager.get_user_permission_lv_dict(channel_model.id)\n remove_self = ProfilePermission.PRF_CONTROL_SELF in permissions\n remove_member = ProfilePermission.PRF_CONTROL_MEMBER in permissions\n is_default = channel_model.config.default_profile_oid == profile_oid\n user_perm_lv = perm_dict.get(requester_oid, PermissionLevel.lowest())\n for uid, name in sorted(names.items(), key=lambda item: item[1]):\n if not name:\n name = str(uid)\n controllable = False\n if not is_default and user_perm_lv >= perm_dict.get(uid, PermissionLevel.lowest()):\n controllable = remove_self if uid == requester_oid else remove_member\n ret.append(ProfileControlEntry(root_oid=uid, name=name, controllable=controllable))\n return ret\n\n def get_channel_profiles(channel_oid: ObjectId, partial_name: Optional[str]=None) -> List[ChannelProfileEntry]:\n \"\"\"Get a list of the channel profiles in ``channel_oid``. ``partial_name`` can be a part of the profile name. 
:param channel_oid: channel to get the profiles :param partial_name: keyword to get the profiles :return: list of channel profiles\"\"\"\n ret = []\n profs = list(ProfileManager.get_channel_profiles(channel_oid, partial_name))\n if not profs:\n return ret\n channel_model = ChannelManager.get_channel_oid(channel_oid)\n if not channel_model:\n return ret\n user_oids_dict = ProfileManager.get_profiles_user_oids([prof.id for prof in profs])\n user_oids = []\n for _, onplat_oids in user_oids_dict.items():\n user_oids.extend(onplat_oids)\n user_names = IdentitySearcher.get_batch_user_name(user_oids, channel_model)\n for prof in profs:\n uids = user_oids_dict.get(prof.id, [])\n ret.append(ChannelProfileEntry(prof, [user_names.get(uid) for uid in uids]))\n return ret\n", "source": "the_stack_v2_python_sparse", "source_path": "mongodb/helper/prof.py", "source_repo": "RxJellyBot/Jelly-Bot", "split": "val", "star_events_count": 5} {"blob_id": "06e49ad763cea5c869d7afa3cc69f4865e387308", "bodies": ["s1 = sorted(s1)\nwindow_sz = len(s1)\nfor i in range(len(s2) - window_sz + 1):\n sub_s2 = sorted(s2[i:i + window_sz])\n if s1 == sub_s2:\n return True\nreturn False", "counter1 = Counter(s1)\nwindow_sz = len(s1)\nfor i in range(len(s2) - window_sz + 1):\n counter2 = Counter(s2[i:i + window_sz])\n if counter1 == counter2:\n return True\nreturn False", "counter1 = Counter(s1)\nwindow_sz = len(s1)\ncounter2 = Counter(s2[:window_sz])\nif counter1 == counter2:\n return True\nfor i in range(1, len(s2) - window_sz + 1):\n counter2[s2[i - 1]] -= 1\n if counter2[s2[i - 1]] <= 0:\n del counter2[s2[i - 1]]\n counter2[s2[i + window_sz - 1]] += 1\n if counter1 == counter2:\n return True\nreturn False", "if len(s1) > len(s2):\n return False\nwindow_sz = len(s1)\ncounter1, counter2 = ([0] * 26, [0] * 26)\nfor i in range(window_sz):\n counter1[ord(s1[i]) - ord('a')] += 1\n counter2[ord(s2[i]) - ord('a')] += 1\nif counter1 == counter2:\n return True\nfor i in range(1, len(s2) - window_sz + 1):\n counter2[ord(s2[i - 1]) - ord('a')] -= 1\n counter2[ord(s2[i + window_sz - 1]) - ord('a')] += 1\n if counter1 == counter2:\n return True\nreturn False"], "bodies_text": "<|body_start_0|>\n s1 = sorted(s1)\n window_sz = len(s1)\n for i in range(len(s2) - window_sz + 1):\n sub_s2 = sorted(s2[i:i + window_sz])\n if s1 == sub_s2:\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n counter1 = Counter(s1)\n window_sz = len(s1)\n for i in range(len(s2) - window_sz + 1):\n counter2 = Counter(s2[i:i + window_sz])\n if counter1 == counter2:\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n counter1 = Counter(s1)\n window_sz = len(s1)\n counter2 = Counter(s2[:window_sz])\n if counter1 == counter2:\n return True\n for i in range(1, len(s2) - window_sz + 1):\n counter2[s2[i - 1]] -= 1\n if counter2[s2[i - 1]] <= 0:\n del counter2[s2[i - 1]]\n counter2[s2[i + window_sz - 1]] += 1\n if counter1 == counter2:\n return True\n return False\n<|end_body_2|>\n\n<|body_start_3|>\n if len(s1) > len(s2):\n return False\n window_sz = len(s1)\n counter1, counter2 = ([0] * 26, [0] * 26)\n for i in range(window_sz):\n counter1[ord(s1[i]) - ord('a')] += 1\n counter2[ord(s2[i]) - ord('a')] += 1\n if counter1 == counter2:\n return True\n for i in range(1, len(s2) - window_sz + 1):\n counter2[ord(s2[i - 1]) - ord('a')] -= 1\n counter2[ord(s2[i + window_sz - 1]) - ord('a')] += 1\n if counter1 == counter2:\n return True\n return False\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], 
"format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"Approach2: Sort, Time: O(l1logl1 + (l2-l1)*(l1logl1+l1)), Space: O(1), TLE\"\"\"\n <|body_0|>\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"Approach3: HashTable, Time: O(l1+(l2-l1)*l1), Space: O(1)\"\"\"\n <|body_1|>\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"Approach5 but use Counter: HashTable + Sliding Window, Time: O(l1+(l2-l1)), Space: O(1)\"\"\"\n <|body_2|>\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"Approach5: Array + Sliding Window, Time: O(l1+(l2-l1)), Space: O(1)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n s1 = sorted(s1)\n window_sz = len(s1)\n for i in range(len(s2) - window_sz + 1):\n sub_s2 = sorted(s2[i:i + window_sz])\n if s1 == sub_s2:\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n counter1 = Counter(s1)\n window_sz = len(s1)\n for i in range(len(s2) - window_sz + 1):\n counter2 = Counter(s2[i:i + window_sz])\n if counter1 == counter2:\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n counter1 = Counter(s1)\n window_sz = len(s1)\n counter2 = Counter(s2[:window_sz])\n if counter1 == counter2:\n return True\n for i in range(1, len(s2) - window_sz + 1):\n counter2[s2[i - 1]] -= 1\n if counter2[s2[i - 1]] <= 0:\n del counter2[s2[i - 1]]\n counter2[s2[i + window_sz - 1]] += 1\n if counter1 == counter2:\n return True\n return False\n<|end_body_2|>\n\n<|body_start_3|>\n if len(s1) > len(s2):\n return False\n window_sz = len(s1)\n counter1, counter2 = ([0] * 26, [0] * 26)\n for i in range(window_sz):\n counter1[ord(s1[i]) - ord('a')] += 1\n counter2[ord(s2[i]) - ord('a')] += 1\n if counter1 == counter2:\n return True\n for i in range(1, len(s2) - window_sz + 1):\n counter2[ord(s2[i - 1]) - ord('a')] -= 1\n counter2[ord(s2[i + window_sz - 1]) - ord('a')] += 1\n if counter1 == counter2:\n return True\n return False\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000390", "length_bytes": 2508, "license_type": "no_license", "methods": [{"docstring": "Approach2: Sort, Time: O(l1logl1 + (l2-l1)*(l1logl1+l1)), Space: O(1), TLE", "name": "checkInclusion", "signature": "def checkInclusion(self, s1: str, s2: str) -> bool"}, {"docstring": "Approach3: HashTable, Time: O(l1+(l2-l1)*l1), Space: O(1)", "name": "checkInclusion", "signature": "def checkInclusion(self, s1: str, s2: str) -> bool"}, {"docstring": "Approach5 but use Counter: HashTable + Sliding Window, Time: O(l1+(l2-l1)), Space: O(1)", "name": "checkInclusion", "signature": "def checkInclusion(self, s1: str, s2: str) -> bool"}, {"docstring": "Approach5: Array + Sliding Window, Time: O(l1+(l2-l1)), Space: O(1)", "name": "checkInclusion", "signature": "def checkInclusion(self, s1: str, s2: str) -> bool"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_004902", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def checkInclusion(self, s1: str, s2: str) -> bool: Approach2: Sort, Time: O(l1logl1 + (l2-l1)*(l1logl1+l1)), Space: O(1), TLE\n- def checkInclusion(self, s1: str, s2: str) -> bool: Approach3: HashTable, Time: O(l1+(l2-l1)*l1), Space: O(1)\n- def checkInclusion(self, s1: str, s2: str) -> bool: Approach5 but use Counter: HashTable + Sliding Window, Time: O(l1+(l2-l1)), Space: O(1)\n- def checkInclusion(self, s1: str, s2: 
str) -> bool: Approach5: Array + Sliding Window, Time: O(l1+(l2-l1)), Space: O(1)", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def checkInclusion(self, s1: str, s2: str) -> bool: Approach2: Sort, Time: O(l1logl1 + (l2-l1)*(l1logl1+l1)), Space: O(1), TLE\n- def checkInclusion(self, s1: str, s2: str) -> bool: Approach3: HashTable, Time: O(l1+(l2-l1)*l1), Space: O(1)\n- def checkInclusion(self, s1: str, s2: str) -> bool: Approach5 but use Counter: HashTable + Sliding Window, Time: O(l1+(l2-l1)), Space: O(1)\n- def checkInclusion(self, s1: str, s2: str) -> bool: Approach5: Array + Sliding Window, Time: O(l1+(l2-l1)), Space: O(1)\n\n<|skeleton|>\nclass Solution:\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"Approach2: Sort, Time: O(l1logl1 + (l2-l1)*(l1logl1+l1)), Space: O(1), TLE\"\"\"\n <|body_0|>\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"Approach3: HashTable, Time: O(l1+(l2-l1)*l1), Space: O(1)\"\"\"\n <|body_1|>\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"Approach5 but use Counter: HashTable + Sliding Window, Time: O(l1+(l2-l1)), Space: O(1)\"\"\"\n <|body_2|>\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"Approach5: Array + Sliding Window, Time: O(l1+(l2-l1)), Space: O(1)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n s1 = sorted(s1)\n window_sz = len(s1)\n for i in range(len(s2) - window_sz + 1):\n sub_s2 = sorted(s2[i:i + window_sz])\n if s1 == sub_s2:\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n counter1 = Counter(s1)\n window_sz = len(s1)\n for i in range(len(s2) - window_sz + 1):\n counter2 = Counter(s2[i:i + window_sz])\n if counter1 == counter2:\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n counter1 = Counter(s1)\n window_sz = len(s1)\n counter2 = Counter(s2[:window_sz])\n if counter1 == counter2:\n return True\n for i in range(1, len(s2) - window_sz + 1):\n counter2[s2[i - 1]] -= 1\n if counter2[s2[i - 1]] <= 0:\n del counter2[s2[i - 1]]\n counter2[s2[i + window_sz - 1]] += 1\n if counter1 == counter2:\n return True\n return False\n<|end_body_2|>\n\n<|body_start_3|>\n if len(s1) > len(s2):\n return False\n window_sz = len(s1)\n counter1, counter2 = ([0] * 26, [0] * 26)\n for i in range(window_sz):\n counter1[ord(s1[i]) - ord('a')] += 1\n counter2[ord(s2[i]) - ord('a')] += 1\n if counter1 == counter2:\n return True\n for i in range(1, len(s2) - window_sz + 1):\n counter2[ord(s2[i - 1]) - ord('a')] -= 1\n counter2[ord(s2[i + window_sz - 1]) - ord('a')] += 1\n if counter1 == counter2:\n return True\n return False\n<|end_body_3|>\n", "revision_id": "72136e3487d239f5b37e2d6393e034262a6bf599", "skeleton": "<|skeleton|>\nclass Solution:\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"Approach2: Sort, Time: O(l1logl1 + (l2-l1)*(l1logl1+l1)), Space: O(1), TLE\"\"\"\n <|body_0|>\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"Approach3: HashTable, Time: O(l1+(l2-l1)*l1), Space: O(1)\"\"\"\n <|body_1|>\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"Approach5 but use Counter: HashTable + Sliding Window, Time: O(l1+(l2-l1)), Space: O(1)\"\"\"\n <|body_2|>\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"Approach5: Array + Sliding Window, Time: O(l1+(l2-l1)), Space: O(1)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", 
"snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"Approach2: Sort, Time: O(l1logl1 + (l2-l1)*(l1logl1+l1)), Space: O(1), TLE\"\"\"\n s1 = sorted(s1)\n window_sz = len(s1)\n for i in range(len(s2) - window_sz + 1):\n sub_s2 = sorted(s2[i:i + window_sz])\n if s1 == sub_s2:\n return True\n return False\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"Approach3: HashTable, Time: O(l1+(l2-l1)*l1), Space: O(1)\"\"\"\n counter1 = Counter(s1)\n window_sz = len(s1)\n for i in range(len(s2) - window_sz + 1):\n counter2 = Counter(s2[i:i + window_sz])\n if counter1 == counter2:\n return True\n return False\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"Approach5 but use Counter: HashTable + Sliding Window, Time: O(l1+(l2-l1)), Space: O(1)\"\"\"\n counter1 = Counter(s1)\n window_sz = len(s1)\n counter2 = Counter(s2[:window_sz])\n if counter1 == counter2:\n return True\n for i in range(1, len(s2) - window_sz + 1):\n counter2[s2[i - 1]] -= 1\n if counter2[s2[i - 1]] <= 0:\n del counter2[s2[i - 1]]\n counter2[s2[i + window_sz - 1]] += 1\n if counter1 == counter2:\n return True\n return False\n\n def checkInclusion(self, s1: str, s2: str) -> bool:\n \"\"\"Approach5: Array + Sliding Window, Time: O(l1+(l2-l1)), Space: O(1)\"\"\"\n if len(s1) > len(s2):\n return False\n window_sz = len(s1)\n counter1, counter2 = ([0] * 26, [0] * 26)\n for i in range(window_sz):\n counter1[ord(s1[i]) - ord('a')] += 1\n counter2[ord(s2[i]) - ord('a')] += 1\n if counter1 == counter2:\n return True\n for i in range(1, len(s2) - window_sz + 1):\n counter2[ord(s2[i - 1]) - ord('a')] -= 1\n counter2[ord(s2[i + window_sz - 1]) - ord('a')] += 1\n if counter1 == counter2:\n return True\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "python/567-Permutation in String.py", "source_repo": "cwza/leetcode", "split": "val", "star_events_count": 0} {"blob_id": "386fbdb9239fc74634400ff63f903a9d5fa6fdc6", "bodies": ["orchestrator = None\nbackend = 'consul'\nif kwargs.get('--orchestrator'):\n orchestrator = kwargs['--orchestrator']\nif kwargs.get('--backend'):\n backend = kwargs['--backend']\nif not orchestrator or not backend:\n logger.error('Orchestrator and backend need to be specified')\n sys.exit(1)\ntry:\n getattr(cls, '%s_%s_gen_config' % (orchestrator, backend))(**kwargs)\nexcept AttributeError:\n logger.error(\"No method to configure orchestrator '%s' with backend '%s'\", orchestrator, backend)\n sys.exit(1)", "if kwargs.get('--swarm-managers-hostname'):\n swarm_config = SwarmNodesConfig(managers_hostname=kwargs['--swarm-managers-hostname'], workers_hostname=kwargs['--swarm-workers-hostname'] if '--swarm-workers-hostname' in kwargs else [], bootstrap_address=kwargs['--bootstrap-address'] if '--bootstrap-address' in kwargs else None, domain=kwargs['--domain'] if '--domain' in kwargs else 'docker.local', deployment_type=kwargs['--deployment-type'] if '--deployment-type' in kwargs else 'swarmservices')\nelse:\n logger.error('Create config failed : swarm managers hostname need to be specified with parameter \"--swarm-managers-hostname\"')\n sys.exit(1)\nswarm_config.generate()"], "bodies_text": "<|body_start_0|>\n orchestrator = None\n backend = 'consul'\n if kwargs.get('--orchestrator'):\n orchestrator = kwargs['--orchestrator']\n if kwargs.get('--backend'):\n backend = kwargs['--backend']\n if not orchestrator or not backend:\n logger.error('Orchestrator and backend need to be 
specified')\n sys.exit(1)\n try:\n getattr(cls, '%s_%s_gen_config' % (orchestrator, backend))(**kwargs)\n except AttributeError:\n logger.error(\"No method to configure orchestrator '%s' with backend '%s'\", orchestrator, backend)\n sys.exit(1)\n<|end_body_0|>\n\n<|body_start_1|>\n if kwargs.get('--swarm-managers-hostname'):\n swarm_config = SwarmNodesConfig(managers_hostname=kwargs['--swarm-managers-hostname'], workers_hostname=kwargs['--swarm-workers-hostname'] if '--swarm-workers-hostname' in kwargs else [], bootstrap_address=kwargs['--bootstrap-address'] if '--bootstrap-address' in kwargs else None, domain=kwargs['--domain'] if '--domain' in kwargs else 'docker.local', deployment_type=kwargs['--deployment-type'] if '--deployment-type' in kwargs else 'swarmservices')\n else:\n logger.error('Create config failed : swarm managers hostname need to be specified with parameter \"--swarm-managers-hostname\"')\n sys.exit(1)\n swarm_config.generate()\n<|end_body_1|>\n", "class_docstring": "Manage config from command given to sentinel", "class_name": "ConfigManager", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ConfigManager:\n \"\"\"Manage config from command given to sentinel\"\"\"\n\n def create_config(cls, logger=None, **kwargs):\n \"\"\"Create the config\"\"\"\n <|body_0|>\n\n def swarm_consul_gen_config(logger=None, **kwargs):\n \"\"\"Generate SwarmNodeConfig from command given to sentinel\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n orchestrator = None\n backend = 'consul'\n if kwargs.get('--orchestrator'):\n orchestrator = kwargs['--orchestrator']\n if kwargs.get('--backend'):\n backend = kwargs['--backend']\n if not orchestrator or not backend:\n logger.error('Orchestrator and backend need to be specified')\n sys.exit(1)\n try:\n getattr(cls, '%s_%s_gen_config' % (orchestrator, backend))(**kwargs)\n except AttributeError:\n logger.error(\"No method to configure orchestrator '%s' with backend '%s'\", orchestrator, backend)\n sys.exit(1)\n<|end_body_0|>\n\n<|body_start_1|>\n if kwargs.get('--swarm-managers-hostname'):\n swarm_config = SwarmNodesConfig(managers_hostname=kwargs['--swarm-managers-hostname'], workers_hostname=kwargs['--swarm-workers-hostname'] if '--swarm-workers-hostname' in kwargs else [], bootstrap_address=kwargs['--bootstrap-address'] if '--bootstrap-address' in kwargs else None, domain=kwargs['--domain'] if '--domain' in kwargs else 'docker.local', deployment_type=kwargs['--deployment-type'] if '--deployment-type' in kwargs else 'swarmservices')\n else:\n logger.error('Create config failed : swarm managers hostname need to be specified with parameter \"--swarm-managers-hostname\"')\n sys.exit(1)\n swarm_config.generate()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000391", "length_bytes": 7321, "license_type": "permissive", "methods": [{"docstring": "Create the config", "name": "create_config", "signature": "def create_config(cls, logger=None, **kwargs)"}, {"docstring": "Generate SwarmNodeConfig from command given to sentinel", "name": "swarm_consul_gen_config", "signature": "def swarm_consul_gen_config(logger=None, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006438", "prompt": "Implement the Python class `ConfigManager` described below.\n\nClass description:\nManage config from command given to sentinel\n\nMethod signatures and docstrings:\n- def create_config(cls, logger=None, **kwargs): Create the config\n- def 
swarm_consul_gen_config(logger=None, **kwargs): Generate SwarmNodeConfig from command given to sentinel", "prompted_full_text": "Implement the Python class `ConfigManager` described below.\n\nClass description:\nManage config from command given to sentinel\n\nMethod signatures and docstrings:\n- def create_config(cls, logger=None, **kwargs): Create the config\n- def swarm_consul_gen_config(logger=None, **kwargs): Generate SwarmNodeConfig from command given to sentinel\n\n<|skeleton|>\nclass ConfigManager:\n \"\"\"Manage config from command given to sentinel\"\"\"\n\n def create_config(cls, logger=None, **kwargs):\n \"\"\"Create the config\"\"\"\n <|body_0|>\n\n def swarm_consul_gen_config(logger=None, **kwargs):\n \"\"\"Generate SwarmNodeConfig from command given to sentinel\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n orchestrator = None\n backend = 'consul'\n if kwargs.get('--orchestrator'):\n orchestrator = kwargs['--orchestrator']\n if kwargs.get('--backend'):\n backend = kwargs['--backend']\n if not orchestrator or not backend:\n logger.error('Orchestrator and backend need to be specified')\n sys.exit(1)\n try:\n getattr(cls, '%s_%s_gen_config' % (orchestrator, backend))(**kwargs)\n except AttributeError:\n logger.error(\"No method to configure orchestrator '%s' with backend '%s'\", orchestrator, backend)\n sys.exit(1)\n<|end_body_0|>\n\n<|body_start_1|>\n if kwargs.get('--swarm-managers-hostname'):\n swarm_config = SwarmNodesConfig(managers_hostname=kwargs['--swarm-managers-hostname'], workers_hostname=kwargs['--swarm-workers-hostname'] if '--swarm-workers-hostname' in kwargs else [], bootstrap_address=kwargs['--bootstrap-address'] if '--bootstrap-address' in kwargs else None, domain=kwargs['--domain'] if '--domain' in kwargs else 'docker.local', deployment_type=kwargs['--deployment-type'] if '--deployment-type' in kwargs else 'swarmservices')\n else:\n logger.error('Create config failed : swarm managers hostname need to be specified with parameter \"--swarm-managers-hostname\"')\n sys.exit(1)\n swarm_config.generate()\n<|end_body_1|>\n", "revision_id": "bfe0c0007c4ee448703efc25e8110a926d432328", "skeleton": "<|skeleton|>\nclass ConfigManager:\n \"\"\"Manage config from command given to sentinel\"\"\"\n\n def create_config(cls, logger=None, **kwargs):\n \"\"\"Create the config\"\"\"\n <|body_0|>\n\n def swarm_consul_gen_config(logger=None, **kwargs):\n \"\"\"Generate SwarmNodeConfig from command given to sentinel\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ConfigManager:\n \"\"\"Manage config from command given to sentinel\"\"\"\n\n def create_config(cls, logger=None, **kwargs):\n \"\"\"Create the config\"\"\"\n orchestrator = None\n backend = 'consul'\n if kwargs.get('--orchestrator'):\n orchestrator = kwargs['--orchestrator']\n if kwargs.get('--backend'):\n backend = kwargs['--backend']\n if not orchestrator or not backend:\n logger.error('Orchestrator and backend need to be specified')\n sys.exit(1)\n try:\n getattr(cls, '%s_%s_gen_config' % (orchestrator, backend))(**kwargs)\n except AttributeError:\n logger.error(\"No method to configure orchestrator '%s' with backend '%s'\", orchestrator, backend)\n sys.exit(1)\n\n def swarm_consul_gen_config(logger=None, **kwargs):\n \"\"\"Generate SwarmNodeConfig from command given to sentinel\"\"\"\n if kwargs.get('--swarm-managers-hostname'):\n swarm_config = 
SwarmNodesConfig(managers_hostname=kwargs['--swarm-managers-hostname'], workers_hostname=kwargs['--swarm-workers-hostname'] if '--swarm-workers-hostname' in kwargs else [], bootstrap_address=kwargs['--bootstrap-address'] if '--bootstrap-address' in kwargs else None, domain=kwargs['--domain'] if '--domain' in kwargs else 'docker.local', deployment_type=kwargs['--deployment-type'] if '--deployment-type' in kwargs else 'swarmservices')\n else:\n logger.error('Create config failed : swarm managers hostname need to be specified with parameter \"--swarm-managers-hostname\"')\n sys.exit(1)\n swarm_config.generate()\n", "source": "the_stack_v2_python_sparse", "source_path": "sentinel/discovery/layers/presentation/coordination/create_config.py", "source_repo": "alterway/sentinel", "split": "val", "star_events_count": 0} {"blob_id": "0101cb4e170a8d168a24134fee231c0d44274d44", "bodies": ["LOG.debug('Plumbing VIP for amphora id: %s', amphora.get(constants.ID))\nsession = db_apis.get_session()\nwith session.begin():\n db_amp = self.amphora_repo.get(session, id=amphora.get(constants.ID))\n db_subnet = self.network_driver.get_subnet(subnet[constants.ID])\n db_lb = self.loadbalancer_repo.get(session, id=loadbalancer[constants.LOADBALANCER_ID])\namp_data = self.network_driver.plug_aap_port(db_lb, db_lb.vip, db_amp, db_subnet)\nreturn amp_data.to_dict()", "if isinstance(result, failure.Failure):\n return\nLOG.warning('Unable to plug VIP for amphora id %s load balancer id %s', amphora.get(constants.ID), loadbalancer[constants.LOADBALANCER_ID])\ntry:\n session = db_apis.get_session()\n with session.begin():\n db_amp = self.amphora_repo.get(session, id=amphora.get(constants.ID))\n db_amp.vrrp_port_id = result[constants.VRRP_PORT_ID]\n db_amp.ha_port_id = result[constants.HA_PORT_ID]\n db_subnet = self.network_driver.get_subnet(subnet[constants.ID])\n db_lb = self.loadbalancer_repo.get(session, id=loadbalancer[constants.LOADBALANCER_ID])\n self.network_driver.unplug_aap_port(db_lb.vip, db_amp, db_subnet)\nexcept Exception as e:\n LOG.error('Failed to unplug AAP port. Resources may still be in use for VIP: %s due to error: %s', db_lb.vip, str(e))"], "bodies_text": "<|body_start_0|>\n LOG.debug('Plumbing VIP for amphora id: %s', amphora.get(constants.ID))\n session = db_apis.get_session()\n with session.begin():\n db_amp = self.amphora_repo.get(session, id=amphora.get(constants.ID))\n db_subnet = self.network_driver.get_subnet(subnet[constants.ID])\n db_lb = self.loadbalancer_repo.get(session, id=loadbalancer[constants.LOADBALANCER_ID])\n amp_data = self.network_driver.plug_aap_port(db_lb, db_lb.vip, db_amp, db_subnet)\n return amp_data.to_dict()\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(result, failure.Failure):\n return\n LOG.warning('Unable to plug VIP for amphora id %s load balancer id %s', amphora.get(constants.ID), loadbalancer[constants.LOADBALANCER_ID])\n try:\n session = db_apis.get_session()\n with session.begin():\n db_amp = self.amphora_repo.get(session, id=amphora.get(constants.ID))\n db_amp.vrrp_port_id = result[constants.VRRP_PORT_ID]\n db_amp.ha_port_id = result[constants.HA_PORT_ID]\n db_subnet = self.network_driver.get_subnet(subnet[constants.ID])\n db_lb = self.loadbalancer_repo.get(session, id=loadbalancer[constants.LOADBALANCER_ID])\n self.network_driver.unplug_aap_port(db_lb.vip, db_amp, db_subnet)\n except Exception as e:\n LOG.error('Failed to unplug AAP port. 
Resources may still be in use for VIP: %s due to error: %s', db_lb.vip, str(e))\n<|end_body_1|>\n", "class_docstring": "Task to plumb a VIP.", "class_name": "PlugVIPAmphora", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PlugVIPAmphora:\n \"\"\"Task to plumb a VIP.\"\"\"\n\n def execute(self, loadbalancer, amphora, subnet):\n \"\"\"Plumb a vip to an amphora.\"\"\"\n <|body_0|>\n\n def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs):\n \"\"\"Handle a failure to plumb a vip.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n LOG.debug('Plumbing VIP for amphora id: %s', amphora.get(constants.ID))\n session = db_apis.get_session()\n with session.begin():\n db_amp = self.amphora_repo.get(session, id=amphora.get(constants.ID))\n db_subnet = self.network_driver.get_subnet(subnet[constants.ID])\n db_lb = self.loadbalancer_repo.get(session, id=loadbalancer[constants.LOADBALANCER_ID])\n amp_data = self.network_driver.plug_aap_port(db_lb, db_lb.vip, db_amp, db_subnet)\n return amp_data.to_dict()\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(result, failure.Failure):\n return\n LOG.warning('Unable to plug VIP for amphora id %s load balancer id %s', amphora.get(constants.ID), loadbalancer[constants.LOADBALANCER_ID])\n try:\n session = db_apis.get_session()\n with session.begin():\n db_amp = self.amphora_repo.get(session, id=amphora.get(constants.ID))\n db_amp.vrrp_port_id = result[constants.VRRP_PORT_ID]\n db_amp.ha_port_id = result[constants.HA_PORT_ID]\n db_subnet = self.network_driver.get_subnet(subnet[constants.ID])\n db_lb = self.loadbalancer_repo.get(session, id=loadbalancer[constants.LOADBALANCER_ID])\n self.network_driver.unplug_aap_port(db_lb.vip, db_amp, db_subnet)\n except Exception as e:\n LOG.error('Failed to unplug AAP port. 
Resources may still be in use for VIP: %s due to error: %s', db_lb.vip, str(e))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000392", "length_bytes": 44034, "license_type": "permissive", "methods": [{"docstring": "Plumb a vip to an amphora.", "name": "execute", "signature": "def execute(self, loadbalancer, amphora, subnet)"}, {"docstring": "Handle a failure to plumb a vip.", "name": "revert", "signature": "def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs)"}], "n_methods": 2, "prompt": "Implement the Python class `PlugVIPAmphora` described below.\n\nClass description:\nTask to plumb a VIP.\n\nMethod signatures and docstrings:\n- def execute(self, loadbalancer, amphora, subnet): Plumb a vip to an amphora.\n- def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs): Handle a failure to plumb a vip.", "prompted_full_text": "Implement the Python class `PlugVIPAmphora` described below.\n\nClass description:\nTask to plumb a VIP.\n\nMethod signatures and docstrings:\n- def execute(self, loadbalancer, amphora, subnet): Plumb a vip to an amphora.\n- def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs): Handle a failure to plumb a vip.\n\n<|skeleton|>\nclass PlugVIPAmphora:\n \"\"\"Task to plumb a VIP.\"\"\"\n\n def execute(self, loadbalancer, amphora, subnet):\n \"\"\"Plumb a vip to an amphora.\"\"\"\n <|body_0|>\n\n def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs):\n \"\"\"Handle a failure to plumb a vip.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n LOG.debug('Plumbing VIP for amphora id: %s', amphora.get(constants.ID))\n session = db_apis.get_session()\n with session.begin():\n db_amp = self.amphora_repo.get(session, id=amphora.get(constants.ID))\n db_subnet = self.network_driver.get_subnet(subnet[constants.ID])\n db_lb = self.loadbalancer_repo.get(session, id=loadbalancer[constants.LOADBALANCER_ID])\n amp_data = self.network_driver.plug_aap_port(db_lb, db_lb.vip, db_amp, db_subnet)\n return amp_data.to_dict()\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(result, failure.Failure):\n return\n LOG.warning('Unable to plug VIP for amphora id %s load balancer id %s', amphora.get(constants.ID), loadbalancer[constants.LOADBALANCER_ID])\n try:\n session = db_apis.get_session()\n with session.begin():\n db_amp = self.amphora_repo.get(session, id=amphora.get(constants.ID))\n db_amp.vrrp_port_id = result[constants.VRRP_PORT_ID]\n db_amp.ha_port_id = result[constants.HA_PORT_ID]\n db_subnet = self.network_driver.get_subnet(subnet[constants.ID])\n db_lb = self.loadbalancer_repo.get(session, id=loadbalancer[constants.LOADBALANCER_ID])\n self.network_driver.unplug_aap_port(db_lb.vip, db_amp, db_subnet)\n except Exception as e:\n LOG.error('Failed to unplug AAP port. 
Resources may still be in use for VIP: %s due to error: %s', db_lb.vip, str(e))\n<|end_body_1|>\n", "revision_id": "0426285a41464a5015494584f109eed35a0d44db", "skeleton": "<|skeleton|>\nclass PlugVIPAmphora:\n \"\"\"Task to plumb a VIP.\"\"\"\n\n def execute(self, loadbalancer, amphora, subnet):\n \"\"\"Plumb a vip to an amphora.\"\"\"\n <|body_0|>\n\n def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs):\n \"\"\"Handle a failure to plumb a vip.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PlugVIPAmphora:\n \"\"\"Task to plumb a VIP.\"\"\"\n\n def execute(self, loadbalancer, amphora, subnet):\n \"\"\"Plumb a vip to an amphora.\"\"\"\n LOG.debug('Plumbing VIP for amphora id: %s', amphora.get(constants.ID))\n session = db_apis.get_session()\n with session.begin():\n db_amp = self.amphora_repo.get(session, id=amphora.get(constants.ID))\n db_subnet = self.network_driver.get_subnet(subnet[constants.ID])\n db_lb = self.loadbalancer_repo.get(session, id=loadbalancer[constants.LOADBALANCER_ID])\n amp_data = self.network_driver.plug_aap_port(db_lb, db_lb.vip, db_amp, db_subnet)\n return amp_data.to_dict()\n\n def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs):\n \"\"\"Handle a failure to plumb a vip.\"\"\"\n if isinstance(result, failure.Failure):\n return\n LOG.warning('Unable to plug VIP for amphora id %s load balancer id %s', amphora.get(constants.ID), loadbalancer[constants.LOADBALANCER_ID])\n try:\n session = db_apis.get_session()\n with session.begin():\n db_amp = self.amphora_repo.get(session, id=amphora.get(constants.ID))\n db_amp.vrrp_port_id = result[constants.VRRP_PORT_ID]\n db_amp.ha_port_id = result[constants.HA_PORT_ID]\n db_subnet = self.network_driver.get_subnet(subnet[constants.ID])\n db_lb = self.loadbalancer_repo.get(session, id=loadbalancer[constants.LOADBALANCER_ID])\n self.network_driver.unplug_aap_port(db_lb.vip, db_amp, db_subnet)\n except Exception as e:\n LOG.error('Failed to unplug AAP port. 
Resources may still be in use for VIP: %s due to error: %s', db_lb.vip, str(e))\n", "source": "the_stack_v2_python_sparse", "source_path": "octavia/controller/worker/v2/tasks/network_tasks.py", "source_repo": "openstack/octavia", "split": "val", "star_events_count": 147} {"blob_id": "15454fd2e598a2d782cd4f01bdb9403bbdfe1a69", "bodies": ["model = Dog\nname = 'Dogs'\nsuper().__init__(model=model, collection_name=name)\nself.__dog_owner_repository = dog_owner_repository", "dogs = list()\nowners = self.__dog_owner_repository.search(f'owner_id=={owner_id}')\nfor dog_owner in owners.to_list():\n try:\n dog = self.read(dog_owner.dog_id)\n dogs.append(dog)\n except NotFoundError:\n pass\nreturn dogs"], "bodies_text": "<|body_start_0|>\n model = Dog\n name = 'Dogs'\n super().__init__(model=model, collection_name=name)\n self.__dog_owner_repository = dog_owner_repository\n<|end_body_0|>\n\n<|body_start_1|>\n dogs = list()\n owners = self.__dog_owner_repository.search(f'owner_id=={owner_id}')\n for dog_owner in owners.to_list():\n try:\n dog = self.read(dog_owner.dog_id)\n dogs.append(dog)\n except NotFoundError:\n pass\n return dogs\n<|end_body_1|>\n", "class_docstring": "Dog repository class.", "class_name": "DogRepository", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DogRepository:\n \"\"\"Dog repository class.\"\"\"\n\n def __init__(self, dog_owner_repository):\n \"\"\"Initialize dog repository.\"\"\"\n <|body_0|>\n\n def read_dogs_of_owner(self, owner_id):\n \"\"\"Get dogs associated with this user_id.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n model = Dog\n name = 'Dogs'\n super().__init__(model=model, collection_name=name)\n self.__dog_owner_repository = dog_owner_repository\n<|end_body_0|>\n\n<|body_start_1|>\n dogs = list()\n owners = self.__dog_owner_repository.search(f'owner_id=={owner_id}')\n for dog_owner in owners.to_list():\n try:\n dog = self.read(dog_owner.dog_id)\n dogs.append(dog)\n except NotFoundError:\n pass\n return dogs\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000393", "length_bytes": 950, "license_type": "no_license", "methods": [{"docstring": "Initialize dog repository.", "name": "__init__", "signature": "def __init__(self, dog_owner_repository)"}, {"docstring": "Get dogs associated with this user_id.", "name": "read_dogs_of_owner", "signature": "def read_dogs_of_owner(self, owner_id)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005218", "prompt": "Implement the Python class `DogRepository` described below.\n\nClass description:\nDog repository class.\n\nMethod signatures and docstrings:\n- def __init__(self, dog_owner_repository): Initialize dog repository.\n- def read_dogs_of_owner(self, owner_id): Get dogs associated with this user_id.", "prompted_full_text": "Implement the Python class `DogRepository` described below.\n\nClass description:\nDog repository class.\n\nMethod signatures and docstrings:\n- def __init__(self, dog_owner_repository): Initialize dog repository.\n- def read_dogs_of_owner(self, owner_id): Get dogs associated with this user_id.\n\n<|skeleton|>\nclass DogRepository:\n \"\"\"Dog repository class.\"\"\"\n\n def __init__(self, dog_owner_repository):\n \"\"\"Initialize dog repository.\"\"\"\n <|body_0|>\n\n def read_dogs_of_owner(self, owner_id):\n \"\"\"Get dogs associated with this user_id.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n model = Dog\n name = 'Dogs'\n super().__init__(model=model, 
collection_name=name)\n self.__dog_owner_repository = dog_owner_repository\n<|end_body_0|>\n\n<|body_start_1|>\n dogs = list()\n owners = self.__dog_owner_repository.search(f'owner_id=={owner_id}')\n for dog_owner in owners.to_list():\n try:\n dog = self.read(dog_owner.dog_id)\n dogs.append(dog)\n except NotFoundError:\n pass\n return dogs\n<|end_body_1|>\n", "revision_id": "129dc7f8213fb3112c35b1551d9ed3d8a14b7fb5", "skeleton": "<|skeleton|>\nclass DogRepository:\n \"\"\"Dog repository class.\"\"\"\n\n def __init__(self, dog_owner_repository):\n \"\"\"Initialize dog repository.\"\"\"\n <|body_0|>\n\n def read_dogs_of_owner(self, owner_id):\n \"\"\"Get dogs associated with this user_id.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DogRepository:\n \"\"\"Dog repository class.\"\"\"\n\n def __init__(self, dog_owner_repository):\n \"\"\"Initialize dog repository.\"\"\"\n model = Dog\n name = 'Dogs'\n super().__init__(model=model, collection_name=name)\n self.__dog_owner_repository = dog_owner_repository\n\n def read_dogs_of_owner(self, owner_id):\n \"\"\"Get dogs associated with this user_id.\"\"\"\n dogs = list()\n owners = self.__dog_owner_repository.search(f'owner_id=={owner_id}')\n for dog_owner in owners.to_list():\n try:\n dog = self.read(dog_owner.dog_id)\n dogs.append(dog)\n except NotFoundError:\n pass\n return dogs\n", "source": "the_stack_v2_python_sparse", "source_path": "hugbunadarfr_backend/src/app/repository/repositories/dog_repository.py", "source_repo": "birna17/veff_hugb", "split": "val", "star_events_count": 0} {"blob_id": "9b70c7c1460040908010eb17edc77510fe4e1ff1", "bodies": ["node = _Node(value)\nif self._root is None:\n self._root = node\n return\ncurrent = self._root\nwhile True:\n if value < current.value:\n if not current.left:\n current.left = node\n return\n else:\n current = current.left\n elif not current.right:\n current.right = node\n return\n else:\n current = current.right", "if self._root == None:\n return False\nif self._root.value == value:\n return True\ncurrent = self._root\nwhile True:\n if value == current.value:\n return True\n if value > current.value:\n if current.right:\n current = current.right\n else:\n return False\n if value < current.value:\n if current.left:\n current = current.left\n else:\n return False"], "bodies_text": "<|body_start_0|>\n node = _Node(value)\n if self._root is None:\n self._root = node\n return\n current = self._root\n while True:\n if value < current.value:\n if not current.left:\n current.left = node\n return\n else:\n current = current.left\n elif not current.right:\n current.right = node\n return\n else:\n current = current.right\n<|end_body_0|>\n\n<|body_start_1|>\n if self._root == None:\n return False\n if self._root.value == value:\n return True\n current = self._root\n while True:\n if value == current.value:\n return True\n if value > current.value:\n if current.right:\n current = current.right\n else:\n return False\n if value < current.value:\n if current.left:\n current = current.left\n else:\n return False\n<|end_body_1|>\n", "class_docstring": "", "class_name": "BinarySearchTree", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BinarySearchTree:\n\n def add(self, value):\n \"\"\"Adds node to a tree and places it dependent upon the rest of the tree\"\"\"\n <|body_0|>\n\n def contains(self, value):\n \"\"\"returns boolean 
that expresses whether or not value exists in binary tree\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n node = _Node(value)\n if self._root is None:\n self._root = node\n return\n current = self._root\n while True:\n if value < current.value:\n if not current.left:\n current.left = node\n return\n else:\n current = current.left\n elif not current.right:\n current.right = node\n return\n else:\n current = current.right\n<|end_body_0|>\n\n<|body_start_1|>\n if self._root == None:\n return False\n if self._root.value == value:\n return True\n current = self._root\n while True:\n if value == current.value:\n return True\n if value > current.value:\n if current.right:\n current = current.right\n else:\n return False\n if value < current.value:\n if current.left:\n current = current.left\n else:\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000394", "length_bytes": 5591, "license_type": "no_license", "methods": [{"docstring": "Adds node to a tree and places it dependent upon the rest of the tree", "name": "add", "signature": "def add(self, value)"}, {"docstring": "returns boolean that expresses whether or not value exists in binary tree", "name": "contains", "signature": "def contains(self, value)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005373", "prompt": "Implement the Python class `BinarySearchTree` described below.\n\nClass description:\nImplement the BinarySearchTree class.\n\nMethod signatures and docstrings:\n- def add(self, value): Adds node to a tree and places it dependent upon the rest of the tree\n- def contains(self, value): returns boolean that expresses whether or not value exists in binary tree", "prompted_full_text": "Implement the Python class `BinarySearchTree` described below.\n\nClass description:\nImplement the BinarySearchTree class.\n\nMethod signatures and docstrings:\n- def add(self, value): Adds node to a tree and places it dependent upon the rest of the tree\n- def contains(self, value): returns boolean that expresses whether or not value exists in binary tree\n\n<|skeleton|>\nclass BinarySearchTree:\n\n def add(self, value):\n \"\"\"Adds node to a tree and places it dependent upon the rest of the tree\"\"\"\n <|body_0|>\n\n def contains(self, value):\n \"\"\"returns boolean that expresses whether or not value exists in binary tree\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n node = _Node(value)\n if self._root is None:\n self._root = node\n return\n current = self._root\n while True:\n if value < current.value:\n if not current.left:\n current.left = node\n return\n else:\n current = current.left\n elif not current.right:\n current.right = node\n return\n else:\n current = current.right\n<|end_body_0|>\n\n<|body_start_1|>\n if self._root == None:\n return False\n if self._root.value == value:\n return True\n current = self._root\n while True:\n if value == current.value:\n return True\n if value > current.value:\n if current.right:\n current = current.right\n else:\n return False\n if value < current.value:\n if current.left:\n current = current.left\n else:\n return False\n<|end_body_1|>\n", "revision_id": "677f071a04a429b6ec8c307bd32cfb654bc8ec11", "skeleton": "<|skeleton|>\nclass BinarySearchTree:\n\n def add(self, value):\n \"\"\"Adds node to a tree and places it dependent upon the rest of the tree\"\"\"\n <|body_0|>\n\n def contains(self, value):\n \"\"\"returns boolean that expresses whether or not value exists in binary tree\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", 
"snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BinarySearchTree:\n def add(self, value):\n \"\"\"Adds node to a tree and places it dependent upon the rest of the tree\"\"\"\n node = _Node(value)\n if self._root is None:\n self._root = node\n return\n current = self._root\n while True:\n if value < current.value:\n if not current.left:\n current.left = node\n return\n else:\n current = current.left\n elif not current.right:\n current.right = node\n return\n else:\n current = current.right\n\n def contains(self, value):\n \"\"\"returns boolean that expresses whether or not value exists in binary tree\"\"\"\n if self._root == None:\n return False\n if self._root.value == value:\n return True\n current = self._root\n while True:\n if value == current.value:\n return True\n if value > current.value:\n if current.right:\n current = current.right\n else:\n return False\n if value < current.value:\n if current.left:\n current = current.left\n else:\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "data-structures/tree/tree.py", "source_repo": "Rayxclockwork/python-data-structures-and-algorithms", "split": "val", "star_events_count": 0} {"blob_id": "6c8eab7ac2d0dbdb7bd59c68071698a4a0b00c27", "bodies": ["sums = [0] * (len(nums) + 1)\nfor i in range(len(nums)):\n sums[i + 1] = nums[i] + sums[i]\nres = 0\nfor i in range(1, len(nums) + 1):\n for j in range(i):\n if sums[i] - sums[j] == k:\n res += 1\nreturn res", "pre_sum_freq = dict()\npre_sum_freq[0] = 1\npre_sum = 0\ncount = 0\nfor num in nums:\n pre_sum += num\n count += pre_sum_freq.get(pre_sum - k, 0)\n pre_sum_freq[pre_sum] = pre_sum_freq.get(pre_sum, 0) + 1\nreturn count"], "bodies_text": "<|body_start_0|>\n sums = [0] * (len(nums) + 1)\n for i in range(len(nums)):\n sums[i + 1] = nums[i] + sums[i]\n res = 0\n for i in range(1, len(nums) + 1):\n for j in range(i):\n if sums[i] - sums[j] == k:\n res += 1\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n pre_sum_freq = dict()\n pre_sum_freq[0] = 1\n pre_sum = 0\n count = 0\n for num in nums:\n pre_sum += num\n count += pre_sum_freq.get(pre_sum - k, 0)\n pre_sum_freq[pre_sum] = pre_sum_freq.get(pre_sum, 0) + 1\n return count\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def subarraySum(self, nums: List[int], k: int) -> int:\n \"\"\"前缀和,数组长度20000 :param nums: :param k: :return:\"\"\"\n <|body_0|>\n\n def subarraySum1(self, nums: List[int], k: int) -> int:\n \"\"\"使用字典优化前缀和 :param nums: :param k: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sums = [0] * (len(nums) + 1)\n for i in range(len(nums)):\n sums[i + 1] = nums[i] + sums[i]\n res = 0\n for i in range(1, len(nums) + 1):\n for j in range(i):\n if sums[i] - sums[j] == k:\n res += 1\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n pre_sum_freq = dict()\n pre_sum_freq[0] = 1\n pre_sum = 0\n count = 0\n for num in nums:\n pre_sum += num\n count += pre_sum_freq.get(pre_sum - k, 0)\n pre_sum_freq[pre_sum] = pre_sum_freq.get(pre_sum, 0) + 1\n return count\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000395", "length_bytes": 1228, "license_type": "no_license", "methods": [{"docstring": "前缀和,数组长度20000 :param nums: :param k: :return:", "name": "subarraySum", "signature": "def subarraySum(self, nums: List[int], k: int) -> int"}, {"docstring": "使用字典优化前缀和 :param 
nums: :param k: :return:", "name": "subarraySum1", "signature": "def subarraySum1(self, nums: List[int], k: int) -> int"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def subarraySum(self, nums: List[int], k: int) -> int: 前缀和,数组长度20000 :param nums: :param k: :return:\n- def subarraySum1(self, nums: List[int], k: int) -> int: 使用字典优化前缀和 :param nums: :param k: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def subarraySum(self, nums: List[int], k: int) -> int: 前缀和,数组长度20000 :param nums: :param k: :return:\n- def subarraySum1(self, nums: List[int], k: int) -> int: 使用字典优化前缀和 :param nums: :param k: :return:\n\n<|skeleton|>\nclass Solution:\n\n def subarraySum(self, nums: List[int], k: int) -> int:\n \"\"\"前缀和,数组长度20000 :param nums: :param k: :return:\"\"\"\n <|body_0|>\n\n def subarraySum1(self, nums: List[int], k: int) -> int:\n \"\"\"使用字典优化前缀和 :param nums: :param k: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sums = [0] * (len(nums) + 1)\n for i in range(len(nums)):\n sums[i + 1] = nums[i] + sums[i]\n res = 0\n for i in range(1, len(nums) + 1):\n for j in range(i):\n if sums[i] - sums[j] == k:\n res += 1\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n pre_sum_freq = dict()\n pre_sum_freq[0] = 1\n pre_sum = 0\n count = 0\n for num in nums:\n pre_sum += num\n count += pre_sum_freq.get(pre_sum - k, 0)\n pre_sum_freq[pre_sum] = pre_sum_freq.get(pre_sum, 0) + 1\n return count\n<|end_body_1|>\n", "revision_id": "9acba92695c06406f12f997a720bfe1deb9464a8", "skeleton": "<|skeleton|>\nclass Solution:\n\n def subarraySum(self, nums: List[int], k: int) -> int:\n \"\"\"前缀和,数组长度20000 :param nums: :param k: :return:\"\"\"\n <|body_0|>\n\n def subarraySum1(self, nums: List[int], k: int) -> int:\n \"\"\"使用字典优化前缀和 :param nums: :param k: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def subarraySum(self, nums: List[int], k: int) -> int:\n \"\"\"前缀和,数组长度20000 :param nums: :param k: :return:\"\"\"\n sums = [0] * (len(nums) + 1)\n for i in range(len(nums)):\n sums[i + 1] = nums[i] + sums[i]\n res = 0\n for i in range(1, len(nums) + 1):\n for j in range(i):\n if sums[i] - sums[j] == k:\n res += 1\n return res\n\n def subarraySum1(self, nums: List[int], k: int) -> int:\n \"\"\"使用字典优化前缀和 :param nums: :param k: :return:\"\"\"\n pre_sum_freq = dict()\n pre_sum_freq[0] = 1\n pre_sum = 0\n count = 0\n for num in nums:\n pre_sum += num\n count += pre_sum_freq.get(pre_sum - k, 0)\n pre_sum_freq[pre_sum] = pre_sum_freq.get(pre_sum, 0) + 1\n return count\n", "source": "the_stack_v2_python_sparse", "source_path": "datastructure/binary_array/SubarraySum.py", "source_repo": "yinhuax/leet_code", "split": "val", "star_events_count": 0} {"blob_id": "526e5ca3596733572d1ea9beccfc470142857fc9", "bodies": ["if len(prices) <= 1:\n return 0\nK, n = (2, len(prices))\ndp = [[[0] * 2 for _ in range(K + 1)] for _ in range(len(prices))]\nfor i in range(n):\n for k in range(K, 0, -1):\n if i - 1 == -1:\n dp[i][k][0] = 0\n dp[i][k][1] = -prices[0]\n continue\n dp[i][k][0] = max(dp[i - 1][k][0], dp[i - 1][k][1] + prices[i])\n dp[i][k][1] = max(dp[i - 1][k][1], dp[i - 1][k - 1][0] - prices[i])\nreturn dp[n - 
1][K][0]", "if len(prices) <= 1:\n return 0\nK, n = (2, len(prices))\ndp = [[0] * 2 for _ in range(K + 1)]\ndp_1 = [[0] * 2 for _ in range(K + 1)]\ndp_1[2][1] = -prices[0]\ndp_1[1][1] = -prices[0]\nfor i in range(1, n):\n for k in range(K, 0, -1):\n dp[k][0] = max(dp_1[k][0], dp_1[k][1] + prices[i])\n dp[k][1] = max(dp_1[k][1], dp_1[k - 1][0] - prices[i])\n dp, dp_1 = (dp_1, dp)\nreturn dp_1[K][0]", "if len(prices) <= 1:\n return 0\nK = 2\nmaxProf = 0\nf = [[0] * len(prices) for _ in range(K + 1)]\nfor kk in range(1, K + 1):\n tmpMax = f[kk - 1][0] - prices[0]\n for ii in range(1, len(prices)):\n f[kk][ii] = max(f[kk][ii - 1], prices[ii] + tmpMax)\n tmpMax = max(tmpMax, f[kk - 1][ii] - prices[ii])\n maxProf = max(maxProf, f[kk][ii])\nreturn maxProf", "if len(prices) <= 1:\n return 0\nsell1, sell2 = (0, 0)\nbuy1, buy2 = (-sys.maxsize, -sys.maxsize)\nfor p in prices:\n buy1 = max(buy1, -p)\n sell1 = max(sell1, p + buy1)\n buy2 = max(buy2, sell1 - p)\n sell2 = max(sell2, p + buy2)\nreturn sell2"], "bodies_text": "<|body_start_0|>\n if len(prices) <= 1:\n return 0\n K, n = (2, len(prices))\n dp = [[[0] * 2 for _ in range(K + 1)] for _ in range(len(prices))]\n for i in range(n):\n for k in range(K, 0, -1):\n if i - 1 == -1:\n dp[i][k][0] = 0\n dp[i][k][1] = -prices[0]\n continue\n dp[i][k][0] = max(dp[i - 1][k][0], dp[i - 1][k][1] + prices[i])\n dp[i][k][1] = max(dp[i - 1][k][1], dp[i - 1][k - 1][0] - prices[i])\n return dp[n - 1][K][0]\n<|end_body_0|>\n\n<|body_start_1|>\n if len(prices) <= 1:\n return 0\n K, n = (2, len(prices))\n dp = [[0] * 2 for _ in range(K + 1)]\n dp_1 = [[0] * 2 for _ in range(K + 1)]\n dp_1[2][1] = -prices[0]\n dp_1[1][1] = -prices[0]\n for i in range(1, n):\n for k in range(K, 0, -1):\n dp[k][0] = max(dp_1[k][0], dp_1[k][1] + prices[i])\n dp[k][1] = max(dp_1[k][1], dp_1[k - 1][0] - prices[i])\n dp, dp_1 = (dp_1, dp)\n return dp_1[K][0]\n<|end_body_1|>\n\n<|body_start_2|>\n if len(prices) <= 1:\n return 0\n K = 2\n maxProf = 0\n f = [[0] * len(prices) for _ in range(K + 1)]\n for kk in range(1, K + 1):\n tmpMax = f[kk - 1][0] - prices[0]\n for ii in range(1, len(prices)):\n f[kk][ii] = max(f[kk][ii - 1], prices[ii] + tmpMax)\n tmpMax = max(tmpMax, f[kk - 1][ii] - prices[ii])\n maxProf = max(maxProf, f[kk][ii])\n return maxProf\n<|end_body_2|>\n\n<|body_start_3|>\n if len(prices) <= 1:\n return 0\n sell1, sell2 = (0, 0)\n buy1, buy2 = (-sys.maxsize, -sys.maxsize)\n for p in prices:\n buy1 = max(buy1, -p)\n sell1 = max(sell1, p + buy1)\n buy2 = max(buy2, sell1 - p)\n sell2 = max(sell2, p + buy2)\n return sell2\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maxProfitDpPattern(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxProfitDpPattern2(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int f[k, ii] represents the max profit up until prices[ii] (Note: NOT ending with prices[ii]) using at most k transactions. 
f[k, ii] = max(f[k, ii-1], prices[ii] - prices[jj] + f[k-1, jj]) { jj in range of [0, ii-1] } = max(f[k, ii-1], prices[ii] + max(f[k-1, jj] - prices[jj])) f[0, ii] = 0; 0 times transation makes 0 profit f[k, 0] = 0; if there is only one price data point you can't make any money no matter how many times you can trade\"\"\"\n <|body_2|>\n\n def maxProfitO1Space(self, prices):\n \"\"\":type prices: List[int] :rtype: int The transition relation is constructed by the following four equations. Actually, sell2 is the only state we record for iterations. The others are intermediate states. buy1 and *sell1 *are for the first transaction. buy2 and *sell2 *are for the second transaction. Transition relation: buy1[i] = max( - prices[i], buy1[i - 1]) sell1[i] = max(buy1[i - 1] + price[i], sell1[i - 1]) buy2[i] = max( sell1[i -1] - prices[i], buy2[i - 1]) sell2[i] = max(buy2[i - 1] + price[i], sell2[i - 1])\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(prices) <= 1:\n return 0\n K, n = (2, len(prices))\n dp = [[[0] * 2 for _ in range(K + 1)] for _ in range(len(prices))]\n for i in range(n):\n for k in range(K, 0, -1):\n if i - 1 == -1:\n dp[i][k][0] = 0\n dp[i][k][1] = -prices[0]\n continue\n dp[i][k][0] = max(dp[i - 1][k][0], dp[i - 1][k][1] + prices[i])\n dp[i][k][1] = max(dp[i - 1][k][1], dp[i - 1][k - 1][0] - prices[i])\n return dp[n - 1][K][0]\n<|end_body_0|>\n\n<|body_start_1|>\n if len(prices) <= 1:\n return 0\n K, n = (2, len(prices))\n dp = [[0] * 2 for _ in range(K + 1)]\n dp_1 = [[0] * 2 for _ in range(K + 1)]\n dp_1[2][1] = -prices[0]\n dp_1[1][1] = -prices[0]\n for i in range(1, n):\n for k in range(K, 0, -1):\n dp[k][0] = max(dp_1[k][0], dp_1[k][1] + prices[i])\n dp[k][1] = max(dp_1[k][1], dp_1[k - 1][0] - prices[i])\n dp, dp_1 = (dp_1, dp)\n return dp_1[K][0]\n<|end_body_1|>\n\n<|body_start_2|>\n if len(prices) <= 1:\n return 0\n K = 2\n maxProf = 0\n f = [[0] * len(prices) for _ in range(K + 1)]\n for kk in range(1, K + 1):\n tmpMax = f[kk - 1][0] - prices[0]\n for ii in range(1, len(prices)):\n f[kk][ii] = max(f[kk][ii - 1], prices[ii] + tmpMax)\n tmpMax = max(tmpMax, f[kk - 1][ii] - prices[ii])\n maxProf = max(maxProf, f[kk][ii])\n return maxProf\n<|end_body_2|>\n\n<|body_start_3|>\n if len(prices) <= 1:\n return 0\n sell1, sell2 = (0, 0)\n buy1, buy2 = (-sys.maxsize, -sys.maxsize)\n for p in prices:\n buy1 = max(buy1, -p)\n sell1 = max(sell1, p + buy1)\n buy2 = max(buy2, sell1 - p)\n sell2 = max(sell2, p + buy2)\n return sell2\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000396", "length_bytes": 5100, "license_type": "no_license", "methods": [{"docstring": ":type prices: List[int] :rtype: int", "name": "maxProfitDpPattern", "signature": "def maxProfitDpPattern(self, prices)"}, {"docstring": ":type prices: List[int] :rtype: int", "name": "maxProfitDpPattern2", "signature": "def maxProfitDpPattern2(self, prices)"}, {"docstring": ":type prices: List[int] :rtype: int f[k, ii] represents the max profit up until prices[ii] (Note: NOT ending with prices[ii]) using at most k transactions. 
f[k, ii] = max(f[k, ii-1], prices[ii] - prices[jj] + f[k-1, jj]) { jj in range of [0, ii-1] } = max(f[k, ii-1], prices[ii] + max(f[k-1, jj] - prices[jj])) f[0, ii] = 0; 0 times transation makes 0 profit f[k, 0] = 0; if there is only one price data point you can't make any money no matter how many times you can trade", "name": "maxProfit", "signature": "def maxProfit(self, prices)"}, {"docstring": ":type prices: List[int] :rtype: int The transition relation is constructed by the following four equations. Actually, sell2 is the only state we record for iterations. The others are intermediate states. buy1 and *sell1 *are for the first transaction. buy2 and *sell2 *are for the second transaction. Transition relation: buy1[i] = max( - prices[i], buy1[i - 1]) sell1[i] = max(buy1[i - 1] + price[i], sell1[i - 1]) buy2[i] = max( sell1[i -1] - prices[i], buy2[i - 1]) sell2[i] = max(buy2[i - 1] + price[i], sell2[i - 1])", "name": "maxProfitO1Space", "signature": "def maxProfitO1Space(self, prices)"}], "n_methods": 4, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxProfitDpPattern(self, prices): :type prices: List[int] :rtype: int\n- def maxProfitDpPattern2(self, prices): :type prices: List[int] :rtype: int\n- def maxProfit(self, prices): :type prices: List[int] :rtype: int f[k, ii] represents the max profit up until prices[ii] (Note: NOT ending with prices[ii]) using at most k transactions. f[k, ii] = max(f[k, ii-1], prices[ii] - prices[jj] + f[k-1, jj]) { jj in range of [0, ii-1] } = max(f[k, ii-1], prices[ii] + max(f[k-1, jj] - prices[jj])) f[0, ii] = 0; 0 times transation makes 0 profit f[k, 0] = 0; if there is only one price data point you can't make any money no matter how many times you can trade\n- def maxProfitO1Space(self, prices): :type prices: List[int] :rtype: int The transition relation is constructed by the following four equations. Actually, sell2 is the only state we record for iterations. The others are intermediate states. buy1 and *sell1 *are for the first transaction. buy2 and *sell2 *are for the second transaction. Transition relation: buy1[i] = max( - prices[i], buy1[i - 1]) sell1[i] = max(buy1[i - 1] + price[i], sell1[i - 1]) buy2[i] = max( sell1[i -1] - prices[i], buy2[i - 1]) sell2[i] = max(buy2[i - 1] + price[i], sell2[i - 1])", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxProfitDpPattern(self, prices): :type prices: List[int] :rtype: int\n- def maxProfitDpPattern2(self, prices): :type prices: List[int] :rtype: int\n- def maxProfit(self, prices): :type prices: List[int] :rtype: int f[k, ii] represents the max profit up until prices[ii] (Note: NOT ending with prices[ii]) using at most k transactions. f[k, ii] = max(f[k, ii-1], prices[ii] - prices[jj] + f[k-1, jj]) { jj in range of [0, ii-1] } = max(f[k, ii-1], prices[ii] + max(f[k-1, jj] - prices[jj])) f[0, ii] = 0; 0 times transation makes 0 profit f[k, 0] = 0; if there is only one price data point you can't make any money no matter how many times you can trade\n- def maxProfitO1Space(self, prices): :type prices: List[int] :rtype: int The transition relation is constructed by the following four equations. Actually, sell2 is the only state we record for iterations. The others are intermediate states. buy1 and *sell1 *are for the first transaction. 
buy2 and *sell2 *are for the second transaction. Transition relation: buy1[i] = max( - prices[i], buy1[i - 1]) sell1[i] = max(buy1[i - 1] + price[i], sell1[i - 1]) buy2[i] = max( sell1[i -1] - prices[i], buy2[i - 1]) sell2[i] = max(buy2[i - 1] + price[i], sell2[i - 1])\n\n<|skeleton|>\nclass Solution:\n\n def maxProfitDpPattern(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxProfitDpPattern2(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int f[k, ii] represents the max profit up until prices[ii] (Note: NOT ending with prices[ii]) using at most k transactions. f[k, ii] = max(f[k, ii-1], prices[ii] - prices[jj] + f[k-1, jj]) { jj in range of [0, ii-1] } = max(f[k, ii-1], prices[ii] + max(f[k-1, jj] - prices[jj])) f[0, ii] = 0; 0 times transation makes 0 profit f[k, 0] = 0; if there is only one price data point you can't make any money no matter how many times you can trade\"\"\"\n <|body_2|>\n\n def maxProfitO1Space(self, prices):\n \"\"\":type prices: List[int] :rtype: int The transition relation is constructed by the following four equations. Actually, sell2 is the only state we record for iterations. The others are intermediate states. buy1 and *sell1 *are for the first transaction. buy2 and *sell2 *are for the second transaction. Transition relation: buy1[i] = max( - prices[i], buy1[i - 1]) sell1[i] = max(buy1[i - 1] + price[i], sell1[i - 1]) buy2[i] = max( sell1[i -1] - prices[i], buy2[i - 1]) sell2[i] = max(buy2[i - 1] + price[i], sell2[i - 1])\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(prices) <= 1:\n return 0\n K, n = (2, len(prices))\n dp = [[[0] * 2 for _ in range(K + 1)] for _ in range(len(prices))]\n for i in range(n):\n for k in range(K, 0, -1):\n if i - 1 == -1:\n dp[i][k][0] = 0\n dp[i][k][1] = -prices[0]\n continue\n dp[i][k][0] = max(dp[i - 1][k][0], dp[i - 1][k][1] + prices[i])\n dp[i][k][1] = max(dp[i - 1][k][1], dp[i - 1][k - 1][0] - prices[i])\n return dp[n - 1][K][0]\n<|end_body_0|>\n\n<|body_start_1|>\n if len(prices) <= 1:\n return 0\n K, n = (2, len(prices))\n dp = [[0] * 2 for _ in range(K + 1)]\n dp_1 = [[0] * 2 for _ in range(K + 1)]\n dp_1[2][1] = -prices[0]\n dp_1[1][1] = -prices[0]\n for i in range(1, n):\n for k in range(K, 0, -1):\n dp[k][0] = max(dp_1[k][0], dp_1[k][1] + prices[i])\n dp[k][1] = max(dp_1[k][1], dp_1[k - 1][0] - prices[i])\n dp, dp_1 = (dp_1, dp)\n return dp_1[K][0]\n<|end_body_1|>\n\n<|body_start_2|>\n if len(prices) <= 1:\n return 0\n K = 2\n maxProf = 0\n f = [[0] * len(prices) for _ in range(K + 1)]\n for kk in range(1, K + 1):\n tmpMax = f[kk - 1][0] - prices[0]\n for ii in range(1, len(prices)):\n f[kk][ii] = max(f[kk][ii - 1], prices[ii] + tmpMax)\n tmpMax = max(tmpMax, f[kk - 1][ii] - prices[ii])\n maxProf = max(maxProf, f[kk][ii])\n return maxProf\n<|end_body_2|>\n\n<|body_start_3|>\n if len(prices) <= 1:\n return 0\n sell1, sell2 = (0, 0)\n buy1, buy2 = (-sys.maxsize, -sys.maxsize)\n for p in prices:\n buy1 = max(buy1, -p)\n sell1 = max(sell1, p + buy1)\n buy2 = max(buy2, sell1 - p)\n sell2 = max(sell2, p + buy2)\n return sell2\n<|end_body_3|>\n", "revision_id": "810575368ecffa97677bdb51744d1f716140bbb1", "skeleton": "<|skeleton|>\nclass Solution:\n\n def maxProfitDpPattern(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxProfitDpPattern2(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n def 
maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int f[k, ii] represents the max profit up until prices[ii] (Note: NOT ending with prices[ii]) using at most k transactions. f[k, ii] = max(f[k, ii-1], prices[ii] - prices[jj] + f[k-1, jj]) { jj in range of [0, ii-1] } = max(f[k, ii-1], prices[ii] + max(f[k-1, jj] - prices[jj])) f[0, ii] = 0; 0 times transation makes 0 profit f[k, 0] = 0; if there is only one price data point you can't make any money no matter how many times you can trade\"\"\"\n <|body_2|>\n\n def maxProfitO1Space(self, prices):\n \"\"\":type prices: List[int] :rtype: int The transition relation is constructed by the following four equations. Actually, sell2 is the only state we record for iterations. The others are intermediate states. buy1 and *sell1 *are for the first transaction. buy2 and *sell2 *are for the second transaction. Transition relation: buy1[i] = max( - prices[i], buy1[i - 1]) sell1[i] = max(buy1[i - 1] + price[i], sell1[i - 1]) buy2[i] = max( sell1[i -1] - prices[i], buy2[i - 1]) sell2[i] = max(buy2[i - 1] + price[i], sell2[i - 1])\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def maxProfitDpPattern(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n if len(prices) <= 1:\n return 0\n K, n = (2, len(prices))\n dp = [[[0] * 2 for _ in range(K + 1)] for _ in range(len(prices))]\n for i in range(n):\n for k in range(K, 0, -1):\n if i - 1 == -1:\n dp[i][k][0] = 0\n dp[i][k][1] = -prices[0]\n continue\n dp[i][k][0] = max(dp[i - 1][k][0], dp[i - 1][k][1] + prices[i])\n dp[i][k][1] = max(dp[i - 1][k][1], dp[i - 1][k - 1][0] - prices[i])\n return dp[n - 1][K][0]\n\n def maxProfitDpPattern2(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n if len(prices) <= 1:\n return 0\n K, n = (2, len(prices))\n dp = [[0] * 2 for _ in range(K + 1)]\n dp_1 = [[0] * 2 for _ in range(K + 1)]\n dp_1[2][1] = -prices[0]\n dp_1[1][1] = -prices[0]\n for i in range(1, n):\n for k in range(K, 0, -1):\n dp[k][0] = max(dp_1[k][0], dp_1[k][1] + prices[i])\n dp[k][1] = max(dp_1[k][1], dp_1[k - 1][0] - prices[i])\n dp, dp_1 = (dp_1, dp)\n return dp_1[K][0]\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int f[k, ii] represents the max profit up until prices[ii] (Note: NOT ending with prices[ii]) using at most k transactions. f[k, ii] = max(f[k, ii-1], prices[ii] - prices[jj] + f[k-1, jj]) { jj in range of [0, ii-1] } = max(f[k, ii-1], prices[ii] + max(f[k-1, jj] - prices[jj])) f[0, ii] = 0; 0 times transation makes 0 profit f[k, 0] = 0; if there is only one price data point you can't make any money no matter how many times you can trade\"\"\"\n if len(prices) <= 1:\n return 0\n K = 2\n maxProf = 0\n f = [[0] * len(prices) for _ in range(K + 1)]\n for kk in range(1, K + 1):\n tmpMax = f[kk - 1][0] - prices[0]\n for ii in range(1, len(prices)):\n f[kk][ii] = max(f[kk][ii - 1], prices[ii] + tmpMax)\n tmpMax = max(tmpMax, f[kk - 1][ii] - prices[ii])\n maxProf = max(maxProf, f[kk][ii])\n return maxProf\n\n def maxProfitO1Space(self, prices):\n \"\"\":type prices: List[int] :rtype: int The transition relation is constructed by the following four equations. Actually, sell2 is the only state we record for iterations. The others are intermediate states. buy1 and *sell1 *are for the first transaction. buy2 and *sell2 *are for the second transaction. 
Transition relation: buy1[i] = max( - prices[i], buy1[i - 1]) sell1[i] = max(buy1[i - 1] + price[i], sell1[i - 1]) buy2[i] = max( sell1[i -1] - prices[i], buy2[i - 1]) sell2[i] = max(buy2[i - 1] + price[i], sell2[i - 1])\"\"\"\n if len(prices) <= 1:\n return 0\n sell1, sell2 = (0, 0)\n buy1, buy2 = (-sys.maxsize, -sys.maxsize)\n for p in prices:\n buy1 = max(buy1, -p)\n sell1 = max(sell1, p + buy1)\n buy2 = max(buy2, sell1 - p)\n sell2 = max(sell2, p + buy2)\n return sell2\n", "source": "the_stack_v2_python_sparse", "source_path": "B/BestTimetoBuyandSellStockIII.py", "source_repo": "bssrdf/pyleet", "split": "val", "star_events_count": 2} {"blob_id": "ac2caca029ef18b224e524c40a882594f63e2b05", "bodies": ["entropy = frac * self.entropy / self.count\nnew_sample_args = _SampleArgs(self.count, self.entropy - entropy)\nreturn (entropy, new_sample_args)", "non_integer_count = sum((not arg.is_Integer for arg in args))\nassert non_integer_count <= self.count - 1\ncount_split = combinatorics.uniform_non_negative_integers_with_sum(len(args), self.count - 1 - non_integer_count)\nfor i, arg in enumerate(args):\n if not arg.is_Integer:\n count_split[i] += 1\nif all((count == 0 for count in count_split)):\n assert self.entropy == 0\n entropies = np.zeros(len(count_split))\nelse:\n entropies = np.random.dirichlet(np.maximum(1e-09, count_split)) * self.entropy\nreturn [_SampleArgs(op_count, entropy) for op_count, entropy in zip(count_split, entropies)]"], "bodies_text": "<|body_start_0|>\n entropy = frac * self.entropy / self.count\n new_sample_args = _SampleArgs(self.count, self.entropy - entropy)\n return (entropy, new_sample_args)\n<|end_body_0|>\n\n<|body_start_1|>\n non_integer_count = sum((not arg.is_Integer for arg in args))\n assert non_integer_count <= self.count - 1\n count_split = combinatorics.uniform_non_negative_integers_with_sum(len(args), self.count - 1 - non_integer_count)\n for i, arg in enumerate(args):\n if not arg.is_Integer:\n count_split[i] += 1\n if all((count == 0 for count in count_split)):\n assert self.entropy == 0\n entropies = np.zeros(len(count_split))\n else:\n entropies = np.random.dirichlet(np.maximum(1e-09, count_split)) * self.entropy\n return [_SampleArgs(op_count, entropy) for op_count, entropy in zip(count_split, entropies)]\n<|end_body_1|>\n", "class_docstring": "For sampling mathematical expressions.", "class_name": "_SampleArgs", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass _SampleArgs:\n \"\"\"For sampling mathematical expressions.\"\"\"\n\n def peel(self, frac=1):\n \"\"\"Peels one (or `frac`) of an op's entropy.\"\"\"\n <|body_0|>\n\n def split(self, args):\n \"\"\"Splits the entropy and op counts up.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n entropy = frac * self.entropy / self.count\n new_sample_args = _SampleArgs(self.count, self.entropy - entropy)\n return (entropy, new_sample_args)\n<|end_body_0|>\n\n<|body_start_1|>\n non_integer_count = sum((not arg.is_Integer for arg in args))\n assert non_integer_count <= self.count - 1\n count_split = combinatorics.uniform_non_negative_integers_with_sum(len(args), self.count - 1 - non_integer_count)\n for i, arg in enumerate(args):\n if not arg.is_Integer:\n count_split[i] += 1\n if all((count == 0 for count in count_split)):\n assert self.entropy == 0\n entropies = np.zeros(len(count_split))\n else:\n entropies = np.random.dirichlet(np.maximum(1e-09, count_split)) * self.entropy\n return [_SampleArgs(op_count, entropy) 
for op_count, entropy in zip(count_split, entropies)]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000397", "length_bytes": 9010, "license_type": "permissive", "methods": [{"docstring": "Peels one (or `frac`) of an op's entropy.", "name": "peel", "signature": "def peel(self, frac=1)"}, {"docstring": "Splits the entropy and op counts up.", "name": "split", "signature": "def split(self, args)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002440", "prompt": "Implement the Python class `_SampleArgs` described below.\n\nClass description:\nFor sampling mathematical expressions.\n\nMethod signatures and docstrings:\n- def peel(self, frac=1): Peels one (or `frac`) of an op's entropy.\n- def split(self, args): Splits the entropy and op counts up.", "prompted_full_text": "Implement the Python class `_SampleArgs` described below.\n\nClass description:\nFor sampling mathematical expressions.\n\nMethod signatures and docstrings:\n- def peel(self, frac=1): Peels one (or `frac`) of an op's entropy.\n- def split(self, args): Splits the entropy and op counts up.\n\n<|skeleton|>\nclass _SampleArgs:\n \"\"\"For sampling mathematical expressions.\"\"\"\n\n def peel(self, frac=1):\n \"\"\"Peels one (or `frac`) of an op's entropy.\"\"\"\n <|body_0|>\n\n def split(self, args):\n \"\"\"Splits the entropy and op counts up.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n entropy = frac * self.entropy / self.count\n new_sample_args = _SampleArgs(self.count, self.entropy - entropy)\n return (entropy, new_sample_args)\n<|end_body_0|>\n\n<|body_start_1|>\n non_integer_count = sum((not arg.is_Integer for arg in args))\n assert non_integer_count <= self.count - 1\n count_split = combinatorics.uniform_non_negative_integers_with_sum(len(args), self.count - 1 - non_integer_count)\n for i, arg in enumerate(args):\n if not arg.is_Integer:\n count_split[i] += 1\n if all((count == 0 for count in count_split)):\n assert self.entropy == 0\n entropies = np.zeros(len(count_split))\n else:\n entropies = np.random.dirichlet(np.maximum(1e-09, count_split)) * self.entropy\n return [_SampleArgs(op_count, entropy) for op_count, entropy in zip(count_split, entropies)]\n<|end_body_1|>\n", "revision_id": "4fd371919d57258dcdedaa21b111fa61ee0a771f", "skeleton": "<|skeleton|>\nclass _SampleArgs:\n \"\"\"For sampling mathematical expressions.\"\"\"\n\n def peel(self, frac=1):\n \"\"\"Peels one (or `frac`) of an op's entropy.\"\"\"\n <|body_0|>\n\n def split(self, args):\n \"\"\"Splits the entropy and op counts up.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class _SampleArgs:\n \"\"\"For sampling mathematical expressions.\"\"\"\n\n def peel(self, frac=1):\n \"\"\"Peels one (or `frac`) of an op's entropy.\"\"\"\n entropy = frac * self.entropy / self.count\n new_sample_args = _SampleArgs(self.count, self.entropy - entropy)\n return (entropy, new_sample_args)\n\n def split(self, args):\n \"\"\"Splits the entropy and op counts up.\"\"\"\n non_integer_count = sum((not arg.is_Integer for arg in args))\n assert non_integer_count <= self.count - 1\n count_split = combinatorics.uniform_non_negative_integers_with_sum(len(args), self.count - 1 - non_integer_count)\n for i, arg in enumerate(args):\n if not arg.is_Integer:\n count_split[i] += 1\n if all((count == 0 for count in count_split)):\n assert self.entropy == 0\n entropies = np.zeros(len(count_split))\n else:\n entropies = 
np.random.dirichlet(np.maximum(1e-09, count_split)) * self.entropy\n return [_SampleArgs(op_count, entropy) for op_count, entropy in zip(count_split, entropies)]\n", "source": "the_stack_v2_python_sparse", "source_path": "mathematics_dataset/sample/arithmetic.py", "source_repo": "AhmedHathout/mathematics_dataset", "split": "val", "star_events_count": 0} {"blob_id": "45c6f0883309f6a71d663cb5174134d7344ff60d", "bodies": ["self.__robot = robot\nself.__frame_id = frame_id\nself.__joint_prefix = joint_prefix\nself.__node = node\nself.__sensors = []\nself.__timestep = int(robot.getBasicTimeStep())\nself.__last_joint_states = None\nself.__previous_time = 0\nself.__previous_position = []\nself.__joint_names = []\nfor i in range(robot.getNumberOfDevices()):\n device = robot.getDeviceByIndex(i)\n if device.getNodeType() == Node.POSITION_SENSOR:\n motor = device.getMotor()\n name = motor.getName() if motor is not None else device.getName()\n self.__joint_names.append(name)\n self.__sensors.append(device)\n self.__previous_position.append(0)\n device.enable(self.__timestep)\nself.__publisher = self.__node.create_publisher(JointState, 'joint_states', 1)", "msg = JointState()\nmsg.header.stamp = Time(seconds=self.__robot.getTime()).to_msg()\nmsg.header.frame_id = self.__frame_id\nmsg.name = [s + self.__joint_prefix for s in self.__joint_names]\nmsg.position = []\ntime_difference = self.__robot.getTime() - self.__previous_time\nfor i in range(len(self.__sensors)):\n value = self.__sensors[i].getValue()\n msg.position.append(value)\n msg.velocity.append((value - self.__previous_position[i]) / time_difference if time_difference > 0 else 0.0)\n self.__previous_position[i] = value\nmsg.effort = [0.0] * 6\nself.__publisher.publish(msg)\nself.__last_joint_states = msg\nself.__previous_time = self.__robot.getTime()"], "bodies_text": "<|body_start_0|>\n self.__robot = robot\n self.__frame_id = frame_id\n self.__joint_prefix = joint_prefix\n self.__node = node\n self.__sensors = []\n self.__timestep = int(robot.getBasicTimeStep())\n self.__last_joint_states = None\n self.__previous_time = 0\n self.__previous_position = []\n self.__joint_names = []\n for i in range(robot.getNumberOfDevices()):\n device = robot.getDeviceByIndex(i)\n if device.getNodeType() == Node.POSITION_SENSOR:\n motor = device.getMotor()\n name = motor.getName() if motor is not None else device.getName()\n self.__joint_names.append(name)\n self.__sensors.append(device)\n self.__previous_position.append(0)\n device.enable(self.__timestep)\n self.__publisher = self.__node.create_publisher(JointState, 'joint_states', 1)\n<|end_body_0|>\n\n<|body_start_1|>\n msg = JointState()\n msg.header.stamp = Time(seconds=self.__robot.getTime()).to_msg()\n msg.header.frame_id = self.__frame_id\n msg.name = [s + self.__joint_prefix for s in self.__joint_names]\n msg.position = []\n time_difference = self.__robot.getTime() - self.__previous_time\n for i in range(len(self.__sensors)):\n value = self.__sensors[i].getValue()\n msg.position.append(value)\n msg.velocity.append((value - self.__previous_position[i]) / time_difference if time_difference > 0 else 0.0)\n self.__previous_position[i] = value\n msg.effort = [0.0] * 6\n self.__publisher.publish(msg)\n self.__last_joint_states = msg\n self.__previous_time = self.__robot.getTime()\n<|end_body_1|>\n", "class_docstring": "Publishes joint states. 
Discovers all joints with positional sensors and publishes corresponding ROS2 messages of type [`sensor_msgs/JointState`](https://github.com/ros2/common_interfaces/blob/master/sensor_msgs/msg/JointState.msg). Args: robot (WebotsNode): Webots Robot node. jointPrefix (str): Prefix to all joint names. node (Node): ROS2 node.", "class_name": "JointStatePublisher", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass JointStatePublisher:\n \"\"\"Publishes joint states. Discovers all joints with positional sensors and publishes corresponding ROS2 messages of type [`sensor_msgs/JointState`](https://github.com/ros2/common_interfaces/blob/master/sensor_msgs/msg/JointState.msg). Args: robot (WebotsNode): Webots Robot node. jointPrefix (str): Prefix to all joint names. node (Node): ROS2 node.\"\"\"\n\n def __init__(self, robot, joint_prefix, node, frame_id='joint_states'):\n \"\"\"Initialize the position sensors and the topic.\"\"\"\n <|body_0|>\n\n def publish(self):\n \"\"\"Publish the 'joint_states' topic with up to date value.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__robot = robot\n self.__frame_id = frame_id\n self.__joint_prefix = joint_prefix\n self.__node = node\n self.__sensors = []\n self.__timestep = int(robot.getBasicTimeStep())\n self.__last_joint_states = None\n self.__previous_time = 0\n self.__previous_position = []\n self.__joint_names = []\n for i in range(robot.getNumberOfDevices()):\n device = robot.getDeviceByIndex(i)\n if device.getNodeType() == Node.POSITION_SENSOR:\n motor = device.getMotor()\n name = motor.getName() if motor is not None else device.getName()\n self.__joint_names.append(name)\n self.__sensors.append(device)\n self.__previous_position.append(0)\n device.enable(self.__timestep)\n self.__publisher = self.__node.create_publisher(JointState, 'joint_states', 1)\n<|end_body_0|>\n\n<|body_start_1|>\n msg = JointState()\n msg.header.stamp = Time(seconds=self.__robot.getTime()).to_msg()\n msg.header.frame_id = self.__frame_id\n msg.name = [s + self.__joint_prefix for s in self.__joint_names]\n msg.position = []\n time_difference = self.__robot.getTime() - self.__previous_time\n for i in range(len(self.__sensors)):\n value = self.__sensors[i].getValue()\n msg.position.append(value)\n msg.velocity.append((value - self.__previous_position[i]) / time_difference if time_difference > 0 else 0.0)\n self.__previous_position[i] = value\n msg.effort = [0.0] * 6\n self.__publisher.publish(msg)\n self.__last_joint_states = msg\n self.__previous_time = self.__robot.getTime()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000398", "length_bytes": 3140, "license_type": "permissive", "methods": [{"docstring": "Initialize the position sensors and the topic.", "name": "__init__", "signature": "def __init__(self, robot, joint_prefix, node, frame_id='joint_states')"}, {"docstring": "Publish the 'joint_states' topic with up to date value.", "name": "publish", "signature": "def publish(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000113", "prompt": "Implement the Python class `JointStatePublisher` described below.\n\nClass description:\nPublishes joint states. Discovers all joints with positional sensors and publishes corresponding ROS2 messages of type [`sensor_msgs/JointState`](https://github.com/ros2/common_interfaces/blob/master/sensor_msgs/msg/JointState.msg). Args: robot (WebotsNode): Webots Robot node. 
jointPrefix (str): Prefix to all joint names. node (Node): ROS2 node.\n\nMethod signatures and docstrings:\n- def __init__(self, robot, joint_prefix, node, frame_id='joint_states'): Initialize the position sensors and the topic.\n- def publish(self): Publish the 'joint_states' topic with up to date value.", "prompted_full_text": "Implement the Python class `JointStatePublisher` described below.\n\nClass description:\nPublishes joint states. Discovers all joints with positional sensors and publishes corresponding ROS2 messages of type [`sensor_msgs/JointState`](https://github.com/ros2/common_interfaces/blob/master/sensor_msgs/msg/JointState.msg). Args: robot (WebotsNode): Webots Robot node. jointPrefix (str): Prefix to all joint names. node (Node): ROS2 node.\n\nMethod signatures and docstrings:\n- def __init__(self, robot, joint_prefix, node, frame_id='joint_states'): Initialize the position sensors and the topic.\n- def publish(self): Publish the 'joint_states' topic with up to date value.\n\n<|skeleton|>\nclass JointStatePublisher:\n \"\"\"Publishes joint states. Discovers all joints with positional sensors and publishes corresponding ROS2 messages of type [`sensor_msgs/JointState`](https://github.com/ros2/common_interfaces/blob/master/sensor_msgs/msg/JointState.msg). Args: robot (WebotsNode): Webots Robot node. jointPrefix (str): Prefix to all joint names. node (Node): ROS2 node.\"\"\"\n\n def __init__(self, robot, joint_prefix, node, frame_id='joint_states'):\n \"\"\"Initialize the position sensors and the topic.\"\"\"\n <|body_0|>\n\n def publish(self):\n \"\"\"Publish the 'joint_states' topic with up to date value.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__robot = robot\n self.__frame_id = frame_id\n self.__joint_prefix = joint_prefix\n self.__node = node\n self.__sensors = []\n self.__timestep = int(robot.getBasicTimeStep())\n self.__last_joint_states = None\n self.__previous_time = 0\n self.__previous_position = []\n self.__joint_names = []\n for i in range(robot.getNumberOfDevices()):\n device = robot.getDeviceByIndex(i)\n if device.getNodeType() == Node.POSITION_SENSOR:\n motor = device.getMotor()\n name = motor.getName() if motor is not None else device.getName()\n self.__joint_names.append(name)\n self.__sensors.append(device)\n self.__previous_position.append(0)\n device.enable(self.__timestep)\n self.__publisher = self.__node.create_publisher(JointState, 'joint_states', 1)\n<|end_body_0|>\n\n<|body_start_1|>\n msg = JointState()\n msg.header.stamp = Time(seconds=self.__robot.getTime()).to_msg()\n msg.header.frame_id = self.__frame_id\n msg.name = [s + self.__joint_prefix for s in self.__joint_names]\n msg.position = []\n time_difference = self.__robot.getTime() - self.__previous_time\n for i in range(len(self.__sensors)):\n value = self.__sensors[i].getValue()\n msg.position.append(value)\n msg.velocity.append((value - self.__previous_position[i]) / time_difference if time_difference > 0 else 0.0)\n self.__previous_position[i] = value\n msg.effort = [0.0] * 6\n self.__publisher.publish(msg)\n self.__last_joint_states = msg\n self.__previous_time = self.__robot.getTime()\n<|end_body_1|>\n", "revision_id": "08a061e73e3b88d57cc27b662be0f907d8b9f15b", "skeleton": "<|skeleton|>\nclass JointStatePublisher:\n \"\"\"Publishes joint states. Discovers all joints with positional sensors and publishes corresponding ROS2 messages of type [`sensor_msgs/JointState`](https://github.com/ros2/common_interfaces/blob/master/sensor_msgs/msg/JointState.msg). 
Args: robot (WebotsNode): Webots Robot node. jointPrefix (str): Prefix to all joint names. node (Node): ROS2 node.\"\"\"\n\n def __init__(self, robot, joint_prefix, node, frame_id='joint_states'):\n \"\"\"Initialize the position sensors and the topic.\"\"\"\n <|body_0|>\n\n def publish(self):\n \"\"\"Publish the 'joint_states' topic with up to date value.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class JointStatePublisher:\n \"\"\"Publishes joint states. Discovers all joints with positional sensors and publishes corresponding ROS2 messages of type [`sensor_msgs/JointState`](https://github.com/ros2/common_interfaces/blob/master/sensor_msgs/msg/JointState.msg). Args: robot (WebotsNode): Webots Robot node. jointPrefix (str): Prefix to all joint names. node (Node): ROS2 node.\"\"\"\n\n def __init__(self, robot, joint_prefix, node, frame_id='joint_states'):\n \"\"\"Initialize the position sensors and the topic.\"\"\"\n self.__robot = robot\n self.__frame_id = frame_id\n self.__joint_prefix = joint_prefix\n self.__node = node\n self.__sensors = []\n self.__timestep = int(robot.getBasicTimeStep())\n self.__last_joint_states = None\n self.__previous_time = 0\n self.__previous_position = []\n self.__joint_names = []\n for i in range(robot.getNumberOfDevices()):\n device = robot.getDeviceByIndex(i)\n if device.getNodeType() == Node.POSITION_SENSOR:\n motor = device.getMotor()\n name = motor.getName() if motor is not None else device.getName()\n self.__joint_names.append(name)\n self.__sensors.append(device)\n self.__previous_position.append(0)\n device.enable(self.__timestep)\n self.__publisher = self.__node.create_publisher(JointState, 'joint_states', 1)\n\n def publish(self):\n \"\"\"Publish the 'joint_states' topic with up to date value.\"\"\"\n msg = JointState()\n msg.header.stamp = Time(seconds=self.__robot.getTime()).to_msg()\n msg.header.frame_id = self.__frame_id\n msg.name = [s + self.__joint_prefix for s in self.__joint_names]\n msg.position = []\n time_difference = self.__robot.getTime() - self.__previous_time\n for i in range(len(self.__sensors)):\n value = self.__sensors[i].getValue()\n msg.position.append(value)\n msg.velocity.append((value - self.__previous_position[i]) / time_difference if time_difference > 0 else 0.0)\n self.__previous_position[i] = value\n msg.effort = [0.0] * 6\n self.__publisher.publish(msg)\n self.__last_joint_states = msg\n self.__previous_time = self.__robot.getTime()\n", "source": "the_stack_v2_python_sparse", "source_path": "webots_ros2_core/webots_ros2_core/joint_state_publisher.py", "source_repo": "harshag37/webots_ros2", "split": "val", "star_events_count": 1} {"blob_id": "125ee024fb9d112d43625ea9ff1bc742e36ed1ef", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method 
not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "class_docstring": "Manages long-running operations with an API service. When an API method normally takes long time to complete, it can be designed to return [Operation][google.longrunning.Operation] to the client, and the client can use this interface to receive the real response asynchronously by polling the operation resource, or using `google.watcher.v1.Watcher` interface to watch the response, or pass the operation resource to another API (such as Google Cloud Pub/Sub API) to receive the response. Any API service that returns long-running operations should implement the `Operations` interface so developers can have a consistent client experience.", "class_name": "OperationsServicer", "detected_licenses": ["LicenseRef-scancode-python-cwi", "GPL-1.0-or-later", "Python-2.0", "LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-free-unknown", "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference", "LGPL-2.0-or-later", "MIT", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OperationsServicer:\n \"\"\"Manages long-running operations with an API service. When an API method normally takes long time to complete, it can be designed to return [Operation][google.longrunning.Operation] to the client, and the client can use this interface to receive the real response asynchronously by polling the operation resource, or using `google.watcher.v1.Watcher` interface to watch the response, or pass the operation resource to another API (such as Google Cloud Pub/Sub API) to receive the response. Any API service that returns long-running operations should implement the `Operations` interface so developers can have a consistent client experience.\"\"\"\n\n def GetOperation(self, request, context):\n \"\"\"Gets the latest state of a long-running operation. Clients may use this method to poll the operation result at intervals as recommended by the API service.\"\"\"\n <|body_0|>\n\n def ListOperations(self, request, context):\n \"\"\"Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.\"\"\"\n <|body_1|>\n\n def CancelOperation(self, request, context):\n \"\"\"Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients may use [Operations.GetOperation] or other methods to check whether the cancellation succeeded or the operation completed despite cancellation.\"\"\"\n <|body_2|>\n\n def DeleteOperation(self, request, context):\n \"\"\"Deletes a long-running operation. It indicates the client is no longer interested in the operation result. 
It does not cancel the operation.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000399", "length_bytes": 13771, "license_type": "permissive", "methods": [{"docstring": "Gets the latest state of a long-running operation. Clients may use this method to poll the operation result at intervals as recommended by the API service.", "name": "GetOperation", "signature": "def GetOperation(self, request, context)"}, {"docstring": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", "name": "ListOperations", "signature": "def ListOperations(self, request, context)"}, {"docstring": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients may use [Operations.GetOperation] or other methods to check whether the cancellation succeeded or the operation completed despite cancellation.", "name": "CancelOperation", "signature": "def CancelOperation(self, request, context)"}, {"docstring": "Deletes a long-running operation. It indicates the client is no longer interested in the operation result. It does not cancel the operation.", "name": "DeleteOperation", "signature": "def DeleteOperation(self, request, context)"}], "n_methods": 4, "prompt": "Implement the Python class `OperationsServicer` described below.\n\nClass description:\nManages long-running operations with an API service. When an API method normally takes long time to complete, it can be designed to return [Operation][google.longrunning.Operation] to the client, and the client can use this interface to receive the real response asynchronously by polling the operation resource, or using `google.watcher.v1.Watcher` interface to watch the response, or pass the operation resource to another API (such as Google Cloud Pub/Sub API) to receive the response. Any API service that returns long-running operations should implement the `Operations` interface so developers can have a consistent client experience.\n\nMethod signatures and docstrings:\n- def GetOperation(self, request, context): Gets the latest state of a long-running operation. Clients may use this method to poll the operation result at intervals as recommended by the API service.\n- def ListOperations(self, request, context): Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.\n- def CancelOperation(self, request, context): Starts asynchronous cancellation on a long-running operation. 
The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients may use [Operations.GetOperation] or other methods to check whether the cancellation succeeded or the operation completed despite cancellation.\n- def DeleteOperation(self, request, context): Deletes a long-running operation. It indicates the client is no longer interested in the operation result. It does not cancel the operation.", "prompted_full_text": "Implement the Python class `OperationsServicer` described below.\n\nClass description:\nManages long-running operations with an API service. When an API method normally takes long time to complete, it can be designed to return [Operation][google.longrunning.Operation] to the client, and the client can use this interface to receive the real response asynchronously by polling the operation resource, or using `google.watcher.v1.Watcher` interface to watch the response, or pass the operation resource to another API (such as Google Cloud Pub/Sub API) to receive the response. Any API service that returns long-running operations should implement the `Operations` interface so developers can have a consistent client experience.\n\nMethod signatures and docstrings:\n- def GetOperation(self, request, context): Gets the latest state of a long-running operation. Clients may use this method to poll the operation result at intervals as recommended by the API service.\n- def ListOperations(self, request, context): Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.\n- def CancelOperation(self, request, context): Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients may use [Operations.GetOperation] or other methods to check whether the cancellation succeeded or the operation completed despite cancellation.\n- def DeleteOperation(self, request, context): Deletes a long-running operation. It indicates the client is no longer interested in the operation result. It does not cancel the operation.\n\n<|skeleton|>\nclass OperationsServicer:\n \"\"\"Manages long-running operations with an API service. When an API method normally takes long time to complete, it can be designed to return [Operation][google.longrunning.Operation] to the client, and the client can use this interface to receive the real response asynchronously by polling the operation resource, or using `google.watcher.v1.Watcher` interface to watch the response, or pass the operation resource to another API (such as Google Cloud Pub/Sub API) to receive the response. Any API service that returns long-running operations should implement the `Operations` interface so developers can have a consistent client experience.\"\"\"\n\n def GetOperation(self, request, context):\n \"\"\"Gets the latest state of a long-running operation. Clients may use this method to poll the operation result at intervals as recommended by the API service.\"\"\"\n <|body_0|>\n\n def ListOperations(self, request, context):\n \"\"\"Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.\"\"\"\n <|body_1|>\n\n def CancelOperation(self, request, context):\n \"\"\"Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients may use [Operations.GetOperation] or other methods to check whether the cancellation succeeded or the operation completed despite cancellation.\"\"\"\n <|body_2|>\n\n def DeleteOperation(self, request, context):\n \"\"\"Deletes a long-running operation. It indicates the client is no longer interested in the operation result. It does not cancel the operation.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "revision_id": "72a05af97787001756bae2511b7985e61498c965", "skeleton": "<|skeleton|>\nclass OperationsServicer:\n \"\"\"Manages long-running operations with an API service. When an API method normally takes long time to complete, it can be designed to return [Operation][google.longrunning.Operation] to the client, and the client can use this interface to receive the real response asynchronously by polling the operation resource, or using `google.watcher.v1.Watcher` interface to watch the response, or pass the operation resource to another API (such as Google Cloud Pub/Sub API) to receive the response. Any API service that returns long-running operations should implement the `Operations` interface so developers can have a consistent client experience.\"\"\"\n\n def GetOperation(self, request, context):\n \"\"\"Gets the latest state of a long-running operation. Clients may use this method to poll the operation result at intervals as recommended by the API service.\"\"\"\n <|body_0|>\n\n def ListOperations(self, request, context):\n \"\"\"Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.\"\"\"\n <|body_1|>\n\n def CancelOperation(self, request, context):\n \"\"\"Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients may use [Operations.GetOperation] or other methods to check whether the cancellation succeeded or the operation completed despite cancellation.\"\"\"\n <|body_2|>\n\n def DeleteOperation(self, request, context):\n \"\"\"Deletes a long-running operation. It indicates the client is no longer interested in the operation result. 
It does not cancel the operation.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class OperationsServicer:\n \"\"\"Manages long-running operations with an API service. When an API method normally takes long time to complete, it can be designed to return [Operation][google.longrunning.Operation] to the client, and the client can use this interface to receive the real response asynchronously by polling the operation resource, or using `google.watcher.v1.Watcher` interface to watch the response, or pass the operation resource to another API (such as Google Cloud Pub/Sub API) to receive the response. Any API service that returns long-running operations should implement the `Operations` interface so developers can have a consistent client experience.\"\"\"\n\n def GetOperation(self, request, context):\n \"\"\"Gets the latest state of a long-running operation. Clients may use this method to poll the operation result at intervals as recommended by the API service.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def ListOperations(self, request, context):\n \"\"\"Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def CancelOperation(self, request, context):\n \"\"\"Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients may use [Operations.GetOperation] or other methods to check whether the cancellation succeeded or the operation completed despite cancellation.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def DeleteOperation(self, request, context):\n \"\"\"Deletes a long-running operation. It indicates the client is no longer interested in the operation result. 
It does not cancel the operation.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "third_party/chromite/third_party/gcloud/bigtable/_generated/operations_grpc_pb2.py", "source_repo": "metux/chromium-suckless", "split": "val", "star_events_count": 5} {"blob_id": "f24ac3e0fdb91b8a4ff34b3af8197c8e2ce210da", "bodies": ["total, n = (0, len(nums))\nfor currBit in range(31):\n currOnes = 0\n for num in nums:\n currOnes += num >> currBit & 1\n total += currOnes * (n - currOnes)\nreturn total", "total = 0\nfor b in zip(*map('{:030b}'.format, nums)):\n zeros = b.count('0')\n total += zeros * (len(b) - zeros)\nreturn total"], "bodies_text": "<|body_start_0|>\n total, n = (0, len(nums))\n for currBit in range(31):\n currOnes = 0\n for num in nums:\n currOnes += num >> currBit & 1\n total += currOnes * (n - currOnes)\n return total\n<|end_body_0|>\n\n<|body_start_1|>\n total = 0\n for b in zip(*map('{:030b}'.format, nums)):\n zeros = b.count('0')\n total += zeros * (len(b) - zeros)\n return total\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def totalHammingDistance(self, nums: List[int]) -> int:\n \"\"\"According to the question's statement, the numbers in the input list is less than 10^9, which is less than 2^30. So for each bit of the input number, if there are k numbers with that bit as 1 while n - k numbers with that bit as 0, then they could form k * (n - k) pairs of difference for that bit, where n is the total length of the input list.\"\"\"\n <|body_0|>\n\n def totalHammingDistance2(self, nums: List[int]) -> int:\n \"\"\"Same idea as the above, but turning it into char group for each bit first: 1. map('{:030b}'.format, nums) will return an iterator that turns each numbers in the input list to a 30 bits binary format string filled with leading zeros, for example: {:030b}'.format(100) = '000000000000000000000001100100'. 2. *map will make each string as a parameter to the zip function. 3. Then the zip function will take each char at the same position from each string and form a tuple, in other words, it will take all the bit at the same position from each number in the input list.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n total, n = (0, len(nums))\n for currBit in range(31):\n currOnes = 0\n for num in nums:\n currOnes += num >> currBit & 1\n total += currOnes * (n - currOnes)\n return total\n<|end_body_0|>\n\n<|body_start_1|>\n total = 0\n for b in zip(*map('{:030b}'.format, nums)):\n zeros = b.count('0')\n total += zeros * (len(b) - zeros)\n return total\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000400", "length_bytes": 1729, "license_type": "no_license", "methods": [{"docstring": "According to the question's statement, the numbers in the input list is less than 10^9, which is less than 2^30. So for each bit of the input number, if there are k numbers with that bit as 1 while n - k numbers with that bit as 0, then they could form k * (n - k) pairs of difference for that bit, where n is the total length of the input list.", "name": "totalHammingDistance", "signature": "def totalHammingDistance(self, nums: List[int]) -> int"}, {"docstring": "Same idea as the above, but turning it into char group for each bit first: 1. 
map('{:030b}'.format, nums) will return an iterator that turns each numbers in the input list to a 30 bits binary format string filled with leading zeros, for example: {:030b}'.format(100) = '000000000000000000000001100100'. 2. *map will make each string as a parameter to the zip function. 3. Then the zip function will take each char at the same position from each string and form a tuple, in other words, it will take all the bit at the same position from each number in the input list.", "name": "totalHammingDistance2", "signature": "def totalHammingDistance2(self, nums: List[int]) -> int"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000387", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def totalHammingDistance(self, nums: List[int]) -> int: According to the question's statement, the numbers in the input list is less than 10^9, which is less than 2^30. So for each bit of the input number, if there are k numbers with that bit as 1 while n - k numbers with that bit as 0, then they could form k * (n - k) pairs of difference for that bit, where n is the total length of the input list.\n- def totalHammingDistance2(self, nums: List[int]) -> int: Same idea as the above, but turning it into char group for each bit first: 1. map('{:030b}'.format, nums) will return an iterator that turns each numbers in the input list to a 30 bits binary format string filled with leading zeros, for example: {:030b}'.format(100) = '000000000000000000000001100100'. 2. *map will make each string as a parameter to the zip function. 3. Then the zip function will take each char at the same position from each string and form a tuple, in other words, it will take all the bit at the same position from each number in the input list.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def totalHammingDistance(self, nums: List[int]) -> int: According to the question's statement, the numbers in the input list is less than 10^9, which is less than 2^30. So for each bit of the input number, if there are k numbers with that bit as 1 while n - k numbers with that bit as 0, then they could form k * (n - k) pairs of difference for that bit, where n is the total length of the input list.\n- def totalHammingDistance2(self, nums: List[int]) -> int: Same idea as the above, but turning it into char group for each bit first: 1. map('{:030b}'.format, nums) will return an iterator that turns each numbers in the input list to a 30 bits binary format string filled with leading zeros, for example: {:030b}'.format(100) = '000000000000000000000001100100'. 2. *map will make each string as a parameter to the zip function. 3. Then the zip function will take each char at the same position from each string and form a tuple, in other words, it will take all the bit at the same position from each number in the input list.\n\n<|skeleton|>\nclass Solution:\n\n def totalHammingDistance(self, nums: List[int]) -> int:\n \"\"\"According to the question's statement, the numbers in the input list is less than 10^9, which is less than 2^30. 
So for each bit of the input number, if there are k numbers with that bit as 1 while n - k numbers with that bit as 0, then they could form k * (n - k) pairs of difference for that bit, where n is the total length of the input list.\"\"\"\n <|body_0|>\n\n def totalHammingDistance2(self, nums: List[int]) -> int:\n \"\"\"Same idea as the above, but turning it into char group for each bit first: 1. map('{:030b}'.format, nums) will return an iterator that turns each numbers in the input list to a 30 bits binary format string filled with leading zeros, for example: {:030b}'.format(100) = '000000000000000000000001100100'. 2. *map will make each string as a parameter to the zip function. 3. Then the zip function will take each char at the same position from each string and form a tuple, in other words, it will take all the bit at the same position from each number in the input list.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n total, n = (0, len(nums))\n for currBit in range(31):\n currOnes = 0\n for num in nums:\n currOnes += num >> currBit & 1\n total += currOnes * (n - currOnes)\n return total\n<|end_body_0|>\n\n<|body_start_1|>\n total = 0\n for b in zip(*map('{:030b}'.format, nums)):\n zeros = b.count('0')\n total += zeros * (len(b) - zeros)\n return total\n<|end_body_1|>\n", "revision_id": "edb870f83f0c4568cce0cacec04ee70cf6b545bf", "skeleton": "<|skeleton|>\nclass Solution:\n\n def totalHammingDistance(self, nums: List[int]) -> int:\n \"\"\"According to the question's statement, the numbers in the input list is less than 10^9, which is less than 2^30. So for each bit of the input number, if there are k numbers with that bit as 1 while n - k numbers with that bit as 0, then they could form k * (n - k) pairs of difference for that bit, where n is the total length of the input list.\"\"\"\n <|body_0|>\n\n def totalHammingDistance2(self, nums: List[int]) -> int:\n \"\"\"Same idea as the above, but turning it into char group for each bit first: 1. map('{:030b}'.format, nums) will return an iterator that turns each numbers in the input list to a 30 bits binary format string filled with leading zeros, for example: {:030b}'.format(100) = '000000000000000000000001100100'. 2. *map will make each string as a parameter to the zip function. 3. Then the zip function will take each char at the same position from each string and form a tuple, in other words, it will take all the bit at the same position from each number in the input list.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def totalHammingDistance(self, nums: List[int]) -> int:\n \"\"\"According to the question's statement, the numbers in the input list is less than 10^9, which is less than 2^30. So for each bit of the input number, if there are k numbers with that bit as 1 while n - k numbers with that bit as 0, then they could form k * (n - k) pairs of difference for that bit, where n is the total length of the input list.\"\"\"\n total, n = (0, len(nums))\n for currBit in range(31):\n currOnes = 0\n for num in nums:\n currOnes += num >> currBit & 1\n total += currOnes * (n - currOnes)\n return total\n\n def totalHammingDistance2(self, nums: List[int]) -> int:\n \"\"\"Same idea as the above, but turning it into char group for each bit first: 1. 
map('{:030b}'.format, nums) will return an iterator that turns each numbers in the input list to a 30 bits binary format string filled with leading zeros, for example: {:030b}'.format(100) = '000000000000000000000001100100'. 2. *map will make each string as a parameter to the zip function. 3. Then the zip function will take each char at the same position from each string and form a tuple, in other words, it will take all the bit at the same position from each number in the input list.\"\"\"\n total = 0\n for b in zip(*map('{:030b}'.format, nums)):\n zeros = b.count('0')\n total += zeros * (len(b) - zeros)\n return total\n", "source": "the_stack_v2_python_sparse", "source_path": "2020/total_hamming_distance.py", "source_repo": "eronekogin/leetcode", "split": "val", "star_events_count": 0} {"blob_id": "bd99975a2e10405a5a980d0e8d460aed4ebe8fd0", "bodies": ["DriverClient.__init__(self)\nself.host = host\nself.cmd_port = cmd_port\nself.event_port = event_port\nself.cmd_host_string = 'tcp://%s:%i' % (self.host, self.cmd_port)\nself.event_host_string = 'tcp://%s:%i' % (self.host, self.event_port)\nself.zmq_context = None\nself.zmq_cmd_socket = None\nself.event_thread = None\nself.stop_event_thread = True", "self.zmq_context = zmq.Context()\nself.zmq_cmd_socket = self.zmq_context.socket(zmq.REQ)\nself.zmq_cmd_socket.connect(self.cmd_host_string)\nlog.info('Driver client cmd socket connected to %s.' % self.cmd_host_string)\nself.evt_callback = evt_callback\n\ndef recv_evt_messages(driver_client):\n \"\"\"\n A looping function that monitors a ZMQ SUB socket for asynchronous\n driver events. Can be run as a thread or greenlet.\n @param driver_client The client object that launches the thread.\n \"\"\"\n context = zmq.Context()\n sock = context.socket(zmq.SUB)\n sock.connect(driver_client.event_host_string)\n sock.setsockopt(zmq.SUBSCRIBE, '')\n log.info('Driver client event thread connected to %s.' % driver_client.event_host_string)\n driver_client.stop_event_thread = False\n while not driver_client.stop_event_thread:\n try:\n evt = sock.recv_pyobj(flags=zmq.NOBLOCK)\n log.debug('got event: %s' % str(evt))\n if driver_client.evt_callback:\n driver_client.evt_callback(evt)\n except zmq.ZMQError:\n time.sleep(0.5)\n sock.close()\n context.term()\n log.info('Client event socket closed.')\nself.event_thread = thread.start_new_thread(recv_evt_messages, (self,))\nlog.info('Driver client messaging started.')", "self.zmq_cmd_socket.close()\nself.zmq_cmd_socket = None\nself.zmq_context.term()\nself.zmq_context = None\nself.stop_event_thread = True\nself.event_thread = None\nself.evt_callback = None\nlog.info('Driver client messaging closed.')", "msg = {'cmd': cmd, 'args': args, 'kwargs': kwargs}\nlog.debug('Sending command %s.' % str(msg))\nwhile True:\n try:\n self.zmq_cmd_socket.send_pyobj(msg)\n if msg == 'stop_driver_process':\n return 'driver stopping'\n break\n except zmq.ZMQError:\n time.sleep(0.5)\nlog.debug('Awaiting reply.')\nwhile True:\n try:\n reply = self.zmq_cmd_socket.recv_pyobj(flags=zmq.NOBLOCK)\n break\n except zmq.ZMQError:\n time.sleep(0.5)\nlog.debug('Reply: %s.' 
% str(reply))\nif isinstance(reply, Exception):\n raise reply\nelse:\n return reply"], "bodies_text": "<|body_start_0|>\n DriverClient.__init__(self)\n self.host = host\n self.cmd_port = cmd_port\n self.event_port = event_port\n self.cmd_host_string = 'tcp://%s:%i' % (self.host, self.cmd_port)\n self.event_host_string = 'tcp://%s:%i' % (self.host, self.event_port)\n self.zmq_context = None\n self.zmq_cmd_socket = None\n self.event_thread = None\n self.stop_event_thread = True\n<|end_body_0|>\n\n<|body_start_1|>\n self.zmq_context = zmq.Context()\n self.zmq_cmd_socket = self.zmq_context.socket(zmq.REQ)\n self.zmq_cmd_socket.connect(self.cmd_host_string)\n log.info('Driver client cmd socket connected to %s.' % self.cmd_host_string)\n self.evt_callback = evt_callback\n\n def recv_evt_messages(driver_client):\n \"\"\"\n A looping function that monitors a ZMQ SUB socket for asynchronous\n driver events. Can be run as a thread or greenlet.\n @param driver_client The client object that launches the thread.\n \"\"\"\n context = zmq.Context()\n sock = context.socket(zmq.SUB)\n sock.connect(driver_client.event_host_string)\n sock.setsockopt(zmq.SUBSCRIBE, '')\n log.info('Driver client event thread connected to %s.' % driver_client.event_host_string)\n driver_client.stop_event_thread = False\n while not driver_client.stop_event_thread:\n try:\n evt = sock.recv_pyobj(flags=zmq.NOBLOCK)\n log.debug('got event: %s' % str(evt))\n if driver_client.evt_callback:\n driver_client.evt_callback(evt)\n except zmq.ZMQError:\n time.sleep(0.5)\n sock.close()\n context.term()\n log.info('Client event socket closed.')\n self.event_thread = thread.start_new_thread(recv_evt_messages, (self,))\n log.info('Driver client messaging started.')\n<|end_body_1|>\n\n<|body_start_2|>\n self.zmq_cmd_socket.close()\n self.zmq_cmd_socket = None\n self.zmq_context.term()\n self.zmq_context = None\n self.stop_event_thread = True\n self.event_thread = None\n self.evt_callback = None\n log.info('Driver client messaging closed.')\n<|end_body_2|>\n\n<|body_start_3|>\n msg = {'cmd': cmd, 'args': args, 'kwargs': kwargs}\n log.debug('Sending command %s.' % str(msg))\n while True:\n try:\n self.zmq_cmd_socket.send_pyobj(msg)\n if msg == 'stop_driver_process':\n return 'driver stopping'\n break\n except zmq.ZMQError:\n time.sleep(0.5)\n log.debug('Awaiting reply.')\n while True:\n try:\n reply = self.zmq_cmd_socket.recv_pyobj(flags=zmq.NOBLOCK)\n break\n except zmq.ZMQError:\n time.sleep(0.5)\n log.debug('Reply: %s.' % str(reply))\n if isinstance(reply, Exception):\n raise reply\n else:\n return reply\n<|end_body_3|>\n", "class_docstring": "A class for communicating with a ZMQ-based driver process using python thread for catching asynchronous driver events.", "class_name": "ZmqDriverClient", "detected_licenses": ["BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ZmqDriverClient:\n \"\"\"A class for communicating with a ZMQ-based driver process using python thread for catching asynchronous driver events.\"\"\"\n\n def __init__(self, host, cmd_port, event_port):\n \"\"\"Initialize members. @param host Host string address of the driver process. @param cmd_port Port number for the driver process command port. @param event_port Port number for the driver process event port.\"\"\"\n <|body_0|>\n\n def start_messaging(self, evt_callback=None):\n \"\"\"Initialize and start messaging resources for the driver process client. 
Initializes command socket for sending requests, and starts event thread that listens for events from the driver process independently of command request-reply.\"\"\"\n <|body_1|>\n\n def stop_messaging(self):\n \"\"\"Close messaging resources for the driver process client. Close ZMQ command socket and terminate command context. Set flag to cause event thread to close event socket and context and terminate. Await event thread completion and return.\"\"\"\n <|body_2|>\n\n def cmd_dvr(self, cmd, *args, **kwargs):\n \"\"\"Command a driver by request-reply messaging. Package command message and send on blocking command socket. Block on same socket to receive the reply. Return the driver reply. @param cmd The driver command identifier. @param args Positional arguments of the command. @param kwargs Keyword arguments of the command. @retval Command result.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n DriverClient.__init__(self)\n self.host = host\n self.cmd_port = cmd_port\n self.event_port = event_port\n self.cmd_host_string = 'tcp://%s:%i' % (self.host, self.cmd_port)\n self.event_host_string = 'tcp://%s:%i' % (self.host, self.event_port)\n self.zmq_context = None\n self.zmq_cmd_socket = None\n self.event_thread = None\n self.stop_event_thread = True\n<|end_body_0|>\n\n<|body_start_1|>\n self.zmq_context = zmq.Context()\n self.zmq_cmd_socket = self.zmq_context.socket(zmq.REQ)\n self.zmq_cmd_socket.connect(self.cmd_host_string)\n log.info('Driver client cmd socket connected to %s.' % self.cmd_host_string)\n self.evt_callback = evt_callback\n\n def recv_evt_messages(driver_client):\n \"\"\"\n A looping function that monitors a ZMQ SUB socket for asynchronous\n driver events. Can be run as a thread or greenlet.\n @param driver_client The client object that launches the thread.\n \"\"\"\n context = zmq.Context()\n sock = context.socket(zmq.SUB)\n sock.connect(driver_client.event_host_string)\n sock.setsockopt(zmq.SUBSCRIBE, '')\n log.info('Driver client event thread connected to %s.' % driver_client.event_host_string)\n driver_client.stop_event_thread = False\n while not driver_client.stop_event_thread:\n try:\n evt = sock.recv_pyobj(flags=zmq.NOBLOCK)\n log.debug('got event: %s' % str(evt))\n if driver_client.evt_callback:\n driver_client.evt_callback(evt)\n except zmq.ZMQError:\n time.sleep(0.5)\n sock.close()\n context.term()\n log.info('Client event socket closed.')\n self.event_thread = thread.start_new_thread(recv_evt_messages, (self,))\n log.info('Driver client messaging started.')\n<|end_body_1|>\n\n<|body_start_2|>\n self.zmq_cmd_socket.close()\n self.zmq_cmd_socket = None\n self.zmq_context.term()\n self.zmq_context = None\n self.stop_event_thread = True\n self.event_thread = None\n self.evt_callback = None\n log.info('Driver client messaging closed.')\n<|end_body_2|>\n\n<|body_start_3|>\n msg = {'cmd': cmd, 'args': args, 'kwargs': kwargs}\n log.debug('Sending command %s.' % str(msg))\n while True:\n try:\n self.zmq_cmd_socket.send_pyobj(msg)\n if msg == 'stop_driver_process':\n return 'driver stopping'\n break\n except zmq.ZMQError:\n time.sleep(0.5)\n log.debug('Awaiting reply.')\n while True:\n try:\n reply = self.zmq_cmd_socket.recv_pyobj(flags=zmq.NOBLOCK)\n break\n except zmq.ZMQError:\n time.sleep(0.5)\n log.debug('Reply: %s.' 
% str(reply))\n if isinstance(reply, Exception):\n raise reply\n else:\n return reply\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000401", "length_bytes": 6238, "license_type": "permissive", "methods": [{"docstring": "Initialize members. @param host Host string address of the driver process. @param cmd_port Port number for the driver process command port. @param event_port Port number for the driver process event port.", "name": "__init__", "signature": "def __init__(self, host, cmd_port, event_port)"}, {"docstring": "Initialize and start messaging resources for the driver process client. Initializes command socket for sending requests, and starts event thread that listens for events from the driver process independently of command request-reply.", "name": "start_messaging", "signature": "def start_messaging(self, evt_callback=None)"}, {"docstring": "Close messaging resources for the driver process client. Close ZMQ command socket and terminate command context. Set flag to cause event thread to close event socket and context and terminate. Await event thread completion and return.", "name": "stop_messaging", "signature": "def stop_messaging(self)"}, {"docstring": "Command a driver by request-reply messaging. Package command message and send on blocking command socket. Block on same socket to receive the reply. Return the driver reply. @param cmd The driver command identifier. @param args Positional arguments of the command. @param kwargs Keyword arguments of the command. @retval Command result.", "name": "cmd_dvr", "signature": "def cmd_dvr(self, cmd, *args, **kwargs)"}], "n_methods": 4, "prompt": "Implement the Python class `ZmqDriverClient` described below.\n\nClass description:\nA class for communicating with a ZMQ-based driver process using python thread for catching asynchronous driver events.\n\nMethod signatures and docstrings:\n- def __init__(self, host, cmd_port, event_port): Initialize members. @param host Host string address of the driver process. @param cmd_port Port number for the driver process command port. @param event_port Port number for the driver process event port.\n- def start_messaging(self, evt_callback=None): Initialize and start messaging resources for the driver process client. Initializes command socket for sending requests, and starts event thread that listens for events from the driver process independently of command request-reply.\n- def stop_messaging(self): Close messaging resources for the driver process client. Close ZMQ command socket and terminate command context. Set flag to cause event thread to close event socket and context and terminate. Await event thread completion and return.\n- def cmd_dvr(self, cmd, *args, **kwargs): Command a driver by request-reply messaging. Package command message and send on blocking command socket. Block on same socket to receive the reply. Return the driver reply. @param cmd The driver command identifier. @param args Positional arguments of the command. @param kwargs Keyword arguments of the command. @retval Command result.", "prompted_full_text": "Implement the Python class `ZmqDriverClient` described below.\n\nClass description:\nA class for communicating with a ZMQ-based driver process using python thread for catching asynchronous driver events.\n\nMethod signatures and docstrings:\n- def __init__(self, host, cmd_port, event_port): Initialize members. @param host Host string address of the driver process. @param cmd_port Port number for the driver process command port. 
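Editor's note on the ZmqDriverClient record: every copy of the cmd_dvr body checks `if msg == 'stop_driver_process':`, but msg is the dict built on the preceding line, so the comparison is always False and the 'driver stopping' early return is unreachable; the check presumably meant the cmd argument. A corrected sketch of just that send loop (the function name and free-standing socket parameter are illustrative; in the record this logic lives inside the class and uses self.zmq_cmd_socket):

import time
import zmq

def send_command(zmq_cmd_socket, cmd, *args, **kwargs):
    msg = {'cmd': cmd, 'args': args, 'kwargs': kwargs}
    while True:
        try:
            zmq_cmd_socket.send_pyobj(msg)
            # Compare the command name, not the message dict: a dict never
            # equals a string, so the original test could never fire.
            if cmd == 'stop_driver_process':
                return 'driver stopping'
            break
        except zmq.ZMQError:
            time.sleep(0.5)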
@param event_port Port number for the driver process event port.\n- def start_messaging(self, evt_callback=None): Initialize and start messaging resources for the driver process client. Initializes command socket for sending requests, and starts event thread that listens for events from the driver process independently of command request-reply.\n- def stop_messaging(self): Close messaging resources for the driver process client. Close ZMQ command socket and terminate command context. Set flag to cause event thread to close event socket and context and terminate. Await event thread completion and return.\n- def cmd_dvr(self, cmd, *args, **kwargs): Command a driver by request-reply messaging. Package command message and send on blocking command socket. Block on same socket to receive the reply. Return the driver reply. @param cmd The driver command identifier. @param args Positional arguments of the command. @param kwargs Keyword arguments of the command. @retval Command result.\n\n<|skeleton|>\nclass ZmqDriverClient:\n \"\"\"A class for communicating with a ZMQ-based driver process using python thread for catching asynchronous driver events.\"\"\"\n\n def __init__(self, host, cmd_port, event_port):\n \"\"\"Initialize members. @param host Host string address of the driver process. @param cmd_port Port number for the driver process command port. @param event_port Port number for the driver process event port.\"\"\"\n <|body_0|>\n\n def start_messaging(self, evt_callback=None):\n \"\"\"Initialize and start messaging resources for the driver process client. Initializes command socket for sending requests, and starts event thread that listens for events from the driver process independently of command request-reply.\"\"\"\n <|body_1|>\n\n def stop_messaging(self):\n \"\"\"Close messaging resources for the driver process client. Close ZMQ command socket and terminate command context. Set flag to cause event thread to close event socket and context and terminate. Await event thread completion and return.\"\"\"\n <|body_2|>\n\n def cmd_dvr(self, cmd, *args, **kwargs):\n \"\"\"Command a driver by request-reply messaging. Package command message and send on blocking command socket. Block on same socket to receive the reply. Return the driver reply. @param cmd The driver command identifier. @param args Positional arguments of the command. @param kwargs Keyword arguments of the command. @retval Command result.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n DriverClient.__init__(self)\n self.host = host\n self.cmd_port = cmd_port\n self.event_port = event_port\n self.cmd_host_string = 'tcp://%s:%i' % (self.host, self.cmd_port)\n self.event_host_string = 'tcp://%s:%i' % (self.host, self.event_port)\n self.zmq_context = None\n self.zmq_cmd_socket = None\n self.event_thread = None\n self.stop_event_thread = True\n<|end_body_0|>\n\n<|body_start_1|>\n self.zmq_context = zmq.Context()\n self.zmq_cmd_socket = self.zmq_context.socket(zmq.REQ)\n self.zmq_cmd_socket.connect(self.cmd_host_string)\n log.info('Driver client cmd socket connected to %s.' % self.cmd_host_string)\n self.evt_callback = evt_callback\n\n def recv_evt_messages(driver_client):\n \"\"\"\n A looping function that monitors a ZMQ SUB socket for asynchronous\n driver events. 
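A portability note on the same record: thread.start_new_thread is the Python 2 spelling (the module is _thread on Python 3), and on Python 3 pyzmq rejects a str subscription topic, so sock.setsockopt(zmq.SUBSCRIBE, '') would raise a TypeError there; b'' (or setsockopt_string) is needed. A hedged Python 3 sketch of the same SUB loop using threading.Thread, which also returns a joinable handle — the record's stop_messaging docstring promises to await thread completion, but start_new_thread gives it nothing to join:

import threading
import time
import zmq

def start_event_thread(event_host_string, evt_callback, stop_flag):
    # stop_flag is a threading.Event; set it to make the loop exit.
    def recv_evt_messages():
        context = zmq.Context()
        sock = context.socket(zmq.SUB)
        sock.connect(event_host_string)
        sock.setsockopt(zmq.SUBSCRIBE, b'')  # topic must be bytes on Python 3
        while not stop_flag.is_set():
            try:
                evt = sock.recv_pyobj(flags=zmq.NOBLOCK)
                if evt_callback:
                    evt_callback(evt)
            except zmq.ZMQError:
                time.sleep(0.5)
        sock.close()
        context.term()

    t = threading.Thread(target=recv_evt_messages, daemon=True)
    t.start()
    return t  # caller can stop_flag.set() and then t.join()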
Can be run as a thread or greenlet.\n @param driver_client The client object that launches the thread.\n \"\"\"\n context = zmq.Context()\n sock = context.socket(zmq.SUB)\n sock.connect(driver_client.event_host_string)\n sock.setsockopt(zmq.SUBSCRIBE, '')\n log.info('Driver client event thread connected to %s.' % driver_client.event_host_string)\n driver_client.stop_event_thread = False\n while not driver_client.stop_event_thread:\n try:\n evt = sock.recv_pyobj(flags=zmq.NOBLOCK)\n log.debug('got event: %s' % str(evt))\n if driver_client.evt_callback:\n driver_client.evt_callback(evt)\n except zmq.ZMQError:\n time.sleep(0.5)\n sock.close()\n context.term()\n log.info('Client event socket closed.')\n self.event_thread = thread.start_new_thread(recv_evt_messages, (self,))\n log.info('Driver client messaging started.')\n<|end_body_1|>\n\n<|body_start_2|>\n self.zmq_cmd_socket.close()\n self.zmq_cmd_socket = None\n self.zmq_context.term()\n self.zmq_context = None\n self.stop_event_thread = True\n self.event_thread = None\n self.evt_callback = None\n log.info('Driver client messaging closed.')\n<|end_body_2|>\n\n<|body_start_3|>\n msg = {'cmd': cmd, 'args': args, 'kwargs': kwargs}\n log.debug('Sending command %s.' % str(msg))\n while True:\n try:\n self.zmq_cmd_socket.send_pyobj(msg)\n if msg == 'stop_driver_process':\n return 'driver stopping'\n break\n except zmq.ZMQError:\n time.sleep(0.5)\n log.debug('Awaiting reply.')\n while True:\n try:\n reply = self.zmq_cmd_socket.recv_pyobj(flags=zmq.NOBLOCK)\n break\n except zmq.ZMQError:\n time.sleep(0.5)\n log.debug('Reply: %s.' % str(reply))\n if isinstance(reply, Exception):\n raise reply\n else:\n return reply\n<|end_body_3|>\n", "revision_id": "bdbf01f5614e7188ce19596704794466e5683b30", "skeleton": "<|skeleton|>\nclass ZmqDriverClient:\n \"\"\"A class for communicating with a ZMQ-based driver process using python thread for catching asynchronous driver events.\"\"\"\n\n def __init__(self, host, cmd_port, event_port):\n \"\"\"Initialize members. @param host Host string address of the driver process. @param cmd_port Port number for the driver process command port. @param event_port Port number for the driver process event port.\"\"\"\n <|body_0|>\n\n def start_messaging(self, evt_callback=None):\n \"\"\"Initialize and start messaging resources for the driver process client. Initializes command socket for sending requests, and starts event thread that listens for events from the driver process independently of command request-reply.\"\"\"\n <|body_1|>\n\n def stop_messaging(self):\n \"\"\"Close messaging resources for the driver process client. Close ZMQ command socket and terminate command context. Set flag to cause event thread to close event socket and context and terminate. Await event thread completion and return.\"\"\"\n <|body_2|>\n\n def cmd_dvr(self, cmd, *args, **kwargs):\n \"\"\"Command a driver by request-reply messaging. Package command message and send on blocking command socket. Block on same socket to receive the reply. Return the driver reply. @param cmd The driver command identifier. @param args Positional arguments of the command. @param kwargs Keyword arguments of the command. 
@retval Command result.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ZmqDriverClient:\n \"\"\"A class for communicating with a ZMQ-based driver process using python thread for catching asynchronous driver events.\"\"\"\n\n def __init__(self, host, cmd_port, event_port):\n \"\"\"Initialize members. @param host Host string address of the driver process. @param cmd_port Port number for the driver process command port. @param event_port Port number for the driver process event port.\"\"\"\n DriverClient.__init__(self)\n self.host = host\n self.cmd_port = cmd_port\n self.event_port = event_port\n self.cmd_host_string = 'tcp://%s:%i' % (self.host, self.cmd_port)\n self.event_host_string = 'tcp://%s:%i' % (self.host, self.event_port)\n self.zmq_context = None\n self.zmq_cmd_socket = None\n self.event_thread = None\n self.stop_event_thread = True\n\n def start_messaging(self, evt_callback=None):\n \"\"\"Initialize and start messaging resources for the driver process client. Initializes command socket for sending requests, and starts event thread that listens for events from the driver process independently of command request-reply.\"\"\"\n self.zmq_context = zmq.Context()\n self.zmq_cmd_socket = self.zmq_context.socket(zmq.REQ)\n self.zmq_cmd_socket.connect(self.cmd_host_string)\n log.info('Driver client cmd socket connected to %s.' % self.cmd_host_string)\n self.evt_callback = evt_callback\n\n def recv_evt_messages(driver_client):\n \"\"\"\n A looping function that monitors a ZMQ SUB socket for asynchronous\n driver events. Can be run as a thread or greenlet.\n @param driver_client The client object that launches the thread.\n \"\"\"\n context = zmq.Context()\n sock = context.socket(zmq.SUB)\n sock.connect(driver_client.event_host_string)\n sock.setsockopt(zmq.SUBSCRIBE, '')\n log.info('Driver client event thread connected to %s.' % driver_client.event_host_string)\n driver_client.stop_event_thread = False\n while not driver_client.stop_event_thread:\n try:\n evt = sock.recv_pyobj(flags=zmq.NOBLOCK)\n log.debug('got event: %s' % str(evt))\n if driver_client.evt_callback:\n driver_client.evt_callback(evt)\n except zmq.ZMQError:\n time.sleep(0.5)\n sock.close()\n context.term()\n log.info('Client event socket closed.')\n self.event_thread = thread.start_new_thread(recv_evt_messages, (self,))\n log.info('Driver client messaging started.')\n\n def stop_messaging(self):\n \"\"\"Close messaging resources for the driver process client. Close ZMQ command socket and terminate command context. Set flag to cause event thread to close event socket and context and terminate. Await event thread completion and return.\"\"\"\n self.zmq_cmd_socket.close()\n self.zmq_cmd_socket = None\n self.zmq_context.term()\n self.zmq_context = None\n self.stop_event_thread = True\n self.event_thread = None\n self.evt_callback = None\n log.info('Driver client messaging closed.')\n\n def cmd_dvr(self, cmd, *args, **kwargs):\n \"\"\"Command a driver by request-reply messaging. Package command message and send on blocking command socket. Block on same socket to receive the reply. Return the driver reply. @param cmd The driver command identifier. @param args Positional arguments of the command. @param kwargs Keyword arguments of the command. @retval Command result.\"\"\"\n msg = {'cmd': cmd, 'args': args, 'kwargs': kwargs}\n log.debug('Sending command %s.' 
% str(msg))\n while True:\n try:\n self.zmq_cmd_socket.send_pyobj(msg)\n if msg == 'stop_driver_process':\n return 'driver stopping'\n break\n except zmq.ZMQError:\n time.sleep(0.5)\n log.debug('Awaiting reply.')\n while True:\n try:\n reply = self.zmq_cmd_socket.recv_pyobj(flags=zmq.NOBLOCK)\n break\n except zmq.ZMQError:\n time.sleep(0.5)\n log.debug('Reply: %s.' % str(reply))\n if isinstance(reply, Exception):\n raise reply\n else:\n return reply\n", "source": "the_stack_v2_python_sparse", "source_path": "mi/core/instrument/zmq_driver_client.py", "source_repo": "oceanobservatories/mi-instrument", "split": "val", "star_events_count": 1} {"blob_id": "fee68699409cd9530e8fd967122376d13e8b3d7c", "bodies": ["x = self.root_node.gui.dialogs.constant_handler_ASK_INTEGER(x, title='Set Mouse Cursor Position', prompt='Please input x-coordinate:')\ny = self.get_y()\nctypes.windll.user32.SetCursorPos(x, y)", "x = self.get_x()\ny = self.root_node.gui.dialogs.constant_handler_ASK_INTEGER(y, title='Set Mouse Cursor Position', prompt='Please input y-coordinate:')\nctypes.windll.user32.SetCursorPos(x, y)", "button = button.upper()\nfor action in ('DOWN', 'UP'):\n const = getattr(win32con, 'MOUSEEVENTF_{}{}'.format(button, action))\n ctypes.windll.user32.mouse_event(const, 0, 0, 0, 0)"], "bodies_text": "<|body_start_0|>\n x = self.root_node.gui.dialogs.constant_handler_ASK_INTEGER(x, title='Set Mouse Cursor Position', prompt='Please input x-coordinate:')\n y = self.get_y()\n ctypes.windll.user32.SetCursorPos(x, y)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.get_x()\n y = self.root_node.gui.dialogs.constant_handler_ASK_INTEGER(y, title='Set Mouse Cursor Position', prompt='Please input y-coordinate:')\n ctypes.windll.user32.SetCursorPos(x, y)\n<|end_body_1|>\n\n<|body_start_2|>\n button = button.upper()\n for action in ('DOWN', 'UP'):\n const = getattr(win32con, 'MOUSEEVENTF_{}{}'.format(button, action))\n ctypes.windll.user32.mouse_event(const, 0, 0, 0, 0)\n<|end_body_2|>\n", "class_docstring": "The advanced mouse node on Windows.", "class_name": "Mouse", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Mouse:\n \"\"\"The advanced mouse node on Windows.\"\"\"\n\n def set_x(self, x):\n \"\"\"Set the x-coord of the mouse pointer. x: int. The new x-coord of the mouse pointer.\"\"\"\n <|body_0|>\n\n def set_y(self, y):\n \"\"\"Set the y-coord of the mouse pointer. y: int. The new y-coord of the mouse pointer.\"\"\"\n <|body_1|>\n\n def click(self, button='left'):\n \"\"\"Simulate a mouse click. button: [\"left\", \"right\", \"middle\"]. 
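Editor's note on the Mouse record: click builds win32con constant names such as MOUSEEVENTF_LEFTDOWN via getattr. For readers without pywin32, the same call works with the raw winuser.h flag values, which is all those constants resolve to:

import ctypes

# Standard winuser.h mouse_event flags (the values behind win32con.MOUSEEVENTF_*).
MOUSEEVENTF = {
    'LEFTDOWN': 0x0002, 'LEFTUP': 0x0004,
    'RIGHTDOWN': 0x0008, 'RIGHTUP': 0x0010,
    'MIDDLEDOWN': 0x0020, 'MIDDLEUP': 0x0040,
}

def click(button='left'):
    # Windows-only sketch equivalent to the record's Mouse.click.
    button = button.upper()
    for action in ('DOWN', 'UP'):
        ctypes.windll.user32.mouse_event(MOUSEEVENTF[button + action], 0, 0, 0, 0)

Microsoft documents mouse_event as superseded by SendInput, but it remains adequate for simple simulation like this.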
The button which will be simulated.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n x = self.root_node.gui.dialogs.constant_handler_ASK_INTEGER(x, title='Set Mouse Cursor Position', prompt='Please input x-coordinate:')\n y = self.get_y()\n ctypes.windll.user32.SetCursorPos(x, y)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.get_x()\n y = self.root_node.gui.dialogs.constant_handler_ASK_INTEGER(y, title='Set Mouse Cursor Position', prompt='Please input y-coordinate:')\n ctypes.windll.user32.SetCursorPos(x, y)\n<|end_body_1|>\n\n<|body_start_2|>\n button = button.upper()\n for action in ('DOWN', 'UP'):\n const = getattr(win32con, 'MOUSEEVENTF_{}{}'.format(button, action))\n ctypes.windll.user32.mouse_event(const, 0, 0, 0, 0)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000402", "length_bytes": 18417, "license_type": "no_license", "methods": [{"docstring": "Set the x-coord of the mouse pointer. x: int. The new x-coord of the mouse pointer.", "name": "set_x", "signature": "def set_x(self, x)"}, {"docstring": "Set the y-coord of the mouse pointer. y: int. The new y-coord of the mouse pointer.", "name": "set_y", "signature": "def set_y(self, y)"}, {"docstring": "Simulate a mouse click. button: [\"left\", \"right\", \"middle\"]. The button which will be simulated.", "name": "click", "signature": "def click(self, button='left')"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002245", "prompt": "Implement the Python class `Mouse` described below.\n\nClass description:\nThe advanced mouse node on Windows.\n\nMethod signatures and docstrings:\n- def set_x(self, x): Set the x-coord of the mouse pointer. x: int. The new x-coord of the mouse pointer.\n- def set_y(self, y): Set the y-coord of the mouse pointer. y: int. The new y-coord of the mouse pointer.\n- def click(self, button='left'): Simulate a mouse click. button: [\"left\", \"right\", \"middle\"]. The button which will be simulated.", "prompted_full_text": "Implement the Python class `Mouse` described below.\n\nClass description:\nThe advanced mouse node on Windows.\n\nMethod signatures and docstrings:\n- def set_x(self, x): Set the x-coord of the mouse pointer. x: int. The new x-coord of the mouse pointer.\n- def set_y(self, y): Set the y-coord of the mouse pointer. y: int. The new y-coord of the mouse pointer.\n- def click(self, button='left'): Simulate a mouse click. button: [\"left\", \"right\", \"middle\"]. The button which will be simulated.\n\n<|skeleton|>\nclass Mouse:\n \"\"\"The advanced mouse node on Windows.\"\"\"\n\n def set_x(self, x):\n \"\"\"Set the x-coord of the mouse pointer. x: int. The new x-coord of the mouse pointer.\"\"\"\n <|body_0|>\n\n def set_y(self, y):\n \"\"\"Set the y-coord of the mouse pointer. y: int. The new y-coord of the mouse pointer.\"\"\"\n <|body_1|>\n\n def click(self, button='left'):\n \"\"\"Simulate a mouse click. button: [\"left\", \"right\", \"middle\"]. 
The button which will be simulated.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n x = self.root_node.gui.dialogs.constant_handler_ASK_INTEGER(x, title='Set Mouse Cursor Position', prompt='Please input x-coordinate:')\n y = self.get_y()\n ctypes.windll.user32.SetCursorPos(x, y)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.get_x()\n y = self.root_node.gui.dialogs.constant_handler_ASK_INTEGER(y, title='Set Mouse Cursor Position', prompt='Please input y-coordinate:')\n ctypes.windll.user32.SetCursorPos(x, y)\n<|end_body_1|>\n\n<|body_start_2|>\n button = button.upper()\n for action in ('DOWN', 'UP'):\n const = getattr(win32con, 'MOUSEEVENTF_{}{}'.format(button, action))\n ctypes.windll.user32.mouse_event(const, 0, 0, 0, 0)\n<|end_body_2|>\n", "revision_id": "3945ef235ac8e7a7a66fec018597aa9b34b0a4e6", "skeleton": "<|skeleton|>\nclass Mouse:\n \"\"\"The advanced mouse node on Windows.\"\"\"\n\n def set_x(self, x):\n \"\"\"Set the x-coord of the mouse pointer. x: int. The new x-coord of the mouse pointer.\"\"\"\n <|body_0|>\n\n def set_y(self, y):\n \"\"\"Set the y-coord of the mouse pointer. y: int. The new y-coord of the mouse pointer.\"\"\"\n <|body_1|>\n\n def click(self, button='left'):\n \"\"\"Simulate a mouse click. button: [\"left\", \"right\", \"middle\"]. The button which will be simulated.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Mouse:\n \"\"\"The advanced mouse node on Windows.\"\"\"\n\n def set_x(self, x):\n \"\"\"Set the x-coord of the mouse pointer. x: int. The new x-coord of the mouse pointer.\"\"\"\n x = self.root_node.gui.dialogs.constant_handler_ASK_INTEGER(x, title='Set Mouse Cursor Position', prompt='Please input x-coordinate:')\n y = self.get_y()\n ctypes.windll.user32.SetCursorPos(x, y)\n\n def set_y(self, y):\n \"\"\"Set the y-coord of the mouse pointer. y: int. The new y-coord of the mouse pointer.\"\"\"\n x = self.get_x()\n y = self.root_node.gui.dialogs.constant_handler_ASK_INTEGER(y, title='Set Mouse Cursor Position', prompt='Please input y-coordinate:')\n ctypes.windll.user32.SetCursorPos(x, y)\n\n def click(self, button='left'):\n \"\"\"Simulate a mouse click. button: [\"left\", \"right\", \"middle\"]. 
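The record's set_x/set_y call self.get_x() and self.get_y(), which are defined elsewhere in the source file and not shown in this record. A plausible ctypes-only counterpart, for illustration only (GetCursorPos is the standard user32 call; the helper names merely mirror the record):

import ctypes
from ctypes import wintypes

def get_cursor_pos():
    # Current (x, y) of the mouse cursor on Windows.
    pt = wintypes.POINT()
    ctypes.windll.user32.GetCursorPos(ctypes.byref(pt))
    return pt.x, pt.y

def get_x():
    return get_cursor_pos()[0]

def get_y():
    return get_cursor_pos()[1]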
The button which will be simulated.\"\"\"\n button = button.upper()\n for action in ('DOWN', 'UP'):\n const = getattr(win32con, 'MOUSEEVENTF_{}{}'.format(button, action))\n ctypes.windll.user32.mouse_event(const, 0, 0, 0, 0)\n", "source": "the_stack_v2_python_sparse", "source_path": "wavesynlib/interfaces/os/modelnode.py", "source_repo": "xialulee/WaveSyn", "split": "val", "star_events_count": 9} {"blob_id": "536563c680f65385764d84e0b2d0b534c5a77ed1", "bodies": ["self.father = father\nself.row = row\nself.column = column\nself.h = distance((row, column), (target_pos[0], target_pos[1]))\nif not father:\n self.g = 0\nelse:\n self.g = self.father.g + values[map[row][column]]\nself.f = self.h + self.g", "result = ''\nresult += 'Fila: ' + str(self.row)\nresult += ' Columna: ' + str(self.column)\nresult += ' h: ' + str(self.h)\nresult += ' g: ' + str(self.g)\nresult += ' f: ' + str(self.f)\nreturn result"], "bodies_text": "<|body_start_0|>\n self.father = father\n self.row = row\n self.column = column\n self.h = distance((row, column), (target_pos[0], target_pos[1]))\n if not father:\n self.g = 0\n else:\n self.g = self.father.g + values[map[row][column]]\n self.f = self.h + self.g\n<|end_body_0|>\n\n<|body_start_1|>\n result = ''\n result += 'Fila: ' + str(self.row)\n result += ' Columna: ' + str(self.column)\n result += ' h: ' + str(self.h)\n result += ' g: ' + str(self.g)\n result += ' f: ' + str(self.f)\n return result\n<|end_body_1|>\n", "class_docstring": "@brief Representa un nodo(posible estado) en el A*", "class_name": "Nodo", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Nodo:\n \"\"\"@brief Representa un nodo(posible estado) en el A*\"\"\"\n\n def __init__(self, row, column, target_pos, father=None):\n \"\"\"@brief Constructor @param row Fila del nodo en el mapa @param column Columna del nodo en el mapa @param target_pos Posicion del objetivo al que se desea llegar. @param father Nodo padre, None por defecto\"\"\"\n <|body_0|>\n\n def __str__(self):\n \"\"\"@brief Conversor a cadena\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.father = father\n self.row = row\n self.column = column\n self.h = distance((row, column), (target_pos[0], target_pos[1]))\n if not father:\n self.g = 0\n else:\n self.g = self.father.g + values[map[row][column]]\n self.f = self.h + self.g\n<|end_body_0|>\n\n<|body_start_1|>\n result = ''\n result += 'Fila: ' + str(self.row)\n result += ' Columna: ' + str(self.column)\n result += ' h: ' + str(self.h)\n result += ' g: ' + str(self.g)\n result += ' f: ' + str(self.f)\n return result\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000403", "length_bytes": 9806, "license_type": "no_license", "methods": [{"docstring": "@brief Constructor @param row Fila del nodo en el mapa @param column Columna del nodo en el mapa @param target_pos Posicion del objetivo al que se desea llegar. 
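Editor's note on the Nodo record (its docstrings are Spanish; roughly: "represents a node, i.e. a possible state, in A*", taking the node's row and column on the map, the target position, and an optional parent node). The constructor implements the standard A* cost split f = g + h: h is the heuristic distance to the target and g accumulates the terrain cost values[map[row][column]] along the path from the start, so the search expands nodes in ascending f. A minimal sketch of how such nodes are typically kept in the open list (heapq and the tie-breaking counter are an illustration, not part of the record):

import heapq
import itertools

open_list = []
_tie = itertools.count()

def push(node):
    # Lower f first; the counter breaks ties so heapq never has to compare
    # Nodo objects, which define no ordering.
    heapq.heappush(open_list, (node.f, next(_tie), node))

def pop_best():
    return heapq.heappop(open_list)[-1]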
@param father Nodo padre, None por defecto", "name": "__init__", "signature": "def __init__(self, row, column, target_pos, father=None)"}, {"docstring": "@brief Conversor a cadena", "name": "__str__", "signature": "def __str__(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000637", "prompt": "Implement the Python class `Nodo` described below.\n\nClass description:\n@brief Representa un nodo(posible estado) en el A*\n\nMethod signatures and docstrings:\n- def __init__(self, row, column, target_pos, father=None): @brief Constructor @param row Fila del nodo en el mapa @param column Columna del nodo en el mapa @param target_pos Posicion del objetivo al que se desea llegar. @param father Nodo padre, None por defecto\n- def __str__(self): @brief Conversor a cadena", "prompted_full_text": "Implement the Python class `Nodo` described below.\n\nClass description:\n@brief Representa un nodo(posible estado) en el A*\n\nMethod signatures and docstrings:\n- def __init__(self, row, column, target_pos, father=None): @brief Constructor @param row Fila del nodo en el mapa @param column Columna del nodo en el mapa @param target_pos Posicion del objetivo al que se desea llegar. @param father Nodo padre, None por defecto\n- def __str__(self): @brief Conversor a cadena\n\n<|skeleton|>\nclass Nodo:\n \"\"\"@brief Representa un nodo(posible estado) en el A*\"\"\"\n\n def __init__(self, row, column, target_pos, father=None):\n \"\"\"@brief Constructor @param row Fila del nodo en el mapa @param column Columna del nodo en el mapa @param target_pos Posicion del objetivo al que se desea llegar. @param father Nodo padre, None por defecto\"\"\"\n <|body_0|>\n\n def __str__(self):\n \"\"\"@brief Conversor a cadena\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.father = father\n self.row = row\n self.column = column\n self.h = distance((row, column), (target_pos[0], target_pos[1]))\n if not father:\n self.g = 0\n else:\n self.g = self.father.g + values[map[row][column]]\n self.f = self.h + self.g\n<|end_body_0|>\n\n<|body_start_1|>\n result = ''\n result += 'Fila: ' + str(self.row)\n result += ' Columna: ' + str(self.column)\n result += ' h: ' + str(self.h)\n result += ' g: ' + str(self.g)\n result += ' f: ' + str(self.f)\n return result\n<|end_body_1|>\n", "revision_id": "994a5ca9b464c9e11de96d50079503743a0035fc", "skeleton": "<|skeleton|>\nclass Nodo:\n \"\"\"@brief Representa un nodo(posible estado) en el A*\"\"\"\n\n def __init__(self, row, column, target_pos, father=None):\n \"\"\"@brief Constructor @param row Fila del nodo en el mapa @param column Columna del nodo en el mapa @param target_pos Posicion del objetivo al que se desea llegar. @param father Nodo padre, None por defecto\"\"\"\n <|body_0|>\n\n def __str__(self):\n \"\"\"@brief Conversor a cadena\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Nodo:\n \"\"\"@brief Representa un nodo(posible estado) en el A*\"\"\"\n\n def __init__(self, row, column, target_pos, father=None):\n \"\"\"@brief Constructor @param row Fila del nodo en el mapa @param column Columna del nodo en el mapa @param target_pos Posicion del objetivo al que se desea llegar. 
@param father Nodo padre, None por defecto\"\"\"\n self.father = father\n self.row = row\n self.column = column\n self.h = distance((row, column), (target_pos[0], target_pos[1]))\n if not father:\n self.g = 0\n else:\n self.g = self.father.g + values[map[row][column]]\n self.f = self.h + self.g\n\n def __str__(self):\n \"\"\"@brief Conversor a cadena\"\"\"\n result = ''\n result += 'Fila: ' + str(self.row)\n result += ' Columna: ' + str(self.column)\n result += ' h: ' + str(self.h)\n result += ' g: ' + str(self.g)\n result += ' f: ' + str(self.f)\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "engine/astar.py", "source_repo": "jmarente/zycars", "split": "val", "star_events_count": 0} {"blob_id": "ebdbb32710c5c0e0c3085f464d3dca6c63f8a459", "bodies": ["sums = [0] * (len(nums) + 1)\nfor i in range(len(nums)):\n sums[i + 1] = nums[i] + sums[i]\nfor i in range(len(nums) - 1):\n for j in range(i + 1, len(nums) + 1):\n if (sums[j] - sums[i]) % k == 0:\n return True\nreturn False", "modes = set()\npresum = 0\nfor num in nums:\n last = presum\n presum += num\n presum %= k\n if presum in modes:\n return True\n modes.add(last)\nreturn False"], "bodies_text": "<|body_start_0|>\n sums = [0] * (len(nums) + 1)\n for i in range(len(nums)):\n sums[i + 1] = nums[i] + sums[i]\n for i in range(len(nums) - 1):\n for j in range(i + 1, len(nums) + 1):\n if (sums[j] - sums[i]) % k == 0:\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n modes = set()\n presum = 0\n for num in nums:\n last = presum\n presum += num\n presum %= k\n if presum in modes:\n return True\n modes.add(last)\n return False\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def checkSubarraySum(self, nums: List[int], k: int) -> bool:\n \"\"\"简单前缀和,超时,O(n ** 2) :param nums: :param k: :return:\"\"\"\n <|body_0|>\n\n def checkSubarraySum1(self, nums: List[int], k: int) -> bool:\n \"\"\"前缀和 + 同余定理 (sums[j] - sums[i]) % k == 0 同理有 sums[i] % k == sums[i] % k :param nums: :param k: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sums = [0] * (len(nums) + 1)\n for i in range(len(nums)):\n sums[i + 1] = nums[i] + sums[i]\n for i in range(len(nums) - 1):\n for j in range(i + 1, len(nums) + 1):\n if (sums[j] - sums[i]) % k == 0:\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n modes = set()\n presum = 0\n for num in nums:\n last = presum\n presum += num\n presum %= k\n if presum in modes:\n return True\n modes.add(last)\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000404", "length_bytes": 1948, "license_type": "no_license", "methods": [{"docstring": "简单前缀和,超时,O(n ** 2) :param nums: :param k: :return:", "name": "checkSubarraySum", "signature": "def checkSubarraySum(self, nums: List[int], k: int) -> bool"}, {"docstring": "前缀和 + 同余定理 (sums[j] - sums[i]) % k == 0 同理有 sums[i] % k == sums[i] % k :param nums: :param k: :return:", "name": "checkSubarraySum1", "signature": "def checkSubarraySum1(self, nums: List[int], k: int) -> bool"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def checkSubarraySum(self, nums: List[int], k: int) -> bool: 简单前缀和,超时,O(n ** 2) :param nums: :param k: :return:\n- def checkSubarraySum1(self, nums: List[int], k: int) -> bool: 前缀和 + 同余定理 (sums[j] - 
sums[i]) % k == 0 同理有 sums[i] % k == sums[i] % k :param nums: :param k: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def checkSubarraySum(self, nums: List[int], k: int) -> bool: 简单前缀和,超时,O(n ** 2) :param nums: :param k: :return:\n- def checkSubarraySum1(self, nums: List[int], k: int) -> bool: 前缀和 + 同余定理 (sums[j] - sums[i]) % k == 0 同理有 sums[i] % k == sums[i] % k :param nums: :param k: :return:\n\n<|skeleton|>\nclass Solution:\n\n def checkSubarraySum(self, nums: List[int], k: int) -> bool:\n \"\"\"简单前缀和,超时,O(n ** 2) :param nums: :param k: :return:\"\"\"\n <|body_0|>\n\n def checkSubarraySum1(self, nums: List[int], k: int) -> bool:\n \"\"\"前缀和 + 同余定理 (sums[j] - sums[i]) % k == 0 同理有 sums[i] % k == sums[i] % k :param nums: :param k: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sums = [0] * (len(nums) + 1)\n for i in range(len(nums)):\n sums[i + 1] = nums[i] + sums[i]\n for i in range(len(nums) - 1):\n for j in range(i + 1, len(nums) + 1):\n if (sums[j] - sums[i]) % k == 0:\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n modes = set()\n presum = 0\n for num in nums:\n last = presum\n presum += num\n presum %= k\n if presum in modes:\n return True\n modes.add(last)\n return False\n<|end_body_1|>\n", "revision_id": "9acba92695c06406f12f997a720bfe1deb9464a8", "skeleton": "<|skeleton|>\nclass Solution:\n\n def checkSubarraySum(self, nums: List[int], k: int) -> bool:\n \"\"\"简单前缀和,超时,O(n ** 2) :param nums: :param k: :return:\"\"\"\n <|body_0|>\n\n def checkSubarraySum1(self, nums: List[int], k: int) -> bool:\n \"\"\"前缀和 + 同余定理 (sums[j] - sums[i]) % k == 0 同理有 sums[i] % k == sums[i] % k :param nums: :param k: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def checkSubarraySum(self, nums: List[int], k: int) -> bool:\n \"\"\"简单前缀和,超时,O(n ** 2) :param nums: :param k: :return:\"\"\"\n sums = [0] * (len(nums) + 1)\n for i in range(len(nums)):\n sums[i + 1] = nums[i] + sums[i]\n for i in range(len(nums) - 1):\n for j in range(i + 1, len(nums) + 1):\n if (sums[j] - sums[i]) % k == 0:\n return True\n return False\n\n def checkSubarraySum1(self, nums: List[int], k: int) -> bool:\n \"\"\"前缀和 + 同余定理 (sums[j] - sums[i]) % k == 0 同理有 sums[i] % k == sums[i] % k :param nums: :param k: :return:\"\"\"\n modes = set()\n presum = 0\n for num in nums:\n last = presum\n presum += num\n presum %= k\n if presum in modes:\n return True\n modes.add(last)\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "datastructure/binary_array/CheckSubarraySum.py", "source_repo": "yinhuax/leet_code", "split": "val", "star_events_count": 0} {"blob_id": "a67e5fde5bf0a4e1aded27b3a2f3c77ad60a7419", "bodies": ["if isinstance(gis, GIS):\n self._url = url\n self._gis = gis\n self._portal = gis._portal\n self._con = gis._con\nelse:\n raise ValueError('gis object must be of type GIS')", "url = '%s/clean' % self._url\nparams = {'f': 'json'}\nres = self._con.post(path=url, postdata=params)\nif isinstance(res, dict) and 'status' in res:\n return res['status'] == 'success'\nreturn False", "url = '%s/settings' % self._url\nparams = {'f': 'json'}\nreturn self._con.get(path=url, params=params)", "url = '%s/settings/edit' % self._url\nparams = {'f': 'json'}\nif isinstance(value, dict):\n for k, v in 
value.items():\n params[k] = v\nelse:\n raise ValueError('Value must be a dictionary')\nreturn self._con.post(path=url, postdata=params)", "from datetime import datetime\nfrom six import integer_types, string_types\nurl = '%s/query' % self._url\nif isinstance(start_time, datetime):\n start_time = start_time.strftime('%Y-%m-%dT%H:%M:%S')\nelif isinstance(start_time, string_types):\n try:\n datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S')\n except:\n raise Exception('Invalid start_time string, must be in the format YYYY-MM-DDTHH:MM:SS')\nelif isinstance(start_time, tuple(list(integer_types) + [float])):\n start_time = datetime.utcfromtimestamp(start_time).strftime('%Y-%m-%dT%H:%M:%S')\nif end_time is None:\n end_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\nelif isinstance(end_time, datetime):\n end_time = end_time.strftime('%Y-%m-%dT%H:%M:%S')\nelif isinstance(end_time, string_types):\n try:\n datetime.strptime(end_time, '%Y-%m-%dT%H:%M:%S')\n except:\n raise Exception('Invalid end_time string, must be in the format YYYY-MM-DDTHH:MM:SS')\nelif isinstance(end_time, tuple(list(integer_types) + [float])):\n end_time = datetime.utcfromtimestamp(end_time).strftime('%Y-%m-%dT%H:%M:%S')\nif query_filter == '*':\n query_filter = {'codes': [], 'users': [], 'source': '*'}\nparams = {'startTime': start_time, 'endTime': end_time, 'level': level, 'f': 'json', 'filterType': 'json', 'pageSize': page_size}\nif query_filter:\n params['filter'] = query_filter\nreturn self._con.get(path=url, params=params)"], "bodies_text": "<|body_start_0|>\n if isinstance(gis, GIS):\n self._url = url\n self._gis = gis\n self._portal = gis._portal\n self._con = gis._con\n else:\n raise ValueError('gis object must be of type GIS')\n<|end_body_0|>\n\n<|body_start_1|>\n url = '%s/clean' % self._url\n params = {'f': 'json'}\n res = self._con.post(path=url, postdata=params)\n if isinstance(res, dict) and 'status' in res:\n return res['status'] == 'success'\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n url = '%s/settings' % self._url\n params = {'f': 'json'}\n return self._con.get(path=url, params=params)\n<|end_body_2|>\n\n<|body_start_3|>\n url = '%s/settings/edit' % self._url\n params = {'f': 'json'}\n if isinstance(value, dict):\n for k, v in value.items():\n params[k] = v\n else:\n raise ValueError('Value must be a dictionary')\n return self._con.post(path=url, postdata=params)\n<|end_body_3|>\n\n<|body_start_4|>\n from datetime import datetime\n from six import integer_types, string_types\n url = '%s/query' % self._url\n if isinstance(start_time, datetime):\n start_time = start_time.strftime('%Y-%m-%dT%H:%M:%S')\n elif isinstance(start_time, string_types):\n try:\n datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S')\n except:\n raise Exception('Invalid start_time string, must be in the format YYYY-MM-DDTHH:MM:SS')\n elif isinstance(start_time, tuple(list(integer_types) + [float])):\n start_time = datetime.utcfromtimestamp(start_time).strftime('%Y-%m-%dT%H:%M:%S')\n if end_time is None:\n end_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\n elif isinstance(end_time, datetime):\n end_time = end_time.strftime('%Y-%m-%dT%H:%M:%S')\n elif isinstance(end_time, string_types):\n try:\n datetime.strptime(end_time, '%Y-%m-%dT%H:%M:%S')\n except:\n raise Exception('Invalid end_time string, must be in the format YYYY-MM-DDTHH:MM:SS')\n elif isinstance(end_time, tuple(list(integer_types) + [float])):\n end_time = datetime.utcfromtimestamp(end_time).strftime('%Y-%m-%dT%H:%M:%S')\n if query_filter == '*':\n query_filter 
= {'codes': [], 'users': [], 'source': '*'}\n params = {'startTime': start_time, 'endTime': end_time, 'level': level, 'f': 'json', 'filterType': 'json', 'pageSize': page_size}\n if query_filter:\n params['filter'] = query_filter\n return self._con.get(path=url, params=params)\n<|end_body_4|>\n", "class_docstring": "Logs are records written by various components of the portal. You can query the logs, clean the logs, and edit log settings. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- gis required GIS, portal connection object ---------------- --------------------------------------------------------------- url required string, web address of the log resource ================ ===============================================================", "class_name": "Logs", "detected_licenses": ["Python-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Logs:\n \"\"\"Logs are records written by various components of the portal. You can query the logs, clean the logs, and edit log settings. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- gis required GIS, portal connection object ---------------- --------------------------------------------------------------- url required string, web address of the log resource ================ ===============================================================\"\"\"\n\n def __init__(self, url, gis):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def clean(self):\n \"\"\"Deletes all the log files on the machine hosting Portal for ArcGIS. This operation allows you to free up disk space. The logs cannot be recovered after executing this operation. .. code-block:: python USAGE: Clean logs from your Portal Admin API from arcgis.gis import GIS gis = GIS(\"https://yourportal.com/portal\", \"portaladmin\", \"password\") logs = gis.admin.logs resp = logs.clean() print(resp) # Output True :returns: Boolean True or False depicting success\"\"\"\n <|body_1|>\n\n def settings(self):\n \"\"\"Reads/writes the current log settings for the portal. .. code-block:: python USAGE: Print out the Log Settings from arcgis.gis import GIS gis = GIS(\"https://yourportal.com/portal\", \"portaladmin\", \"password\") logs = gis.admin.logs logsettings = logs.settings for key, value in dict(logsettings).items(): print(\"{} : {}\".format(key, value)) # Output logDir : C:\\\\arcgisportal\\\\logs logLevel : INFO maxErrorReportsCount : 10 maxLogFileAge : 90 usageMeteringEnabled : False :returns: Dictionary of key/value pairs of log settings\"\"\"\n <|body_2|>\n\n def settings(self, value):\n \"\"\"Reads/writes the current log settings for the portal. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- value required dictionary, the dictionary of the log settings ================ =============================================================== :returns: None\"\"\"\n <|body_3|>\n\n def query(self, start_time, end_time=None, level='WARNING', query_filter='*', page_size=1000):\n \"\"\"The query operation allows you to aggregate, filter, and page through logs written by the portal. 
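Editor's note on the checkSubarraySum record above (its docstrings are Chinese; roughly: the first method is the naive O(n**2) prefix-sum scan that times out, the second uses prefix sums plus modular congruence). Two details worth spelling out. First, the docstring's "同理有 sums[i] % k == sums[i] % k" contains a typo: the congruence it means is sums[j] % k == sums[i] % k, which is equivalent to (sums[j] - sums[i]) % k == 0. Second, the `last` variable delays adding each remainder to the set by one iteration, and that lag is exactly what enforces the problem's minimum subarray length of 2. (Note that `presum %= k` raises ZeroDivisionError if k can be 0, so that case would need special handling.) A small self-contained check against LeetCode 523's samples:

def check_subarray_sum(nums, k):
    seen = set()
    presum = 0
    for num in nums:
        last = presum                 # remainder before this element
        presum = (presum + num) % k
        if presum in seen:            # matching prefix ends >= 2 elements back
            return True
        seen.add(last)
    return False

# 2 + 4 == 6, a multiple of k=6 -> True
assert check_subarray_sum([23, 2, 4, 6, 7], 6) is True
# no subarray of length >= 2 sums to a multiple of 13 -> False
assert check_subarray_sum([23, 2, 6, 4, 7], 13) is False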
================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- start_time required datetime/float. The most recent time to query. Local date corresponding to the POSIX timestamp, such as is returned by time.time(). This may raise OverflowError, if the timestamp is out of the range of values supported by the platform. It's common for this to be restricted to years from 1970 through 2038. Time can be specified as a portal timestamp (format in \"%Y-%m-%dT%H:%M:%S\") or in se\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(gis, GIS):\n self._url = url\n self._gis = gis\n self._portal = gis._portal\n self._con = gis._con\n else:\n raise ValueError('gis object must be of type GIS')\n<|end_body_0|>\n\n<|body_start_1|>\n url = '%s/clean' % self._url\n params = {'f': 'json'}\n res = self._con.post(path=url, postdata=params)\n if isinstance(res, dict) and 'status' in res:\n return res['status'] == 'success'\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n url = '%s/settings' % self._url\n params = {'f': 'json'}\n return self._con.get(path=url, params=params)\n<|end_body_2|>\n\n<|body_start_3|>\n url = '%s/settings/edit' % self._url\n params = {'f': 'json'}\n if isinstance(value, dict):\n for k, v in value.items():\n params[k] = v\n else:\n raise ValueError('Value must be a dictionary')\n return self._con.post(path=url, postdata=params)\n<|end_body_3|>\n\n<|body_start_4|>\n from datetime import datetime\n from six import integer_types, string_types\n url = '%s/query' % self._url\n if isinstance(start_time, datetime):\n start_time = start_time.strftime('%Y-%m-%dT%H:%M:%S')\n elif isinstance(start_time, string_types):\n try:\n datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S')\n except:\n raise Exception('Invalid start_time string, must be in the format YYYY-MM-DDTHH:MM:SS')\n elif isinstance(start_time, tuple(list(integer_types) + [float])):\n start_time = datetime.utcfromtimestamp(start_time).strftime('%Y-%m-%dT%H:%M:%S')\n if end_time is None:\n end_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\n elif isinstance(end_time, datetime):\n end_time = end_time.strftime('%Y-%m-%dT%H:%M:%S')\n elif isinstance(end_time, string_types):\n try:\n datetime.strptime(end_time, '%Y-%m-%dT%H:%M:%S')\n except:\n raise Exception('Invalid end_time string, must be in the format YYYY-MM-DDTHH:MM:SS')\n elif isinstance(end_time, tuple(list(integer_types) + [float])):\n end_time = datetime.utcfromtimestamp(end_time).strftime('%Y-%m-%dT%H:%M:%S')\n if query_filter == '*':\n query_filter = {'codes': [], 'users': [], 'source': '*'}\n params = {'startTime': start_time, 'endTime': end_time, 'level': level, 'f': 'json', 'filterType': 'json', 'pageSize': page_size}\n if query_filter:\n params['filter'] = query_filter\n return self._con.get(path=url, params=params)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000405", "length_bytes": 10658, "license_type": "permissive", "methods": [{"docstring": "Constructor", "name": "__init__", "signature": "def __init__(self, url, gis)"}, {"docstring": "Deletes all the log files on the machine hosting Portal for ArcGIS. This operation allows you to free up disk space. The logs cannot be recovered after executing this operation. .. 
code-block:: python USAGE: Clean logs from your Portal Admin API from arcgis.gis import GIS gis = GIS(\"https://yourportal.com/portal\", \"portaladmin\", \"password\") logs = gis.admin.logs resp = logs.clean() print(resp) # Output True :returns: Boolean True or False depicting success", "name": "clean", "signature": "def clean(self)"}, {"docstring": "Reads/writes the current log settings for the portal. .. code-block:: python USAGE: Print out the Log Settings from arcgis.gis import GIS gis = GIS(\"https://yourportal.com/portal\", \"portaladmin\", \"password\") logs = gis.admin.logs logsettings = logs.settings for key, value in dict(logsettings).items(): print(\"{} : {}\".format(key, value)) # Output logDir : C:\\\\arcgisportal\\\\logs logLevel : INFO maxErrorReportsCount : 10 maxLogFileAge : 90 usageMeteringEnabled : False :returns: Dictionary of key/value pairs of log settings", "name": "settings", "signature": "def settings(self)"}, {"docstring": "Reads/writes the current log settings for the portal. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- value required dictionary, the dictionary of the log settings ================ =============================================================== :returns: None", "name": "settings", "signature": "def settings(self, value)"}, {"docstring": "The query operation allows you to aggregate, filter, and page through logs written by the portal. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- start_time required datetime/float. The most recent time to query. Local date corresponding to the POSIX timestamp, such as is returned by time.time(). This may raise OverflowError, if the timestamp is out of the range of values supported by the platform. It's common for this to be restricted to years from 1970 through 2038. Time can be specified as a portal timestamp (format in \"%Y-%m-%dT%H:%M:%S\") or in se", "name": "query", "signature": "def query(self, start_time, end_time=None, level='WARNING', query_filter='*', page_size=1000)"}], "n_methods": 5, "prompt": "Implement the Python class `Logs` described below.\n\nClass description:\nLogs are records written by various components of the portal. You can query the logs, clean the logs, and edit log settings. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- gis required GIS, portal connection object ---------------- --------------------------------------------------------------- url required string, web address of the log resource ================ ===============================================================\n\nMethod signatures and docstrings:\n- def __init__(self, url, gis): Constructor\n- def clean(self): Deletes all the log files on the machine hosting Portal for ArcGIS. This operation allows you to free up disk space. The logs cannot be recovered after executing this operation. .. 
code-block:: python USAGE: Clean logs from your Portal Admin API from arcgis.gis import GIS gis = GIS(\"https://yourportal.com/portal\", \"portaladmin\", \"password\") logs = gis.admin.logs resp = logs.clean() print(resp) # Output True :returns: Boolean True or False depicting success\n- def settings(self): Reads/writes the current log settings for the portal. .. code-block:: python USAGE: Print out the Log Settings from arcgis.gis import GIS gis = GIS(\"https://yourportal.com/portal\", \"portaladmin\", \"password\") logs = gis.admin.logs logsettings = logs.settings for key, value in dict(logsettings).items(): print(\"{} : {}\".format(key, value)) # Output logDir : C:\\\\arcgisportal\\\\logs logLevel : INFO maxErrorReportsCount : 10 maxLogFileAge : 90 usageMeteringEnabled : False :returns: Dictionary of key/value pairs of log settings\n- def settings(self, value): Reads/writes the current log settings for the portal. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- value required dictionary, the dictionary of the log settings ================ =============================================================== :returns: None\n- def query(self, start_time, end_time=None, level='WARNING', query_filter='*', page_size=1000): The query operation allows you to aggregate, filter, and page through logs written by the portal. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- start_time required datetime/float. The most recent time to query. Local date corresponding to the POSIX timestamp, such as is returned by time.time(). This may raise OverflowError, if the timestamp is out of the range of values supported by the platform. It's common for this to be restricted to years from 1970 through 2038. Time can be specified as a portal timestamp (format in \"%Y-%m-%dT%H:%M:%S\") or in se", "prompted_full_text": "Implement the Python class `Logs` described below.\n\nClass description:\nLogs are records written by various components of the portal. You can query the logs, clean the logs, and edit log settings. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- gis required GIS, portal connection object ---------------- --------------------------------------------------------------- url required string, web address of the log resource ================ ===============================================================\n\nMethod signatures and docstrings:\n- def __init__(self, url, gis): Constructor\n- def clean(self): Deletes all the log files on the machine hosting Portal for ArcGIS. This operation allows you to free up disk space. The logs cannot be recovered after executing this operation. .. code-block:: python USAGE: Clean logs from your Portal Admin API from arcgis.gis import GIS gis = GIS(\"https://yourportal.com/portal\", \"portaladmin\", \"password\") logs = gis.admin.logs resp = logs.clean() print(resp) # Output True :returns: Boolean True or False depicting success\n- def settings(self): Reads/writes the current log settings for the portal. .. 
code-block:: python USAGE: Print out the Log Settings from arcgis.gis import GIS gis = GIS(\"https://yourportal.com/portal\", \"portaladmin\", \"password\") logs = gis.admin.logs logsettings = logs.settings for key, value in dict(logsettings).items(): print(\"{} : {}\".format(key, value)) # Output logDir : C:\\\\arcgisportal\\\\logs logLevel : INFO maxErrorReportsCount : 10 maxLogFileAge : 90 usageMeteringEnabled : False :returns: Dictionary of key/value pairs of log settings\n- def settings(self, value): Reads/writes the current log settings for the portal. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- value required dictionary, the dictionary of the log settings ================ =============================================================== :returns: None\n- def query(self, start_time, end_time=None, level='WARNING', query_filter='*', page_size=1000): The query operation allows you to aggregate, filter, and page through logs written by the portal. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- start_time required datetime/float. The most recent time to query. Local date corresponding to the POSIX timestamp, such as is returned by time.time(). This may raise OverflowError, if the timestamp is out of the range of values supported by the platform. It's common for this to be restricted to years from 1970 through 2038. Time can be specified as a portal timestamp (format in \"%Y-%m-%dT%H:%M:%S\") or in se\n\n<|skeleton|>\nclass Logs:\n \"\"\"Logs are records written by various components of the portal. You can query the logs, clean the logs, and edit log settings. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- gis required GIS, portal connection object ---------------- --------------------------------------------------------------- url required string, web address of the log resource ================ ===============================================================\"\"\"\n\n def __init__(self, url, gis):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def clean(self):\n \"\"\"Deletes all the log files on the machine hosting Portal for ArcGIS. This operation allows you to free up disk space. The logs cannot be recovered after executing this operation. .. code-block:: python USAGE: Clean logs from your Portal Admin API from arcgis.gis import GIS gis = GIS(\"https://yourportal.com/portal\", \"portaladmin\", \"password\") logs = gis.admin.logs resp = logs.clean() print(resp) # Output True :returns: Boolean True or False depicting success\"\"\"\n <|body_1|>\n\n def settings(self):\n \"\"\"Reads/writes the current log settings for the portal. .. 
code-block:: python USAGE: Print out the Log Settings from arcgis.gis import GIS gis = GIS(\"https://yourportal.com/portal\", \"portaladmin\", \"password\") logs = gis.admin.logs logsettings = logs.settings for key, value in dict(logsettings).items(): print(\"{} : {}\".format(key, value)) # Output logDir : C:\\\\arcgisportal\\\\logs logLevel : INFO maxErrorReportsCount : 10 maxLogFileAge : 90 usageMeteringEnabled : False :returns: Dictionary of key/value pairs of log settings\"\"\"\n <|body_2|>\n\n def settings(self, value):\n \"\"\"Reads/writes the current log settings for the portal. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- value required dictionary, the dictionary of the log settings ================ =============================================================== :returns: None\"\"\"\n <|body_3|>\n\n def query(self, start_time, end_time=None, level='WARNING', query_filter='*', page_size=1000):\n \"\"\"The query operation allows you to aggregate, filter, and page through logs written by the portal. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- start_time required datetime/float. The most recent time to query. Local date corresponding to the POSIX timestamp, such as is returned by time.time(). This may raise OverflowError, if the timestamp is out of the range of values supported by the platform. It's common for this to be restricted to years from 1970 through 2038. Time can be specified as a portal timestamp (format in \"%Y-%m-%dT%H:%M:%S\") or in se\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(gis, GIS):\n self._url = url\n self._gis = gis\n self._portal = gis._portal\n self._con = gis._con\n else:\n raise ValueError('gis object must be of type GIS')\n<|end_body_0|>\n\n<|body_start_1|>\n url = '%s/clean' % self._url\n params = {'f': 'json'}\n res = self._con.post(path=url, postdata=params)\n if isinstance(res, dict) and 'status' in res:\n return res['status'] == 'success'\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n url = '%s/settings' % self._url\n params = {'f': 'json'}\n return self._con.get(path=url, params=params)\n<|end_body_2|>\n\n<|body_start_3|>\n url = '%s/settings/edit' % self._url\n params = {'f': 'json'}\n if isinstance(value, dict):\n for k, v in value.items():\n params[k] = v\n else:\n raise ValueError('Value must be a dictionary')\n return self._con.post(path=url, postdata=params)\n<|end_body_3|>\n\n<|body_start_4|>\n from datetime import datetime\n from six import integer_types, string_types\n url = '%s/query' % self._url\n if isinstance(start_time, datetime):\n start_time = start_time.strftime('%Y-%m-%dT%H:%M:%S')\n elif isinstance(start_time, string_types):\n try:\n datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S')\n except:\n raise Exception('Invalid start_time string, must be in the format YYYY-MM-DDTHH:MM:SS')\n elif isinstance(start_time, tuple(list(integer_types) + [float])):\n start_time = datetime.utcfromtimestamp(start_time).strftime('%Y-%m-%dT%H:%M:%S')\n if end_time is None:\n end_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\n elif isinstance(end_time, datetime):\n end_time = end_time.strftime('%Y-%m-%dT%H:%M:%S')\n elif isinstance(end_time, string_types):\n try:\n datetime.strptime(end_time, '%Y-%m-%dT%H:%M:%S')\n except:\n 
raise Exception('Invalid end_time string, must be in the format YYYY-MM-DDTHH:MM:SS')\n elif isinstance(end_time, tuple(list(integer_types) + [float])):\n end_time = datetime.utcfromtimestamp(end_time).strftime('%Y-%m-%dT%H:%M:%S')\n if query_filter == '*':\n query_filter = {'codes': [], 'users': [], 'source': '*'}\n params = {'startTime': start_time, 'endTime': end_time, 'level': level, 'f': 'json', 'filterType': 'json', 'pageSize': page_size}\n if query_filter:\n params['filter'] = query_filter\n return self._con.get(path=url, params=params)\n<|end_body_4|>\n", "revision_id": "a874fe7e5c95196e4de68db2da0e2a05eb70e5d8", "skeleton": "<|skeleton|>\nclass Logs:\n \"\"\"Logs are records written by various components of the portal. You can query the logs, clean the logs, and edit log settings. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- gis required GIS, portal connection object ---------------- --------------------------------------------------------------- url required string, web address of the log resource ================ ===============================================================\"\"\"\n\n def __init__(self, url, gis):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def clean(self):\n \"\"\"Deletes all the log files on the machine hosting Portal for ArcGIS. This operation allows you to free up disk space. The logs cannot be recovered after executing this operation. .. code-block:: python USAGE: Clean logs from your Portal Admin API from arcgis.gis import GIS gis = GIS(\"https://yourportal.com/portal\", \"portaladmin\", \"password\") logs = gis.admin.logs resp = logs.clean() print(resp) # Output True :returns: Boolean True or False depicting success\"\"\"\n <|body_1|>\n\n def settings(self):\n \"\"\"Reads/writes the current log settings for the portal. .. code-block:: python USAGE: Print out the Log Settings from arcgis.gis import GIS gis = GIS(\"https://yourportal.com/portal\", \"portaladmin\", \"password\") logs = gis.admin.logs logsettings = logs.settings for key, value in dict(logsettings).items(): print(\"{} : {}\".format(key, value)) # Output logDir : C:\\\\arcgisportal\\\\logs logLevel : INFO maxErrorReportsCount : 10 maxLogFileAge : 90 usageMeteringEnabled : False :returns: Dictionary of key/value pairs of log settings\"\"\"\n <|body_2|>\n\n def settings(self, value):\n \"\"\"Reads/writes the current log settings for the portal. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- value required dictionary, the dictionary of the log settings ================ =============================================================== :returns: None\"\"\"\n <|body_3|>\n\n def query(self, start_time, end_time=None, level='WARNING', query_filter='*', page_size=1000):\n \"\"\"The query operation allows you to aggregate, filter, and page through logs written by the portal. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- start_time required datetime/float. The most recent time to query. Local date corresponding to the POSIX timestamp, such as is returned by time.time(). This may raise OverflowError, if the timestamp is out of the range of values supported by the platform. 
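The query body in this record normalizes three kinds of time input (datetime objects, preformatted strings, and POSIX numbers) to the portal timestamp format "%Y-%m-%dT%H:%M:%S". A standalone sketch of that normalization follows; the helper name to_portal_timestamp is my own label for illustration, not part of the arcgis API, and utcfromtimestamp mirrors the record's own usage even though newer Python versions prefer timezone-aware alternatives.

from datetime import datetime

PORTAL_FMT = '%Y-%m-%dT%H:%M:%S'

def to_portal_timestamp(value):
    """Normalize a datetime, preformatted string, or POSIX number to
    the portal timestamp format used by the logs/query endpoint."""
    if isinstance(value, datetime):
        return value.strftime(PORTAL_FMT)
    if isinstance(value, str):
        # Validate rather than convert; raises ValueError on a bad format.
        datetime.strptime(value, PORTAL_FMT)
        return value
    if isinstance(value, (int, float)):
        # POSIX timestamp such as time.time(); may raise OverflowError
        # outside the platform-supported range (commonly 1970 through 2038).
        return datetime.utcfromtimestamp(value).strftime(PORTAL_FMT)
    raise TypeError('unsupported timestamp type: %r' % type(value))

assert to_portal_timestamp(0.0) == '1970-01-01T00:00:00'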
It's common for this to be restricted to years from 1970 through 2038. Time can be specified as a portal timestamp (format in \"%Y-%m-%dT%H:%M:%S\") or in se\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Logs:\n \"\"\"Logs are records written by various components of the portal. You can query the logs, clean the logs, and edit log settings. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- gis required GIS, portal connection object ---------------- --------------------------------------------------------------- url required string, web address of the log resource ================ ===============================================================\"\"\"\n\n def __init__(self, url, gis):\n \"\"\"Constructor\"\"\"\n if isinstance(gis, GIS):\n self._url = url\n self._gis = gis\n self._portal = gis._portal\n self._con = gis._con\n else:\n raise ValueError('gis object must be of type GIS')\n\n def clean(self):\n \"\"\"Deletes all the log files on the machine hosting Portal for ArcGIS. This operation allows you to free up disk space. The logs cannot be recovered after executing this operation. .. code-block:: python USAGE: Clean logs from your Portal Admin API from arcgis.gis import GIS gis = GIS(\"https://yourportal.com/portal\", \"portaladmin\", \"password\") logs = gis.admin.logs resp = logs.clean() print(resp) # Output True :returns: Boolean True or False depicting success\"\"\"\n url = '%s/clean' % self._url\n params = {'f': 'json'}\n res = self._con.post(path=url, postdata=params)\n if isinstance(res, dict) and 'status' in res:\n return res['status'] == 'success'\n return False\n\n def settings(self):\n \"\"\"Reads/writes the current log settings for the portal. .. code-block:: python USAGE: Print out the Log Settings from arcgis.gis import GIS gis = GIS(\"https://yourportal.com/portal\", \"portaladmin\", \"password\") logs = gis.admin.logs logsettings = logs.settings for key, value in dict(logsettings).items(): print(\"{} : {}\".format(key, value)) # Output logDir : C:\\\\arcgisportal\\\\logs logLevel : INFO maxErrorReportsCount : 10 maxLogFileAge : 90 usageMeteringEnabled : False :returns: Dictionary of key/value pairs of log settings\"\"\"\n url = '%s/settings' % self._url\n params = {'f': 'json'}\n return self._con.get(path=url, params=params)\n\n def settings(self, value):\n \"\"\"Reads/writes the current log settings for the portal. ================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- value required dictionary, the dictionary of the log settings ================ =============================================================== :returns: None\"\"\"\n url = '%s/settings/edit' % self._url\n params = {'f': 'json'}\n if isinstance(value, dict):\n for k, v in value.items():\n params[k] = v\n else:\n raise ValueError('Value must be a dictionary')\n return self._con.post(path=url, postdata=params)\n\n def query(self, start_time, end_time=None, level='WARNING', query_filter='*', page_size=1000):\n \"\"\"The query operation allows you to aggregate, filter, and page through logs written by the portal. 
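One detail worth flagging in the skeleton above: def settings(self) and def settings(self, value) share a name, so as plain methods the second definition simply shadows the first. The read/write behavior the docstrings describe is normally achieved with a property and setter pair, which the real library presumably uses; the LogSettingsHolder class below is an illustrative sketch of that pattern, not the arcgis internals.

class LogSettingsHolder:
    """Illustrative read/write settings via a property + setter pair."""

    def __init__(self):
        self._settings = {'logLevel': 'INFO', 'maxLogFileAge': 90}

    @property
    def settings(self):
        # Read path: return a copy so callers cannot mutate state directly.
        return dict(self._settings)

    @settings.setter
    def settings(self, value):
        # Write path: mirrors the dict validation in the solution body.
        if not isinstance(value, dict):
            raise ValueError('Value must be a dictionary')
        self._settings.update(value)

holder = LogSettingsHolder()
holder.settings = {'logLevel': 'DEBUG'}
assert holder.settings['logLevel'] == 'DEBUG'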
================ =============================================================== **Argument** **Description** ---------------- --------------------------------------------------------------- start_time required datetime/float. The most recent time to query. Local date corresponding to the POSIX timestamp, such as is returned by time.time(). This may raise OverflowError, if the timestamp is out of the range of values supported by the platform. It's common for this to be restricted to years from 1970 through 2038. Time can be specified as a portal timestamp (format in \"%Y-%m-%dT%H:%M:%S\") or in se\"\"\"\n from datetime import datetime\n from six import integer_types, string_types\n url = '%s/query' % self._url\n if isinstance(start_time, datetime):\n start_time = start_time.strftime('%Y-%m-%dT%H:%M:%S')\n elif isinstance(start_time, string_types):\n try:\n datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S')\n except:\n raise Exception('Invalid start_time string, must be in the format YYYY-MM-DDTHH:MM:SS')\n elif isinstance(start_time, tuple(list(integer_types) + [float])):\n start_time = datetime.utcfromtimestamp(start_time).strftime('%Y-%m-%dT%H:%M:%S')\n if end_time is None:\n end_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\n elif isinstance(end_time, datetime):\n end_time = end_time.strftime('%Y-%m-%dT%H:%M:%S')\n elif isinstance(end_time, string_types):\n try:\n datetime.strptime(end_time, '%Y-%m-%dT%H:%M:%S')\n except:\n raise Exception('Invalid end_time string, must be in the format YYYY-MM-DDTHH:MM:SS')\n elif isinstance(end_time, tuple(list(integer_types) + [float])):\n end_time = datetime.utcfromtimestamp(end_time).strftime('%Y-%m-%dT%H:%M:%S')\n if query_filter == '*':\n query_filter = {'codes': [], 'users': [], 'source': '*'}\n params = {'startTime': start_time, 'endTime': end_time, 'level': level, 'f': 'json', 'filterType': 'json', 'pageSize': page_size}\n if query_filter:\n params['filter'] = query_filter\n return self._con.get(path=url, params=params)\n", "source": "the_stack_v2_python_sparse", "source_path": "arcpyenv/arcgispro-py3-clone/Lib/site-packages/arcgis/gis/admin/_logs.py", "source_repo": "SherbazHashmi/HackathonServer", "split": "val", "star_events_count": 3} {"blob_id": "9e65f8910f94f4dcb10e2093c2e8540cfe6c7b90", "bodies": ["if not hasattr(aq_base(self), 'creators'):\n if hasattr(aq_base(self), 'creator') and self.creator != 'unknown':\n self.creators = (self.creator,)\n else:\n self.creators = ()\nreturn self.creators", "tool = getUtility(IDiscussionTool)\ntalkback = tool.getDiscussionFor(self)\nreturn talkback._getReplyParent(self.in_reply_to)", "if getattr(reply_to, 'meta_type', None) == self.meta_type:\n self.in_reply_to = reply_to.getId()\nelse:\n self.in_reply_to = ''", "parents = []\ncurrent = self\nwhile not size or len(parents) < size:\n parent = current.inReplyTo()\n assert not parent in parents\n parents.insert(0, parent)\n if parent.meta_type != self.meta_type:\n break\n current = parent\nreturn parents"], "bodies_text": "<|body_start_0|>\n if not hasattr(aq_base(self), 'creators'):\n if hasattr(aq_base(self), 'creator') and self.creator != 'unknown':\n self.creators = (self.creator,)\n else:\n self.creators = ()\n return self.creators\n<|end_body_0|>\n\n<|body_start_1|>\n tool = getUtility(IDiscussionTool)\n talkback = tool.getDiscussionFor(self)\n return talkback._getReplyParent(self.in_reply_to)\n<|end_body_1|>\n\n<|body_start_2|>\n if getattr(reply_to, 'meta_type', None) == self.meta_type:\n self.in_reply_to = reply_to.getId()\n else:\n 
self.in_reply_to = ''\n<|end_body_2|>\n\n<|body_start_3|>\n parents = []\n current = self\n while not size or len(parents) < size:\n parent = current.inReplyTo()\n assert not parent in parents\n parents.insert(0, parent)\n if parent.meta_type != self.meta_type:\n break\n current = parent\n return parents\n<|end_body_3|>\n", "class_docstring": "Class for content which is a response to other content.", "class_name": "DiscussionItem", "detected_licenses": ["ZPL-2.1"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DiscussionItem:\n \"\"\"Class for content which is a response to other content.\"\"\"\n\n def listCreators(self):\n \"\"\"List Dublin Core Creator elements - resource authors.\"\"\"\n <|body_0|>\n\n def inReplyTo(self, REQUEST=None):\n \"\"\"Return the IDiscussable object to which we are a reply. Two cases obtain: - We are a \"top-level\" reply to a non-DiscussionItem piece of content; in this case, our 'in_reply_to' field will be None. - We are a nested reply; in this case, our 'in_reply_to' field will be the ID of the parent DiscussionItem.\"\"\"\n <|body_1|>\n\n def setReplyTo(self, reply_to):\n \"\"\"Make this object a response to the passed object.\"\"\"\n <|body_2|>\n\n def parentsInThread(self, size=0):\n \"\"\"Return the list of items which are \"above\" this item in the discussion thread. If 'size' is not zero, only the closest 'size' parents will be returned.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not hasattr(aq_base(self), 'creators'):\n if hasattr(aq_base(self), 'creator') and self.creator != 'unknown':\n self.creators = (self.creator,)\n else:\n self.creators = ()\n return self.creators\n<|end_body_0|>\n\n<|body_start_1|>\n tool = getUtility(IDiscussionTool)\n talkback = tool.getDiscussionFor(self)\n return talkback._getReplyParent(self.in_reply_to)\n<|end_body_1|>\n\n<|body_start_2|>\n if getattr(reply_to, 'meta_type', None) == self.meta_type:\n self.in_reply_to = reply_to.getId()\n else:\n self.in_reply_to = ''\n<|end_body_2|>\n\n<|body_start_3|>\n parents = []\n current = self\n while not size or len(parents) < size:\n parent = current.inReplyTo()\n assert not parent in parents\n parents.insert(0, parent)\n if parent.meta_type != self.meta_type:\n break\n current = parent\n return parents\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000406", "length_bytes": 12355, "license_type": "permissive", "methods": [{"docstring": "List Dublin Core Creator elements - resource authors.", "name": "listCreators", "signature": "def listCreators(self)"}, {"docstring": "Return the IDiscussable object to which we are a reply. Two cases obtain: - We are a \"top-level\" reply to a non-DiscussionItem piece of content; in this case, our 'in_reply_to' field will be None. - We are a nested reply; in this case, our 'in_reply_to' field will be the ID of the parent DiscussionItem.", "name": "inReplyTo", "signature": "def inReplyTo(self, REQUEST=None)"}, {"docstring": "Make this object a response to the passed object.", "name": "setReplyTo", "signature": "def setReplyTo(self, reply_to)"}, {"docstring": "Return the list of items which are \"above\" this item in the discussion thread. 
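The listCreators body above lazily migrates a legacy scalar creator attribute to a creators tuple the first time it is read, using aq_base so the hasattr checks see only attributes stored on the object itself rather than ones reachable through Zope acquisition. A framework-free sketch of the same lazy-migration idea, with plain attributes standing in for acquisition-wrapped ones:

class LegacyDocument:
    """Illustrative lazy upgrade of a legacy 'creator' field."""

    def __init__(self, creator=None):
        if creator is not None:
            self.creator = creator

    def list_creators(self):
        # Migrate on first read: prefer an existing 'creators' tuple,
        # fall back to the legacy scalar, default to empty.
        if not hasattr(self, 'creators'):
            if getattr(self, 'creator', 'unknown') != 'unknown':
                self.creators = (self.creator,)
            else:
                self.creators = ()
        return self.creators

doc = LegacyDocument(creator='alice')
assert doc.list_creators() == ('alice',)
assert LegacyDocument().list_creators() == ()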
If 'size' is not zero, only the closest 'size' parents will be returned.", "name": "parentsInThread", "signature": "def parentsInThread(self, size=0)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_test_000409", "prompt": "Implement the Python class `DiscussionItem` described below.\n\nClass description:\nClass for content which is a response to other content.\n\nMethod signatures and docstrings:\n- def listCreators(self): List Dublin Core Creator elements - resource authors.\n- def inReplyTo(self, REQUEST=None): Return the IDiscussable object to which we are a reply. Two cases obtain: - We are a \"top-level\" reply to a non-DiscussionItem piece of content; in this case, our 'in_reply_to' field will be None. - We are a nested reply; in this case, our 'in_reply_to' field will be the ID of the parent DiscussionItem.\n- def setReplyTo(self, reply_to): Make this object a response to the passed object.\n- def parentsInThread(self, size=0): Return the list of items which are \"above\" this item in the discussion thread. If 'size' is not zero, only the closest 'size' parents will be returned.", "prompted_full_text": "Implement the Python class `DiscussionItem` described below.\n\nClass description:\nClass for content which is a response to other content.\n\nMethod signatures and docstrings:\n- def listCreators(self): List Dublin Core Creator elements - resource authors.\n- def inReplyTo(self, REQUEST=None): Return the IDiscussable object to which we are a reply. Two cases obtain: - We are a \"top-level\" reply to a non-DiscussionItem piece of content; in this case, our 'in_reply_to' field will be None. - We are a nested reply; in this case, our 'in_reply_to' field will be the ID of the parent DiscussionItem.\n- def setReplyTo(self, reply_to): Make this object a response to the passed object.\n- def parentsInThread(self, size=0): Return the list of items which are \"above\" this item in the discussion thread. If 'size' is not zero, only the closest 'size' parents will be returned.\n\n<|skeleton|>\nclass DiscussionItem:\n \"\"\"Class for content which is a response to other content.\"\"\"\n\n def listCreators(self):\n \"\"\"List Dublin Core Creator elements - resource authors.\"\"\"\n <|body_0|>\n\n def inReplyTo(self, REQUEST=None):\n \"\"\"Return the IDiscussable object to which we are a reply. Two cases obtain: - We are a \"top-level\" reply to a non-DiscussionItem piece of content; in this case, our 'in_reply_to' field will be None. - We are a nested reply; in this case, our 'in_reply_to' field will be the ID of the parent DiscussionItem.\"\"\"\n <|body_1|>\n\n def setReplyTo(self, reply_to):\n \"\"\"Make this object a response to the passed object.\"\"\"\n <|body_2|>\n\n def parentsInThread(self, size=0):\n \"\"\"Return the list of items which are \"above\" this item in the discussion thread. 
If 'size' is not zero, only the closest 'size' parents will be returned.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not hasattr(aq_base(self), 'creators'):\n if hasattr(aq_base(self), 'creator') and self.creator != 'unknown':\n self.creators = (self.creator,)\n else:\n self.creators = ()\n return self.creators\n<|end_body_0|>\n\n<|body_start_1|>\n tool = getUtility(IDiscussionTool)\n talkback = tool.getDiscussionFor(self)\n return talkback._getReplyParent(self.in_reply_to)\n<|end_body_1|>\n\n<|body_start_2|>\n if getattr(reply_to, 'meta_type', None) == self.meta_type:\n self.in_reply_to = reply_to.getId()\n else:\n self.in_reply_to = ''\n<|end_body_2|>\n\n<|body_start_3|>\n parents = []\n current = self\n while not size or len(parents) < size:\n parent = current.inReplyTo()\n assert not parent in parents\n parents.insert(0, parent)\n if parent.meta_type != self.meta_type:\n break\n current = parent\n return parents\n<|end_body_3|>\n", "revision_id": "eabf7529eefe13a53ed088250d179a92218af1ed", "skeleton": "<|skeleton|>\nclass DiscussionItem:\n \"\"\"Class for content which is a response to other content.\"\"\"\n\n def listCreators(self):\n \"\"\"List Dublin Core Creator elements - resource authors.\"\"\"\n <|body_0|>\n\n def inReplyTo(self, REQUEST=None):\n \"\"\"Return the IDiscussable object to which we are a reply. Two cases obtain: - We are a \"top-level\" reply to a non-DiscussionItem piece of content; in this case, our 'in_reply_to' field will be None. - We are a nested reply; in this case, our 'in_reply_to' field will be the ID of the parent DiscussionItem.\"\"\"\n <|body_1|>\n\n def setReplyTo(self, reply_to):\n \"\"\"Make this object a response to the passed object.\"\"\"\n <|body_2|>\n\n def parentsInThread(self, size=0):\n \"\"\"Return the list of items which are \"above\" this item in the discussion thread. If 'size' is not zero, only the closest 'size' parents will be returned.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DiscussionItem:\n \"\"\"Class for content which is a response to other content.\"\"\"\n\n def listCreators(self):\n \"\"\"List Dublin Core Creator elements - resource authors.\"\"\"\n if not hasattr(aq_base(self), 'creators'):\n if hasattr(aq_base(self), 'creator') and self.creator != 'unknown':\n self.creators = (self.creator,)\n else:\n self.creators = ()\n return self.creators\n\n def inReplyTo(self, REQUEST=None):\n \"\"\"Return the IDiscussable object to which we are a reply. Two cases obtain: - We are a \"top-level\" reply to a non-DiscussionItem piece of content; in this case, our 'in_reply_to' field will be None. - We are a nested reply; in this case, our 'in_reply_to' field will be the ID of the parent DiscussionItem.\"\"\"\n tool = getUtility(IDiscussionTool)\n talkback = tool.getDiscussionFor(self)\n return talkback._getReplyParent(self.in_reply_to)\n\n def setReplyTo(self, reply_to):\n \"\"\"Make this object a response to the passed object.\"\"\"\n if getattr(reply_to, 'meta_type', None) == self.meta_type:\n self.in_reply_to = reply_to.getId()\n else:\n self.in_reply_to = ''\n\n def parentsInThread(self, size=0):\n \"\"\"Return the list of items which are \"above\" this item in the discussion thread. 
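The parentsInThread body is a walk-up-the-chain loop: follow inReplyTo until the parent is no longer a reply (its meta_type differs) or the requested window is full, prepending each parent so the result reads root-first. The sketch below substitutes a plain parent pointer for the talkback lookup; the None guard is my addition, since in the real class a top-level reply resolves to the discussable content object rather than None.

class Node:
    def __init__(self, name, parent=None, is_reply=True):
        self.name, self.parent, self.is_reply = name, parent, is_reply

    def parents_in_thread(self, size=0):
        parents, current = [], self
        while not size or len(parents) < size:
            parent = current.parent
            if parent is None:
                break
            parents.insert(0, parent)  # prepend: oldest ancestor first
            if not parent.is_reply:    # reached the non-reply root content
                break
            current = parent
        return parents

root = Node('doc', is_reply=False)
a = Node('a', parent=root)
b = Node('b', parent=a)
assert [n.name for n in b.parents_in_thread()] == ['doc', 'a']
assert [n.name for n in b.parents_in_thread(size=1)] == ['a']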
If 'size' is not zero, only the closest 'size' parents will be returned.\"\"\"\n parents = []\n current = self\n while not size or len(parents) < size:\n parent = current.inReplyTo()\n assert not parent in parents\n parents.insert(0, parent)\n if parent.meta_type != self.meta_type:\n break\n current = parent\n return parents\n", "source": "the_stack_v2_python_sparse", "source_path": "branches/Products.CMFDefault/Products/CMFDefault/DiscussionItem.py", "source_repo": "openlegis-br/sagl", "split": "val", "star_events_count": 17} {"blob_id": "b70e73edb101e6303b655e31f58aa1ebc22cac70", "bodies": ["super(SDNet, self).__init__(parameters)\nself.anatomy_factors = 8\nself.modality_factors = 8\nif parameters['patch_size'] != [224, 224, 1]:\n print('WARNING: The patch size is not 224x224, which is required for sdnet. Using default patch size instead', file=sys.stderr)\n parameters['patch_size'] = [224, 224, 1]\nif parameters['batch_size'] == 1:\n raise ValueError(\"'batch_size' needs to be greater than 1 for 'sdnet'\")\nparameters['model']['amp'] = False\nparameters['model']['norm_type'] = 'instance'\nparameters_unet = deepcopy(parameters)\nparameters_unet['model']['num_classes'] = self.anatomy_factors\nparameters_unet['model']['norm_type'] = 'instance'\nparameters_unet['model']['final_layer'] = None\nself.cencoder = unet(parameters_unet)\nself.mencoder = ModalityEncoder(parameters, self.anatomy_factors, self.modality_factors)\nself.decoder = Decoder(parameters, self.anatomy_factors)\nself.segmentor = Segmentor(parameters, self.anatomy_factors)", "std = torch.exp(0.5 * logvar)\neps = torch.randn_like(std)\nreturn mu + eps * std", "if x.shape[1] > 1:\n x = x[:, 0:1, :, :]\nanatomy_factors = F.gumbel_softmax(self.cencoder(x), hard=True, dim=1)\nmu, logvar = self.mencoder(x, anatomy_factors)\nmodality_factors = SDNet.reparameterize(mu, logvar)\nsm = self.segmentor(anatomy_factors)\nreco = self.decoder(anatomy_factors, modality_factors)\nmodality_factors_reencoded, _ = self.mencoder(reco, anatomy_factors)\nreturn (sm, reco, mu, logvar, modality_factors_reencoded)"], "bodies_text": "<|body_start_0|>\n super(SDNet, self).__init__(parameters)\n self.anatomy_factors = 8\n self.modality_factors = 8\n if parameters['patch_size'] != [224, 224, 1]:\n print('WARNING: The patch size is not 224x224, which is required for sdnet. 
Using default patch size instead', file=sys.stderr)\n parameters['patch_size'] = [224, 224, 1]\n if parameters['batch_size'] == 1:\n raise ValueError(\"'batch_size' needs to be greater than 1 for 'sdnet'\")\n parameters['model']['amp'] = False\n parameters['model']['norm_type'] = 'instance'\n parameters_unet = deepcopy(parameters)\n parameters_unet['model']['num_classes'] = self.anatomy_factors\n parameters_unet['model']['norm_type'] = 'instance'\n parameters_unet['model']['final_layer'] = None\n self.cencoder = unet(parameters_unet)\n self.mencoder = ModalityEncoder(parameters, self.anatomy_factors, self.modality_factors)\n self.decoder = Decoder(parameters, self.anatomy_factors)\n self.segmentor = Segmentor(parameters, self.anatomy_factors)\n<|end_body_0|>\n\n<|body_start_1|>\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return mu + eps * std\n<|end_body_1|>\n\n<|body_start_2|>\n if x.shape[1] > 1:\n x = x[:, 0:1, :, :]\n anatomy_factors = F.gumbel_softmax(self.cencoder(x), hard=True, dim=1)\n mu, logvar = self.mencoder(x, anatomy_factors)\n modality_factors = SDNet.reparameterize(mu, logvar)\n sm = self.segmentor(anatomy_factors)\n reco = self.decoder(anatomy_factors, modality_factors)\n modality_factors_reencoded, _ = self.mencoder(reco, anatomy_factors)\n return (sm, reco, mu, logvar, modality_factors_reencoded)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "SDNet", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SDNet:\n\n def __init__(self, parameters: dict):\n \"\"\"SDNet (Structure-Disentangled Network) module. Args: parameters (dict): A dictionary containing model parameters. Attributes: anatomy_factors (int): The number of anatomical factors to be considered. modality_factors (int): The number of modality factors to be considered. cencoder (unet): U-Net based Content Encoder for generating anatomy factors. mencoder (ModalityEncoder): Modality Encoder for generating modality factors. decoder (Decoder): Decoder module for generating the reconstructed image. segmentor (Segmentor): Segmentor module for generating the segmentation map.\"\"\"\n <|body_0|>\n\n def reparameterize(mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:\n \"\"\"Reparameterization trick for sampling from a Gaussian distribution. Args: mu (torch.Tensor): Mean of the Gaussian distribution. logvar (torch.Tensor): Log variance of the Gaussian distribution. Returns: torch.Tensor: Sampled value from the Gaussian distribution.\"\"\"\n <|body_1|>\n\n def forward(self, x: torch.Tensor) -> typing.List[torch.Tensor]:\n \"\"\"Forward pass of the SDNet module. Args: x (torch.Tensor): Input tensor (image data). Returns: typing.List[torch.Tensor]: List containing the segmentation map (sm), reconstructed image (reco), mean (mu), log variance (logvar), and re-encoded modality factors (modality_factors_reencoded).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SDNet, self).__init__(parameters)\n self.anatomy_factors = 8\n self.modality_factors = 8\n if parameters['patch_size'] != [224, 224, 1]:\n print('WARNING: The patch size is not 224x224, which is required for sdnet. 
Using default patch size instead', file=sys.stderr)\n parameters['patch_size'] = [224, 224, 1]\n if parameters['batch_size'] == 1:\n raise ValueError(\"'batch_size' needs to be greater than 1 for 'sdnet'\")\n parameters['model']['amp'] = False\n parameters['model']['norm_type'] = 'instance'\n parameters_unet = deepcopy(parameters)\n parameters_unet['model']['num_classes'] = self.anatomy_factors\n parameters_unet['model']['norm_type'] = 'instance'\n parameters_unet['model']['final_layer'] = None\n self.cencoder = unet(parameters_unet)\n self.mencoder = ModalityEncoder(parameters, self.anatomy_factors, self.modality_factors)\n self.decoder = Decoder(parameters, self.anatomy_factors)\n self.segmentor = Segmentor(parameters, self.anatomy_factors)\n<|end_body_0|>\n\n<|body_start_1|>\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return mu + eps * std\n<|end_body_1|>\n\n<|body_start_2|>\n if x.shape[1] > 1:\n x = x[:, 0:1, :, :]\n anatomy_factors = F.gumbel_softmax(self.cencoder(x), hard=True, dim=1)\n mu, logvar = self.mencoder(x, anatomy_factors)\n modality_factors = SDNet.reparameterize(mu, logvar)\n sm = self.segmentor(anatomy_factors)\n reco = self.decoder(anatomy_factors, modality_factors)\n modality_factors_reencoded, _ = self.mencoder(reco, anatomy_factors)\n return (sm, reco, mu, logvar, modality_factors_reencoded)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000407", "length_bytes": 14834, "license_type": "permissive", "methods": [{"docstring": "SDNet (Structure-Disentangled Network) module. Args: parameters (dict): A dictionary containing model parameters. Attributes: anatomy_factors (int): The number of anatomical factors to be considered. modality_factors (int): The number of modality factors to be considered. cencoder (unet): U-Net based Content Encoder for generating anatomy factors. mencoder (ModalityEncoder): Modality Encoder for generating modality factors. decoder (Decoder): Decoder module for generating the reconstructed image. segmentor (Segmentor): Segmentor module for generating the segmentation map.", "name": "__init__", "signature": "def __init__(self, parameters: dict)"}, {"docstring": "Reparameterization trick for sampling from a Gaussian distribution. Args: mu (torch.Tensor): Mean of the Gaussian distribution. logvar (torch.Tensor): Log variance of the Gaussian distribution. Returns: torch.Tensor: Sampled value from the Gaussian distribution.", "name": "reparameterize", "signature": "def reparameterize(mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor"}, {"docstring": "Forward pass of the SDNet module. Args: x (torch.Tensor): Input tensor (image data). Returns: typing.List[torch.Tensor]: List containing the segmentation map (sm), reconstructed image (reco), mean (mu), log variance (logvar), and re-encoded modality factors (modality_factors_reencoded).", "name": "forward", "signature": "def forward(self, x: torch.Tensor) -> typing.List[torch.Tensor]"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006576", "prompt": "Implement the Python class `SDNet` described below.\n\nClass description:\nImplement the SDNet class.\n\nMethod signatures and docstrings:\n- def __init__(self, parameters: dict): SDNet (Structure-Disentangled Network) module. Args: parameters (dict): A dictionary containing model parameters. Attributes: anatomy_factors (int): The number of anatomical factors to be considered. modality_factors (int): The number of modality factors to be considered. 
cencoder (unet): U-Net based Content Encoder for generating anatomy factors. mencoder (ModalityEncoder): Modality Encoder for generating modality factors. decoder (Decoder): Decoder module for generating the reconstructed image. segmentor (Segmentor): Segmentor module for generating the segmentation map.\n- def reparameterize(mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor: Reparameterization trick for sampling from a Gaussian distribution. Args: mu (torch.Tensor): Mean of the Gaussian distribution. logvar (torch.Tensor): Log variance of the Gaussian distribution. Returns: torch.Tensor: Sampled value from the Gaussian distribution.\n- def forward(self, x: torch.Tensor) -> typing.List[torch.Tensor]: Forward pass of the SDNet module. Args: x (torch.Tensor): Input tensor (image data). Returns: typing.List[torch.Tensor]: List containing the segmentation map (sm), reconstructed image (reco), mean (mu), log variance (logvar), and re-encoded modality factors (modality_factors_reencoded).", "prompted_full_text": "Implement the Python class `SDNet` described below.\n\nClass description:\nImplement the SDNet class.\n\nMethod signatures and docstrings:\n- def __init__(self, parameters: dict): SDNet (Structure-Disentangled Network) module. Args: parameters (dict): A dictionary containing model parameters. Attributes: anatomy_factors (int): The number of anatomical factors to be considered. modality_factors (int): The number of modality factors to be considered. cencoder (unet): U-Net based Content Encoder for generating anatomy factors. mencoder (ModalityEncoder): Modality Encoder for generating modality factors. decoder (Decoder): Decoder module for generating the reconstructed image. segmentor (Segmentor): Segmentor module for generating the segmentation map.\n- def reparameterize(mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor: Reparameterization trick for sampling from a Gaussian distribution. Args: mu (torch.Tensor): Mean of the Gaussian distribution. logvar (torch.Tensor): Log variance of the Gaussian distribution. Returns: torch.Tensor: Sampled value from the Gaussian distribution.\n- def forward(self, x: torch.Tensor) -> typing.List[torch.Tensor]: Forward pass of the SDNet module. Args: x (torch.Tensor): Input tensor (image data). Returns: typing.List[torch.Tensor]: List containing the segmentation map (sm), reconstructed image (reco), mean (mu), log variance (logvar), and re-encoded modality factors (modality_factors_reencoded).\n\n<|skeleton|>\nclass SDNet:\n\n def __init__(self, parameters: dict):\n \"\"\"SDNet (Structure-Disentangled Network) module. Args: parameters (dict): A dictionary containing model parameters. Attributes: anatomy_factors (int): The number of anatomical factors to be considered. modality_factors (int): The number of modality factors to be considered. cencoder (unet): U-Net based Content Encoder for generating anatomy factors. mencoder (ModalityEncoder): Modality Encoder for generating modality factors. decoder (Decoder): Decoder module for generating the reconstructed image. segmentor (Segmentor): Segmentor module for generating the segmentation map.\"\"\"\n <|body_0|>\n\n def reparameterize(mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:\n \"\"\"Reparameterization trick for sampling from a Gaussian distribution. Args: mu (torch.Tensor): Mean of the Gaussian distribution. logvar (torch.Tensor): Log variance of the Gaussian distribution. 
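The SDNet constructor in this record splits its configuration checks into two tiers: it warns and silently overrides an unsupported patch_size, but raises outright on batch_size == 1 (the record does not say why; a single-sample batch presumably breaks the model's statistics or losses, so treat that reading as mine). A minimal sketch of the warn-and-fix versus fail-fast split, with validate_sdnet_params as an illustrative helper name:

import sys

REQUIRED_PATCH_SIZE = [224, 224, 1]

def validate_sdnet_params(parameters):
    """Warn and correct recoverable settings; raise on fatal ones."""
    if parameters['patch_size'] != REQUIRED_PATCH_SIZE:
        print('WARNING: overriding patch_size to 224x224x1 for sdnet',
              file=sys.stderr)
        parameters['patch_size'] = list(REQUIRED_PATCH_SIZE)
    if parameters['batch_size'] == 1:
        raise ValueError("'batch_size' needs to be greater than 1 for 'sdnet'")
    return parameters

params = validate_sdnet_params({'patch_size': [128, 128, 1], 'batch_size': 4})
assert params['patch_size'] == [224, 224, 1]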
Returns: torch.Tensor: Sampled value from the Gaussian distribution.\"\"\"\n <|body_1|>\n\n def forward(self, x: torch.Tensor) -> typing.List[torch.Tensor]:\n \"\"\"Forward pass of the SDNet module. Args: x (torch.Tensor): Input tensor (image data). Returns: typing.List[torch.Tensor]: List containing the segmentation map (sm), reconstructed image (reco), mean (mu), log variance (logvar), and re-encoded modality factors (modality_factors_reencoded).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SDNet, self).__init__(parameters)\n self.anatomy_factors = 8\n self.modality_factors = 8\n if parameters['patch_size'] != [224, 224, 1]:\n print('WARNING: The patch size is not 224x224, which is required for sdnet. Using default patch size instead', file=sys.stderr)\n parameters['patch_size'] = [224, 224, 1]\n if parameters['batch_size'] == 1:\n raise ValueError(\"'batch_size' needs to be greater than 1 for 'sdnet'\")\n parameters['model']['amp'] = False\n parameters['model']['norm_type'] = 'instance'\n parameters_unet = deepcopy(parameters)\n parameters_unet['model']['num_classes'] = self.anatomy_factors\n parameters_unet['model']['norm_type'] = 'instance'\n parameters_unet['model']['final_layer'] = None\n self.cencoder = unet(parameters_unet)\n self.mencoder = ModalityEncoder(parameters, self.anatomy_factors, self.modality_factors)\n self.decoder = Decoder(parameters, self.anatomy_factors)\n self.segmentor = Segmentor(parameters, self.anatomy_factors)\n<|end_body_0|>\n\n<|body_start_1|>\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return mu + eps * std\n<|end_body_1|>\n\n<|body_start_2|>\n if x.shape[1] > 1:\n x = x[:, 0:1, :, :]\n anatomy_factors = F.gumbel_softmax(self.cencoder(x), hard=True, dim=1)\n mu, logvar = self.mencoder(x, anatomy_factors)\n modality_factors = SDNet.reparameterize(mu, logvar)\n sm = self.segmentor(anatomy_factors)\n reco = self.decoder(anatomy_factors, modality_factors)\n modality_factors_reencoded, _ = self.mencoder(reco, anatomy_factors)\n return (sm, reco, mu, logvar, modality_factors_reencoded)\n<|end_body_2|>\n", "revision_id": "72eb99f68205afd5f8d49a3bb6cfc08cfd467582", "skeleton": "<|skeleton|>\nclass SDNet:\n\n def __init__(self, parameters: dict):\n \"\"\"SDNet (Structure-Disentangled Network) module. Args: parameters (dict): A dictionary containing model parameters. Attributes: anatomy_factors (int): The number of anatomical factors to be considered. modality_factors (int): The number of modality factors to be considered. cencoder (unet): U-Net based Content Encoder for generating anatomy factors. mencoder (ModalityEncoder): Modality Encoder for generating modality factors. decoder (Decoder): Decoder module for generating the reconstructed image. segmentor (Segmentor): Segmentor module for generating the segmentation map.\"\"\"\n <|body_0|>\n\n def reparameterize(mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:\n \"\"\"Reparameterization trick for sampling from a Gaussian distribution. Args: mu (torch.Tensor): Mean of the Gaussian distribution. logvar (torch.Tensor): Log variance of the Gaussian distribution. Returns: torch.Tensor: Sampled value from the Gaussian distribution.\"\"\"\n <|body_1|>\n\n def forward(self, x: torch.Tensor) -> typing.List[torch.Tensor]:\n \"\"\"Forward pass of the SDNet module. Args: x (torch.Tensor): Input tensor (image data). 
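The reparameterize body is the standard VAE reparameterization trick: sample z = mu + eps * sigma with eps ~ N(0, I) and sigma = exp(0.5 * logvar), which keeps the sampling step differentiable with respect to mu and logvar. Note that the method is written without self and invoked as SDNet.reparameterize(mu, logvar) in forward, so it behaves as a static method even though it carries no decorator. A standalone sketch:

import torch

def reparameterize(mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    """Differentiable sample from N(mu, exp(logvar)) via eps ~ N(0, I)."""
    std = torch.exp(0.5 * logvar)   # log-variance -> standard deviation
    eps = torch.randn_like(std)     # the noise carries no gradient path
    return mu + eps * std

mu = torch.zeros(4, 8, requires_grad=True)
logvar = torch.zeros(4, 8, requires_grad=True)
z = reparameterize(mu, logvar)
z.sum().backward()                  # gradients flow back to mu and logvar
assert mu.grad is not None and logvar.grad is not None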
Returns: typing.List[torch.Tensor]: List containing the segmentation map (sm), reconstructed image (reco), mean (mu), log variance (logvar), and re-encoded modality factors (modality_factors_reencoded).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SDNet:\n def __init__(self, parameters: dict):\n \"\"\"SDNet (Structure-Disentangled Network) module. Args: parameters (dict): A dictionary containing model parameters. Attributes: anatomy_factors (int): The number of anatomical factors to be considered. modality_factors (int): The number of modality factors to be considered. cencoder (unet): U-Net based Content Encoder for generating anatomy factors. mencoder (ModalityEncoder): Modality Encoder for generating modality factors. decoder (Decoder): Decoder module for generating the reconstructed image. segmentor (Segmentor): Segmentor module for generating the segmentation map.\"\"\"\n super(SDNet, self).__init__(parameters)\n self.anatomy_factors = 8\n self.modality_factors = 8\n if parameters['patch_size'] != [224, 224, 1]:\n print('WARNING: The patch size is not 224x224, which is required for sdnet. Using default patch size instead', file=sys.stderr)\n parameters['patch_size'] = [224, 224, 1]\n if parameters['batch_size'] == 1:\n raise ValueError(\"'batch_size' needs to be greater than 1 for 'sdnet'\")\n parameters['model']['amp'] = False\n parameters['model']['norm_type'] = 'instance'\n parameters_unet = deepcopy(parameters)\n parameters_unet['model']['num_classes'] = self.anatomy_factors\n parameters_unet['model']['norm_type'] = 'instance'\n parameters_unet['model']['final_layer'] = None\n self.cencoder = unet(parameters_unet)\n self.mencoder = ModalityEncoder(parameters, self.anatomy_factors, self.modality_factors)\n self.decoder = Decoder(parameters, self.anatomy_factors)\n self.segmentor = Segmentor(parameters, self.anatomy_factors)\n\n def reparameterize(mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:\n \"\"\"Reparameterization trick for sampling from a Gaussian distribution. Args: mu (torch.Tensor): Mean of the Gaussian distribution. logvar (torch.Tensor): Log variance of the Gaussian distribution. Returns: torch.Tensor: Sampled value from the Gaussian distribution.\"\"\"\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return mu + eps * std\n\n def forward(self, x: torch.Tensor) -> typing.List[torch.Tensor]:\n \"\"\"Forward pass of the SDNet module. Args: x (torch.Tensor): Input tensor (image data). 
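forward discretizes the content encoder's output with F.gumbel_softmax(..., hard=True, dim=1), which yields exactly one-hot channel assignments in the forward pass while routing gradients through the soft sample in the backward pass (the straight-through estimator). A small sketch of that behavior on dummy logits:

import torch
import torch.nn.functional as F

logits = torch.randn(2, 8, 4, 4, requires_grad=True)   # N, C, H, W
hard = F.gumbel_softmax(logits, hard=True, dim=1)

# Forward values are exactly one-hot across the channel dimension...
assert torch.all(hard.sum(dim=1) == 1)
assert set(hard.unique().tolist()) <= {0.0, 1.0}

# ...yet the op stays differentiable via the straight-through trick.
hard.sum().backward()
assert logits.grad is not None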
Returns: typing.List[torch.Tensor]: List containing the segmentation map (sm), reconstructed image (reco), mean (mu), log variance (logvar), and re-encoded modality factors (modality_factors_reencoded).\"\"\"\n if x.shape[1] > 1:\n x = x[:, 0:1, :, :]\n anatomy_factors = F.gumbel_softmax(self.cencoder(x), hard=True, dim=1)\n mu, logvar = self.mencoder(x, anatomy_factors)\n modality_factors = SDNet.reparameterize(mu, logvar)\n sm = self.segmentor(anatomy_factors)\n reco = self.decoder(anatomy_factors, modality_factors)\n modality_factors_reencoded, _ = self.mencoder(reco, anatomy_factors)\n return (sm, reco, mu, logvar, modality_factors_reencoded)\n", "source": "the_stack_v2_python_sparse", "source_path": "GANDLF/models/sdnet.py", "source_repo": "mlcommons/GaNDLF", "split": "val", "star_events_count": 45} {"blob_id": "47b22c1aa6c1db7d343da33c986611db35ab9b9a", "bodies": ["points = [(x - y, x + y) for x, y in peaks]\nadjMap = defaultdict(list)\nfor x, y in points:\n adjMap[x].append((x, y))\nkeys = sorted(adjMap)\nres, maxY = (0, -INF)\nfor key in keys:\n group = adjMap[key]\n cur = 0\n for _, py in group:\n if py > maxY:\n maxY = py\n cur = 1\n elif py == maxY:\n cur = 0\n res += cur\nreturn res", "intervals = [(x - y, x + y) for x, y in peaks]\nintervals.sort(key=lambda x: x[0])\ngroups = [list(group) for _, group in groupby(intervals, key=lambda x: x[0])]\nres, maxRight = (0, -INF)\nfor group in groups:\n cur = 0\n for _, right in group:\n if right > maxRight:\n maxRight = right\n cur = 1\n elif right == maxRight:\n cur = 0\n res += cur\nreturn res"], "bodies_text": "<|body_start_0|>\n points = [(x - y, x + y) for x, y in peaks]\n adjMap = defaultdict(list)\n for x, y in points:\n adjMap[x].append((x, y))\n keys = sorted(adjMap)\n res, maxY = (0, -INF)\n for key in keys:\n group = adjMap[key]\n cur = 0\n for _, py in group:\n if py > maxY:\n maxY = py\n cur = 1\n elif py == maxY:\n cur = 0\n res += cur\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n intervals = [(x - y, x + y) for x, y in peaks]\n intervals.sort(key=lambda x: x[0])\n groups = [list(group) for _, group in groupby(intervals, key=lambda x: x[0])]\n res, maxRight = (0, -INF)\n for group in groups:\n cur = 0\n for _, right in group:\n if right > maxRight:\n maxRight = right\n cur = 1\n elif right == maxRight:\n cur = 0\n res += cur\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def visibleMountains(self, peaks: List[List[int]]) -> int:\n \"\"\"逆时针旋转点 + 二维偏序看这个点是否被其他山峰包含\"\"\"\n <|body_0|>\n\n def visibleMountains2(self, peaks: List[List[int]]) -> int:\n \"\"\"不旋转点 把每个山对应到x轴的区间上 一个维度排序 维护另一个维度\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n points = [(x - y, x + y) for x, y in peaks]\n adjMap = defaultdict(list)\n for x, y in points:\n adjMap[x].append((x, y))\n keys = sorted(adjMap)\n res, maxY = (0, -INF)\n for key in keys:\n group = adjMap[key]\n cur = 0\n for _, py in group:\n if py > maxY:\n maxY = py\n cur = 1\n elif py == maxY:\n cur = 0\n res += cur\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n intervals = [(x - y, x + y) for x, y in peaks]\n intervals.sort(key=lambda x: x[0])\n groups = [list(group) for _, group in groupby(intervals, key=lambda x: x[0])]\n res, maxRight = (0, -INF)\n for group in groups:\n cur = 0\n for _, right in group:\n if right > maxRight:\n maxRight = right\n cur = 1\n elif right == maxRight:\n cur = 0\n res += 
cur\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000408", "length_bytes": 2030, "license_type": "no_license", "methods": [{"docstring": "逆时针旋转点 + 二维偏序看这个点是否被其他山峰包含", "name": "visibleMountains", "signature": "def visibleMountains(self, peaks: List[List[int]]) -> int"}, {"docstring": "不旋转点 把每个山对应到x轴的区间上 一个维度排序 维护另一个维度", "name": "visibleMountains2", "signature": "def visibleMountains2(self, peaks: List[List[int]]) -> int"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def visibleMountains(self, peaks: List[List[int]]) -> int: 逆时针旋转点 + 二维偏序看这个点是否被其他山峰包含\n- def visibleMountains2(self, peaks: List[List[int]]) -> int: 不旋转点 把每个山对应到x轴的区间上 一个维度排序 维护另一个维度", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def visibleMountains(self, peaks: List[List[int]]) -> int: 逆时针旋转点 + 二维偏序看这个点是否被其他山峰包含\n- def visibleMountains2(self, peaks: List[List[int]]) -> int: 不旋转点 把每个山对应到x轴的区间上 一个维度排序 维护另一个维度\n\n<|skeleton|>\nclass Solution:\n\n def visibleMountains(self, peaks: List[List[int]]) -> int:\n \"\"\"逆时针旋转点 + 二维偏序看这个点是否被其他山峰包含\"\"\"\n <|body_0|>\n\n def visibleMountains2(self, peaks: List[List[int]]) -> int:\n \"\"\"不旋转点 把每个山对应到x轴的区间上 一个维度排序 维护另一个维度\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n points = [(x - y, x + y) for x, y in peaks]\n adjMap = defaultdict(list)\n for x, y in points:\n adjMap[x].append((x, y))\n keys = sorted(adjMap)\n res, maxY = (0, -INF)\n for key in keys:\n group = adjMap[key]\n cur = 0\n for _, py in group:\n if py > maxY:\n maxY = py\n cur = 1\n elif py == maxY:\n cur = 0\n res += cur\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n intervals = [(x - y, x + y) for x, y in peaks]\n intervals.sort(key=lambda x: x[0])\n groups = [list(group) for _, group in groupby(intervals, key=lambda x: x[0])]\n res, maxRight = (0, -INF)\n for group in groups:\n cur = 0\n for _, right in group:\n if right > maxRight:\n maxRight = right\n cur = 1\n elif right == maxRight:\n cur = 0\n res += cur\n return res\n<|end_body_1|>\n", "revision_id": "7e79e26bb8f641868561b186e34c1127ed63c9e0", "skeleton": "<|skeleton|>\nclass Solution:\n\n def visibleMountains(self, peaks: List[List[int]]) -> int:\n \"\"\"逆时针旋转点 + 二维偏序看这个点是否被其他山峰包含\"\"\"\n <|body_0|>\n\n def visibleMountains2(self, peaks: List[List[int]]) -> int:\n \"\"\"不旋转点 把每个山对应到x轴的区间上 一个维度排序 维护另一个维度\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def visibleMountains(self, peaks: List[List[int]]) -> int:\n \"\"\"逆时针旋转点 + 二维偏序看这个点是否被其他山峰包含\"\"\"\n points = [(x - y, x + y) for x, y in peaks]\n adjMap = defaultdict(list)\n for x, y in points:\n adjMap[x].append((x, y))\n keys = sorted(adjMap)\n res, maxY = (0, -INF)\n for key in keys:\n group = adjMap[key]\n cur = 0\n for _, py in group:\n if py > maxY:\n maxY = py\n cur = 1\n elif py == maxY:\n cur = 0\n res += cur\n return res\n\n def visibleMountains2(self, peaks: List[List[int]]) -> int:\n \"\"\"不旋转点 把每个山对应到x轴的区间上 一个维度排序 维护另一个维度\"\"\"\n intervals = [(x - y, x + y) for x, y in peaks]\n intervals.sort(key=lambda x: x[0])\n groups = [list(group) for _, group in groupby(intervals, key=lambda x: x[0])]\n res, maxRight = (0, -INF)\n for group in groups:\n cur = 0\n for _, right in group:\n if 
right > maxRight:\n maxRight = right\n cur = 1\n elif right == maxRight:\n cur = 0\n res += cur\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "4_set/有序集合/二维偏序/2345. Finding the Number of Visible Mountains.py", "source_repo": "981377660LMT/algorithm-study", "split": "val", "star_events_count": 225} {"blob_id": "d319eeb40b5b8933c36a41403b7e45d9ebebff22", "bodies": ["super().__init__(main_window)\nself.hide()\nself.setGraphicsEffect(utils.get_shadow())\nmain_window.communication.resized.connect(self._move)\nmain_window.communication.action_button_toggle.connect(self.toggle_state)", "w = int(width * 0.08)\nself.setStyleSheet(self.QSS.format(x=int(w / 3), y=int(w / 2), z=w))\nself.setFixedSize(w, w)\nself.move(width - self.width() * 1.5, waterline - self.height() / 2)\nself.raise_()", "if not is_visible:\n self.hide()\n return\nself.setIcon(QIcon(self.ICONS[icon]))\ntry:\n self.clicked.disconnect()\nexcept TypeError:\n pass\nself.clicked.connect(function)\nself.show()\nself.raise_()"], "bodies_text": "<|body_start_0|>\n super().__init__(main_window)\n self.hide()\n self.setGraphicsEffect(utils.get_shadow())\n main_window.communication.resized.connect(self._move)\n main_window.communication.action_button_toggle.connect(self.toggle_state)\n<|end_body_0|>\n\n<|body_start_1|>\n w = int(width * 0.08)\n self.setStyleSheet(self.QSS.format(x=int(w / 3), y=int(w / 2), z=w))\n self.setFixedSize(w, w)\n self.move(width - self.width() * 1.5, waterline - self.height() / 2)\n self.raise_()\n<|end_body_1|>\n\n<|body_start_2|>\n if not is_visible:\n self.hide()\n return\n self.setIcon(QIcon(self.ICONS[icon]))\n try:\n self.clicked.disconnect()\n except TypeError:\n pass\n self.clicked.connect(function)\n self.show()\n self.raise_()\n<|end_body_2|>\n", "class_docstring": "Main button for application. Changes callback and icon, depending on current state.", "class_name": "ActionButton", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ActionButton:\n \"\"\"Main button for application. Changes callback and icon, depending on current state.\"\"\"\n\n def __init__(self, main_window):\n \"\"\"Connect signals. Hide the button, it will be shown only when required signals are emited\"\"\"\n <|body_0|>\n\n def _move(self, width, waterline):\n \"\"\"Move the button when application is resized.\"\"\"\n <|body_1|>\n\n def toggle_state(self, is_visible, icon, function):\n \"\"\"Hide/show the button. Change icon. Change callback.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(main_window)\n self.hide()\n self.setGraphicsEffect(utils.get_shadow())\n main_window.communication.resized.connect(self._move)\n main_window.communication.action_button_toggle.connect(self.toggle_state)\n<|end_body_0|>\n\n<|body_start_1|>\n w = int(width * 0.08)\n self.setStyleSheet(self.QSS.format(x=int(w / 3), y=int(w / 2), z=w))\n self.setFixedSize(w, w)\n self.move(width - self.width() * 1.5, waterline - self.height() / 2)\n self.raise_()\n<|end_body_1|>\n\n<|body_start_2|>\n if not is_visible:\n self.hide()\n return\n self.setIcon(QIcon(self.ICONS[icon]))\n try:\n self.clicked.disconnect()\n except TypeError:\n pass\n self.clicked.connect(function)\n self.show()\n self.raise_()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000409", "length_bytes": 2194, "license_type": "no_license", "methods": [{"docstring": "Connect signals. 
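The Solution record that closes just above rests on one transform: a peak at (x, y) with 45-degree slopes covers the interval [x - y, x + y] on the x-axis (equivalently, a rotation of the point set), after which a peak is hidden exactly when its interval is contained in, or duplicated by, another's. Sorting by left endpoint and sweeping a running maximum right endpoint then counts the visible peaks, with an equal right endpoint zeroing the count because identical or containing intervals block the current one. A compact restatement of the second method:

from itertools import groupby

def visible_mountains(peaks):
    # Each peak (x, y) covers the interval [x - y, x + y] on the axis.
    intervals = sorted((x - y, x + y) for x, y in peaks)
    visible, max_right = 0, float('-inf')
    for _, group in groupby(intervals, key=lambda iv: iv[0]):
        cur = 0
        for _, right in group:
            if right > max_right:      # pokes out past everything so far
                max_right = right
                cur = 1
            elif right == max_right:   # duplicate/contained: not visible
                cur = 0
        visible += cur
    return visible

assert visible_mountains([(2, 2), (6, 3), (5, 4)]) == 2
assert visible_mountains([(1, 3), (1, 3)]) == 0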
Hide the button, it will be shown only when required signals are emited", "name": "__init__", "signature": "def __init__(self, main_window)"}, {"docstring": "Move the button when application is resized.", "name": "_move", "signature": "def _move(self, width, waterline)"}, {"docstring": "Hide/show the button. Change icon. Change callback.", "name": "toggle_state", "signature": "def toggle_state(self, is_visible, icon, function)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003261", "prompt": "Implement the Python class `ActionButton` described below.\n\nClass description:\nMain button for application. Changes callback and icon, depending on current state.\n\nMethod signatures and docstrings:\n- def __init__(self, main_window): Connect signals. Hide the button, it will be shown only when required signals are emited\n- def _move(self, width, waterline): Move the button when application is resized.\n- def toggle_state(self, is_visible, icon, function): Hide/show the button. Change icon. Change callback.", "prompted_full_text": "Implement the Python class `ActionButton` described below.\n\nClass description:\nMain button for application. Changes callback and icon, depending on current state.\n\nMethod signatures and docstrings:\n- def __init__(self, main_window): Connect signals. Hide the button, it will be shown only when required signals are emited\n- def _move(self, width, waterline): Move the button when application is resized.\n- def toggle_state(self, is_visible, icon, function): Hide/show the button. Change icon. Change callback.\n\n<|skeleton|>\nclass ActionButton:\n \"\"\"Main button for application. Changes callback and icon, depending on current state.\"\"\"\n\n def __init__(self, main_window):\n \"\"\"Connect signals. Hide the button, it will be shown only when required signals are emited\"\"\"\n <|body_0|>\n\n def _move(self, width, waterline):\n \"\"\"Move the button when application is resized.\"\"\"\n <|body_1|>\n\n def toggle_state(self, is_visible, icon, function):\n \"\"\"Hide/show the button. Change icon. Change callback.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(main_window)\n self.hide()\n self.setGraphicsEffect(utils.get_shadow())\n main_window.communication.resized.connect(self._move)\n main_window.communication.action_button_toggle.connect(self.toggle_state)\n<|end_body_0|>\n\n<|body_start_1|>\n w = int(width * 0.08)\n self.setStyleSheet(self.QSS.format(x=int(w / 3), y=int(w / 2), z=w))\n self.setFixedSize(w, w)\n self.move(width - self.width() * 1.5, waterline - self.height() / 2)\n self.raise_()\n<|end_body_1|>\n\n<|body_start_2|>\n if not is_visible:\n self.hide()\n return\n self.setIcon(QIcon(self.ICONS[icon]))\n try:\n self.clicked.disconnect()\n except TypeError:\n pass\n self.clicked.connect(function)\n self.show()\n self.raise_()\n<|end_body_2|>\n", "revision_id": "606e188e88ee3a2b2e1daee60c71948c678228e1", "skeleton": "<|skeleton|>\nclass ActionButton:\n \"\"\"Main button for application. Changes callback and icon, depending on current state.\"\"\"\n\n def __init__(self, main_window):\n \"\"\"Connect signals. Hide the button, it will be shown only when required signals are emited\"\"\"\n <|body_0|>\n\n def _move(self, width, waterline):\n \"\"\"Move the button when application is resized.\"\"\"\n <|body_1|>\n\n def toggle_state(self, is_visible, icon, function):\n \"\"\"Hide/show the button. Change icon. 
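toggle_state rebinds the button's callback by calling clicked.disconnect() inside a try/except TypeError block: in Qt bindings, disconnecting a signal that has no connected slots raises TypeError, so swallowing it makes the rebind idempotent. The sketch below reproduces that swap-the-callback idea without Qt; the tiny Signal class is illustrative, not part of PyQt or PySide.

class Signal:
    """Minimal stand-in for a Qt signal holding one optional slot."""

    def __init__(self):
        self._slot = None

    def connect(self, slot):
        self._slot = slot

    def disconnect(self):
        if self._slot is None:
            # Mirror Qt bindings: disconnecting nothing raises TypeError.
            raise TypeError('no connections to disconnect')
        self._slot = None

    def emit(self):
        if self._slot:
            self._slot()

clicked = Signal()
for callback in (lambda: print('first'), lambda: print('second')):
    try:
        clicked.disconnect()   # safe whether or not something is wired up
    except TypeError:
        pass
    clicked.connect(callback)
clicked.emit()                 # only 'second' fires: the rebind replaced it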
Change callback.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ActionButton:\n \"\"\"Main button for application. Changes callback and icon, depending on current state.\"\"\"\n\n def __init__(self, main_window):\n \"\"\"Connect signals. Hide the button, it will be shown only when required signals are emited\"\"\"\n super().__init__(main_window)\n self.hide()\n self.setGraphicsEffect(utils.get_shadow())\n main_window.communication.resized.connect(self._move)\n main_window.communication.action_button_toggle.connect(self.toggle_state)\n\n def _move(self, width, waterline):\n \"\"\"Move the button when application is resized.\"\"\"\n w = int(width * 0.08)\n self.setStyleSheet(self.QSS.format(x=int(w / 3), y=int(w / 2), z=w))\n self.setFixedSize(w, w)\n self.move(width - self.width() * 1.5, waterline - self.height() / 2)\n self.raise_()\n\n def toggle_state(self, is_visible, icon, function):\n \"\"\"Hide/show the button. Change icon. Change callback.\"\"\"\n if not is_visible:\n self.hide()\n return\n self.setIcon(QIcon(self.ICONS[icon]))\n try:\n self.clicked.disconnect()\n except TypeError:\n pass\n self.clicked.connect(function)\n self.show()\n self.raise_()\n", "source": "the_stack_v2_python_sparse", "source_path": "Hospital-Helper-2-master/app/gui/action_button.py", "source_repo": "JoaoBueno/estudos-python", "split": "val", "star_events_count": 2} {"blob_id": "9f90abb3bdcaf58118bf3455587e27a8fccc0069", "bodies": ["super(EncoderImageFull, self).__init__()\nself.embed_size = embed_size\nself.no_imgnorm = no_imgnorm\nself.use_abs = use_abs\nmodel = get_model(name=cnn_type, num_classes=5607)\nmodel = torch.nn.DataParallel(model)\nmodel.to('cuda')\ncheckpoint = torch.load('/mnt/data2/betty/webvision_train/results/resnet50/5000classes_onemonth/model_best.tar')\nmodel.load_state_dict(checkpoint['state_dict'])\nprint('Successfully load the saved model at model_best.tar')\nself.cnn = model\nfor param in self.cnn.parameters():\n param.requires_grad = False\nif cnn_type.startswith('resnet'):\n self.fc = nn.Linear(self.cnn.module.fc.in_features, embed_size)\n self.cnn.module.fc = nn.Sequential()\nelse:\n print('error in chosing the architecture')\n return\nself.init_weights()", "if 'cnn.classifier.1.weight' in state_dict:\n state_dict['cnn.classifier.0.weight'] = state_dict['cnn.classifier.1.weight']\n del state_dict['cnn.classifier.1.weight']\n state_dict['cnn.classifier.0.bias'] = state_dict['cnn.classifier.1.bias']\n del state_dict['cnn.classifier.1.bias']\n state_dict['cnn.classifier.3.weight'] = state_dict['cnn.classifier.4.weight']\n del state_dict['cnn.classifier.4.weight']\n state_dict['cnn.classifier.3.bias'] = state_dict['cnn.classifier.4.bias']\n del state_dict['cnn.classifier.4.bias']\nsuper(EncoderImageFull, self).load_state_dict(state_dict)", "r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features)\nself.fc.weight.data.uniform_(-r, r)\nself.fc.bias.data.fill_(0)", "features = self.cnn(images)\nfeatures = l2norm(features)\nfeatures = self.fc(features)\nif not self.no_imgnorm:\n features = l2norm(features)\nif self.use_abs:\n features = torch.abs(features)\nreturn features"], "bodies_text": "<|body_start_0|>\n super(EncoderImageFull, self).__init__()\n self.embed_size = embed_size\n self.no_imgnorm = no_imgnorm\n self.use_abs = use_abs\n model = get_model(name=cnn_type, num_classes=5607)\n model = torch.nn.DataParallel(model)\n model.to('cuda')\n 
checkpoint = torch.load('/mnt/data2/betty/webvision_train/results/resnet50/5000classes_onemonth/model_best.tar')\n model.load_state_dict(checkpoint['state_dict'])\n print('Successfully load the saved model at model_best.tar')\n self.cnn = model\n for param in self.cnn.parameters():\n param.requires_grad = False\n if cnn_type.startswith('resnet'):\n self.fc = nn.Linear(self.cnn.module.fc.in_features, embed_size)\n self.cnn.module.fc = nn.Sequential()\n else:\n print('error in chosing the architecture')\n return\n self.init_weights()\n<|end_body_0|>\n\n<|body_start_1|>\n if 'cnn.classifier.1.weight' in state_dict:\n state_dict['cnn.classifier.0.weight'] = state_dict['cnn.classifier.1.weight']\n del state_dict['cnn.classifier.1.weight']\n state_dict['cnn.classifier.0.bias'] = state_dict['cnn.classifier.1.bias']\n del state_dict['cnn.classifier.1.bias']\n state_dict['cnn.classifier.3.weight'] = state_dict['cnn.classifier.4.weight']\n del state_dict['cnn.classifier.4.weight']\n state_dict['cnn.classifier.3.bias'] = state_dict['cnn.classifier.4.bias']\n del state_dict['cnn.classifier.4.bias']\n super(EncoderImageFull, self).load_state_dict(state_dict)\n<|end_body_1|>\n\n<|body_start_2|>\n r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features)\n self.fc.weight.data.uniform_(-r, r)\n self.fc.bias.data.fill_(0)\n<|end_body_2|>\n\n<|body_start_3|>\n features = self.cnn(images)\n features = l2norm(features)\n features = self.fc(features)\n if not self.no_imgnorm:\n features = l2norm(features)\n if self.use_abs:\n features = torch.abs(features)\n return features\n<|end_body_3|>\n", "class_docstring": "", "class_name": "EncoderImageFull", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EncoderImageFull:\n\n def __init__(self, embed_size=256, finetune=False, cnn_type='resnet50', use_abs=False, no_imgnorm=False):\n \"\"\"Load pretrained VGG19 and replace top fc layer.\"\"\"\n <|body_0|>\n\n def load_state_dict(self, load_path):\n \"\"\"Handle the models saved before commit pytorch/vision@989d52a\"\"\"\n <|body_1|>\n\n def init_weights(self):\n \"\"\"Xavier initialization for the fully connected layer\"\"\"\n <|body_2|>\n\n def forward(self, images):\n \"\"\"Extract image feature vectors.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(EncoderImageFull, self).__init__()\n self.embed_size = embed_size\n self.no_imgnorm = no_imgnorm\n self.use_abs = use_abs\n model = get_model(name=cnn_type, num_classes=5607)\n model = torch.nn.DataParallel(model)\n model.to('cuda')\n checkpoint = torch.load('/mnt/data2/betty/webvision_train/results/resnet50/5000classes_onemonth/model_best.tar')\n model.load_state_dict(checkpoint['state_dict'])\n print('Successfully load the saved model at model_best.tar')\n self.cnn = model\n for param in self.cnn.parameters():\n param.requires_grad = False\n if cnn_type.startswith('resnet'):\n self.fc = nn.Linear(self.cnn.module.fc.in_features, embed_size)\n self.cnn.module.fc = nn.Sequential()\n else:\n print('error in chosing the architecture')\n return\n self.init_weights()\n<|end_body_0|>\n\n<|body_start_1|>\n if 'cnn.classifier.1.weight' in state_dict:\n state_dict['cnn.classifier.0.weight'] = state_dict['cnn.classifier.1.weight']\n del state_dict['cnn.classifier.1.weight']\n state_dict['cnn.classifier.0.bias'] = state_dict['cnn.classifier.1.bias']\n del state_dict['cnn.classifier.1.bias']\n state_dict['cnn.classifier.3.weight'] = state_dict['cnn.classifier.4.weight']\n del 
state_dict['cnn.classifier.4.weight']\n state_dict['cnn.classifier.3.bias'] = state_dict['cnn.classifier.4.bias']\n del state_dict['cnn.classifier.4.bias']\n super(EncoderImageFull, self).load_state_dict(state_dict)\n<|end_body_1|>\n\n<|body_start_2|>\n r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features)\n self.fc.weight.data.uniform_(-r, r)\n self.fc.bias.data.fill_(0)\n<|end_body_2|>\n\n<|body_start_3|>\n features = self.cnn(images)\n features = l2norm(features)\n features = self.fc(features)\n if not self.no_imgnorm:\n features = l2norm(features)\n if self.use_abs:\n features = torch.abs(features)\n return features\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000410", "length_bytes": 22197, "license_type": "no_license", "methods": [{"docstring": "Load pretrained VGG19 and replace top fc layer.", "name": "__init__", "signature": "def __init__(self, embed_size=256, finetune=False, cnn_type='resnet50', use_abs=False, no_imgnorm=False)"}, {"docstring": "Handle the models saved before commit pytorch/vision@989d52a", "name": "load_state_dict", "signature": "def load_state_dict(self, load_path)"}, {"docstring": "Xavier initialization for the fully connected layer", "name": "init_weights", "signature": "def init_weights(self)"}, {"docstring": "Extract image feature vectors.", "name": "forward", "signature": "def forward(self, images)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_002363", "prompt": "Implement the Python class `EncoderImageFull` described below.\n\nClass description:\nImplement the EncoderImageFull class.\n\nMethod signatures and docstrings:\n- def __init__(self, embed_size=256, finetune=False, cnn_type='resnet50', use_abs=False, no_imgnorm=False): Load pretrained VGG19 and replace top fc layer.\n- def load_state_dict(self, load_path): Handle the models saved before commit pytorch/vision@989d52a\n- def init_weights(self): Xavier initialization for the fully connected layer\n- def forward(self, images): Extract image feature vectors.", "prompted_full_text": "Implement the Python class `EncoderImageFull` described below.\n\nClass description:\nImplement the EncoderImageFull class.\n\nMethod signatures and docstrings:\n- def __init__(self, embed_size=256, finetune=False, cnn_type='resnet50', use_abs=False, no_imgnorm=False): Load pretrained VGG19 and replace top fc layer.\n- def load_state_dict(self, load_path): Handle the models saved before commit pytorch/vision@989d52a\n- def init_weights(self): Xavier initialization for the fully connected layer\n- def forward(self, images): Extract image feature vectors.\n\n<|skeleton|>\nclass EncoderImageFull:\n\n def __init__(self, embed_size=256, finetune=False, cnn_type='resnet50', use_abs=False, no_imgnorm=False):\n \"\"\"Load pretrained VGG19 and replace top fc layer.\"\"\"\n <|body_0|>\n\n def load_state_dict(self, load_path):\n \"\"\"Handle the models saved before commit pytorch/vision@989d52a\"\"\"\n <|body_1|>\n\n def init_weights(self):\n \"\"\"Xavier initialization for the fully connected layer\"\"\"\n <|body_2|>\n\n def forward(self, images):\n \"\"\"Extract image feature vectors.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(EncoderImageFull, self).__init__()\n self.embed_size = embed_size\n self.no_imgnorm = no_imgnorm\n self.use_abs = use_abs\n model = get_model(name=cnn_type, num_classes=5607)\n model = torch.nn.DataParallel(model)\n model.to('cuda')\n checkpoint = 
torch.load('/mnt/data2/betty/webvision_train/results/resnet50/5000classes_onemonth/model_best.tar')\n model.load_state_dict(checkpoint['state_dict'])\n print('Successfully load the saved model at model_best.tar')\n self.cnn = model\n for param in self.cnn.parameters():\n param.requires_grad = False\n if cnn_type.startswith('resnet'):\n self.fc = nn.Linear(self.cnn.module.fc.in_features, embed_size)\n self.cnn.module.fc = nn.Sequential()\n else:\n print('error in chosing the architecture')\n return\n self.init_weights()\n<|end_body_0|>\n\n<|body_start_1|>\n if 'cnn.classifier.1.weight' in state_dict:\n state_dict['cnn.classifier.0.weight'] = state_dict['cnn.classifier.1.weight']\n del state_dict['cnn.classifier.1.weight']\n state_dict['cnn.classifier.0.bias'] = state_dict['cnn.classifier.1.bias']\n del state_dict['cnn.classifier.1.bias']\n state_dict['cnn.classifier.3.weight'] = state_dict['cnn.classifier.4.weight']\n del state_dict['cnn.classifier.4.weight']\n state_dict['cnn.classifier.3.bias'] = state_dict['cnn.classifier.4.bias']\n del state_dict['cnn.classifier.4.bias']\n super(EncoderImageFull, self).load_state_dict(state_dict)\n<|end_body_1|>\n\n<|body_start_2|>\n r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features)\n self.fc.weight.data.uniform_(-r, r)\n self.fc.bias.data.fill_(0)\n<|end_body_2|>\n\n<|body_start_3|>\n features = self.cnn(images)\n features = l2norm(features)\n features = self.fc(features)\n if not self.no_imgnorm:\n features = l2norm(features)\n if self.use_abs:\n features = torch.abs(features)\n return features\n<|end_body_3|>\n", "revision_id": "4779d33a921be0c0adaf5971ec853317eb072af1", "skeleton": "<|skeleton|>\nclass EncoderImageFull:\n\n def __init__(self, embed_size=256, finetune=False, cnn_type='resnet50', use_abs=False, no_imgnorm=False):\n \"\"\"Load pretrained VGG19 and replace top fc layer.\"\"\"\n <|body_0|>\n\n def load_state_dict(self, load_path):\n \"\"\"Handle the models saved before commit pytorch/vision@989d52a\"\"\"\n <|body_1|>\n\n def init_weights(self):\n \"\"\"Xavier initialization for the fully connected layer\"\"\"\n <|body_2|>\n\n def forward(self, images):\n \"\"\"Extract image feature vectors.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class EncoderImageFull:\n def __init__(self, embed_size=256, finetune=False, cnn_type='resnet50', use_abs=False, no_imgnorm=False):\n \"\"\"Load pretrained VGG19 and replace top fc layer.\"\"\"\n super(EncoderImageFull, self).__init__()\n self.embed_size = embed_size\n self.no_imgnorm = no_imgnorm\n self.use_abs = use_abs\n model = get_model(name=cnn_type, num_classes=5607)\n model = torch.nn.DataParallel(model)\n model.to('cuda')\n checkpoint = torch.load('/mnt/data2/betty/webvision_train/results/resnet50/5000classes_onemonth/model_best.tar')\n model.load_state_dict(checkpoint['state_dict'])\n print('Successfully load the saved model at model_best.tar')\n self.cnn = model\n for param in self.cnn.parameters():\n param.requires_grad = False\n if cnn_type.startswith('resnet'):\n self.fc = nn.Linear(self.cnn.module.fc.in_features, embed_size)\n self.cnn.module.fc = nn.Sequential()\n else:\n print('error in chosing the architecture')\n return\n self.init_weights()\n\n def load_state_dict(self, load_path):\n \"\"\"Handle the models saved before commit pytorch/vision@989d52a\"\"\"\n if 'cnn.classifier.1.weight' in state_dict:\n state_dict['cnn.classifier.0.weight'] = 
state_dict['cnn.classifier.1.weight']\n del state_dict['cnn.classifier.1.weight']\n state_dict['cnn.classifier.0.bias'] = state_dict['cnn.classifier.1.bias']\n del state_dict['cnn.classifier.1.bias']\n state_dict['cnn.classifier.3.weight'] = state_dict['cnn.classifier.4.weight']\n del state_dict['cnn.classifier.4.weight']\n state_dict['cnn.classifier.3.bias'] = state_dict['cnn.classifier.4.bias']\n del state_dict['cnn.classifier.4.bias']\n super(EncoderImageFull, self).load_state_dict(state_dict)\n\n def init_weights(self):\n \"\"\"Xavier initialization for the fully connected layer\"\"\"\n r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features)\n self.fc.weight.data.uniform_(-r, r)\n self.fc.bias.data.fill_(0)\n\n def forward(self, images):\n \"\"\"Extract image feature vectors.\"\"\"\n features = self.cnn(images)\n features = l2norm(features)\n features = self.fc(features)\n if not self.no_imgnorm:\n features = l2norm(features)\n if self.use_abs:\n features = torch.abs(features)\n return features\n", "source": "the_stack_v2_python_sparse", "source_path": "cnn/encoder.py", "source_repo": "bledem/webvision", "split": "val", "star_events_count": 0} {"blob_id": "8d3fbc72f95891fe45b86724380600c7616d8be8", "bodies": ["low = self.action_space.low\nhigh = self.action_space.high\nscale_factor = (high - low) / 2\nreloc_factor = high - scale_factor\naction = action * scale_factor + reloc_factor\naction = np.clip(action, low, high)\nreturn action", "low = self.action_space.low\nhigh = self.action_space.high\nscale_factor = (high - low) / 2\nreloc_factor = high - scale_factor\naction = (action - reloc_factor) / scale_factor\naction = np.clip(action, -1.0, 1.0)\nreturn action"], "bodies_text": "<|body_start_0|>\n low = self.action_space.low\n high = self.action_space.high\n scale_factor = (high - low) / 2\n reloc_factor = high - scale_factor\n action = action * scale_factor + reloc_factor\n action = np.clip(action, low, high)\n return action\n<|end_body_0|>\n\n<|body_start_1|>\n low = self.action_space.low\n high = self.action_space.high\n scale_factor = (high - low) / 2\n reloc_factor = high - scale_factor\n action = (action - reloc_factor) / scale_factor\n action = np.clip(action, -1.0, 1.0)\n return action\n<|end_body_1|>\n", "class_docstring": "Rescale and relocate the actions.", "class_name": "ActionNormalizer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ActionNormalizer:\n \"\"\"Rescale and relocate the actions.\"\"\"\n\n def action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"Change the range (-1, 1) to (low, high).\"\"\"\n <|body_0|>\n\n def reverse_action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"Change the range (low, high) to (-1, 1).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n low = self.action_space.low\n high = self.action_space.high\n scale_factor = (high - low) / 2\n reloc_factor = high - scale_factor\n action = action * scale_factor + reloc_factor\n action = np.clip(action, low, high)\n return action\n<|end_body_0|>\n\n<|body_start_1|>\n low = self.action_space.low\n high = self.action_space.high\n scale_factor = (high - low) / 2\n reloc_factor = high - scale_factor\n action = (action - reloc_factor) / scale_factor\n action = np.clip(action, -1.0, 1.0)\n return action\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000411", "length_bytes": 13315, "license_type": "no_license", "methods": [{"docstring": "Change the range (-1, 1) to (low, high).", "name": "action", 
"signature": "def action(self, action: np.ndarray) -> np.ndarray"}, {"docstring": "Change the range (low, high) to (-1, 1).", "name": "reverse_action", "signature": "def reverse_action(self, action: np.ndarray) -> np.ndarray"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005990", "prompt": "Implement the Python class `ActionNormalizer` described below.\n\nClass description:\nRescale and relocate the actions.\n\nMethod signatures and docstrings:\n- def action(self, action: np.ndarray) -> np.ndarray: Change the range (-1, 1) to (low, high).\n- def reverse_action(self, action: np.ndarray) -> np.ndarray: Change the range (low, high) to (-1, 1).", "prompted_full_text": "Implement the Python class `ActionNormalizer` described below.\n\nClass description:\nRescale and relocate the actions.\n\nMethod signatures and docstrings:\n- def action(self, action: np.ndarray) -> np.ndarray: Change the range (-1, 1) to (low, high).\n- def reverse_action(self, action: np.ndarray) -> np.ndarray: Change the range (low, high) to (-1, 1).\n\n<|skeleton|>\nclass ActionNormalizer:\n \"\"\"Rescale and relocate the actions.\"\"\"\n\n def action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"Change the range (-1, 1) to (low, high).\"\"\"\n <|body_0|>\n\n def reverse_action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"Change the range (low, high) to (-1, 1).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n low = self.action_space.low\n high = self.action_space.high\n scale_factor = (high - low) / 2\n reloc_factor = high - scale_factor\n action = action * scale_factor + reloc_factor\n action = np.clip(action, low, high)\n return action\n<|end_body_0|>\n\n<|body_start_1|>\n low = self.action_space.low\n high = self.action_space.high\n scale_factor = (high - low) / 2\n reloc_factor = high - scale_factor\n action = (action - reloc_factor) / scale_factor\n action = np.clip(action, -1.0, 1.0)\n return action\n<|end_body_1|>\n", "revision_id": "14ddfb81295c349acc2ede7588ebc73c235246c0", "skeleton": "<|skeleton|>\nclass ActionNormalizer:\n \"\"\"Rescale and relocate the actions.\"\"\"\n\n def action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"Change the range (-1, 1) to (low, high).\"\"\"\n <|body_0|>\n\n def reverse_action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"Change the range (low, high) to (-1, 1).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ActionNormalizer:\n \"\"\"Rescale and relocate the actions.\"\"\"\n\n def action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"Change the range (-1, 1) to (low, high).\"\"\"\n low = self.action_space.low\n high = self.action_space.high\n scale_factor = (high - low) / 2\n reloc_factor = high - scale_factor\n action = action * scale_factor + reloc_factor\n action = np.clip(action, low, high)\n return action\n\n def reverse_action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"Change the range (low, high) to (-1, 1).\"\"\"\n low = self.action_space.low\n high = self.action_space.high\n scale_factor = (high - low) / 2\n reloc_factor = high - scale_factor\n action = (action - reloc_factor) / scale_factor\n action = np.clip(action, -1.0, 1.0)\n return action\n", "source": "the_stack_v2_python_sparse", "source_path": "PPO_GAE_TEST/PPO_gae_test2.py", "source_repo": "namjiwon1023/Reinforcement_learning", "split": "val", "star_events_count": 2} {"blob_id": "490c541e1e7c845c400c88e892bbf4e0fb7e22cd", "bodies": ["if 
not head or not head.next or (not head.next.next):\n return head\neven, odd = (head, head.next)\neven_tail, odd_tail = (even, odd)\ni = 2\nnode = head.next.next\nwhile node:\n if i % 2 == 0:\n even_tail.next = node\n even_tail = node\n else:\n odd_tail.next = node\n odd_tail = node\n i += 1\n node = node.next\neven_tail.next = odd\nodd_tail.next = None\nreturn even", "heads = [ListNode(), ListNode()]\ntails = heads[:]\ni = 0\nwhile head:\n tails[i % 2].next = head\n tails[i % 2] = tails[i % 2].next\n head = head.next\n i += 1\ntails[0].next = heads[1].next\ntails[1].next = None\nreturn heads[0].next", "even = ListNode()\nodd = ListNode()\npointers = [even, odd]\ni = 0\nwhile head:\n pointers[i % 2].next = pointers[i % 2] = head\n head = head.next\n i = i + 1\npointers[0].next = odd.next\npointers[1].next = None\nreturn even.next", "eohead = [ListNode(), ListNode()]\neotail = eohead[:]\ni = 0\nnode = head\nwhile node:\n i += 1\n eotail[i % 2].next = node\n eotail[i % 2] = node\n node = node.next\neotail[0].next = None\neotail[1].next = eohead[0].next\nreturn eohead[1].next"], "bodies_text": "<|body_start_0|>\n if not head or not head.next or (not head.next.next):\n return head\n even, odd = (head, head.next)\n even_tail, odd_tail = (even, odd)\n i = 2\n node = head.next.next\n while node:\n if i % 2 == 0:\n even_tail.next = node\n even_tail = node\n else:\n odd_tail.next = node\n odd_tail = node\n i += 1\n node = node.next\n even_tail.next = odd\n odd_tail.next = None\n return even\n<|end_body_0|>\n\n<|body_start_1|>\n heads = [ListNode(), ListNode()]\n tails = heads[:]\n i = 0\n while head:\n tails[i % 2].next = head\n tails[i % 2] = tails[i % 2].next\n head = head.next\n i += 1\n tails[0].next = heads[1].next\n tails[1].next = None\n return heads[0].next\n<|end_body_1|>\n\n<|body_start_2|>\n even = ListNode()\n odd = ListNode()\n pointers = [even, odd]\n i = 0\n while head:\n pointers[i % 2].next = pointers[i % 2] = head\n head = head.next\n i = i + 1\n pointers[0].next = odd.next\n pointers[1].next = None\n return even.next\n<|end_body_2|>\n\n<|body_start_3|>\n eohead = [ListNode(), ListNode()]\n eotail = eohead[:]\n i = 0\n node = head\n while node:\n i += 1\n eotail[i % 2].next = node\n eotail[i % 2] = node\n node = node.next\n eotail[0].next = None\n eotail[1].next = eohead[0].next\n return eohead[1].next\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def oddEvenList(self, head: ListNode) -> ListNode:\n \"\"\"04/06/2020 23:13\"\"\"\n <|body_0|>\n\n def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \"\"\"Dec 09, 2021 10:13\"\"\"\n <|body_1|>\n\n def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \"\"\"Dec 11, 2022 16:08\"\"\"\n <|body_2|>\n\n def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \"\"\"Feb 18, 2023 19:23\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not head or not head.next or (not head.next.next):\n return head\n even, odd = (head, head.next)\n even_tail, odd_tail = (even, odd)\n i = 2\n node = head.next.next\n while node:\n if i % 2 == 0:\n even_tail.next = node\n even_tail = node\n else:\n odd_tail.next = node\n odd_tail = node\n i += 1\n node = node.next\n even_tail.next = odd\n odd_tail.next = None\n return even\n<|end_body_0|>\n\n<|body_start_1|>\n heads = [ListNode(), ListNode()]\n tails = heads[:]\n i = 0\n while head:\n tails[i % 
2].next = head\n tails[i % 2] = tails[i % 2].next\n head = head.next\n i += 1\n tails[0].next = heads[1].next\n tails[1].next = None\n return heads[0].next\n<|end_body_1|>\n\n<|body_start_2|>\n even = ListNode()\n odd = ListNode()\n pointers = [even, odd]\n i = 0\n while head:\n pointers[i % 2].next = pointers[i % 2] = head\n head = head.next\n i = i + 1\n pointers[0].next = odd.next\n pointers[1].next = None\n return even.next\n<|end_body_2|>\n\n<|body_start_3|>\n eohead = [ListNode(), ListNode()]\n eotail = eohead[:]\n i = 0\n node = head\n while node:\n i += 1\n eotail[i % 2].next = node\n eotail[i % 2] = node\n node = node.next\n eotail[0].next = None\n eotail[1].next = eohead[0].next\n return eohead[1].next\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000412", "length_bytes": 3350, "license_type": "no_license", "methods": [{"docstring": "04/06/2020 23:13", "name": "oddEvenList", "signature": "def oddEvenList(self, head: ListNode) -> ListNode"}, {"docstring": "Dec 09, 2021 10:13", "name": "oddEvenList", "signature": "def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]"}, {"docstring": "Dec 11, 2022 16:08", "name": "oddEvenList", "signature": "def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]"}, {"docstring": "Feb 18, 2023 19:23", "name": "oddEvenList", "signature": "def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]"}], "n_methods": 4, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def oddEvenList(self, head: ListNode) -> ListNode: 04/06/2020 23:13\n- def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]: Dec 09, 2021 10:13\n- def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]: Dec 11, 2022 16:08\n- def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]: Feb 18, 2023 19:23", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def oddEvenList(self, head: ListNode) -> ListNode: 04/06/2020 23:13\n- def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]: Dec 09, 2021 10:13\n- def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]: Dec 11, 2022 16:08\n- def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]: Feb 18, 2023 19:23\n\n<|skeleton|>\nclass Solution:\n\n def oddEvenList(self, head: ListNode) -> ListNode:\n \"\"\"04/06/2020 23:13\"\"\"\n <|body_0|>\n\n def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \"\"\"Dec 09, 2021 10:13\"\"\"\n <|body_1|>\n\n def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \"\"\"Dec 11, 2022 16:08\"\"\"\n <|body_2|>\n\n def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \"\"\"Feb 18, 2023 19:23\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not head or not head.next or (not head.next.next):\n return head\n even, odd = (head, head.next)\n even_tail, odd_tail = (even, odd)\n i = 2\n node = head.next.next\n while node:\n if i % 2 == 0:\n even_tail.next = node\n even_tail = node\n else:\n odd_tail.next = node\n odd_tail = node\n i += 1\n node = node.next\n even_tail.next = odd\n odd_tail.next = None\n return even\n<|end_body_0|>\n\n<|body_start_1|>\n heads = [ListNode(), ListNode()]\n tails = heads[:]\n i = 0\n while head:\n tails[i % 2].next = head\n tails[i % 2] = tails[i % 2].next\n 
head = head.next\n i += 1\n tails[0].next = heads[1].next\n tails[1].next = None\n return heads[0].next\n<|end_body_1|>\n\n<|body_start_2|>\n even = ListNode()\n odd = ListNode()\n pointers = [even, odd]\n i = 0\n while head:\n pointers[i % 2].next = pointers[i % 2] = head\n head = head.next\n i = i + 1\n pointers[0].next = odd.next\n pointers[1].next = None\n return even.next\n<|end_body_2|>\n\n<|body_start_3|>\n eohead = [ListNode(), ListNode()]\n eotail = eohead[:]\n i = 0\n node = head\n while node:\n i += 1\n eotail[i % 2].next = node\n eotail[i % 2] = node\n node = node.next\n eotail[0].next = None\n eotail[1].next = eohead[0].next\n return eohead[1].next\n<|end_body_3|>\n", "revision_id": "1389a009a02e90e8700a7a00e0b7f797c129cdf4", "skeleton": "<|skeleton|>\nclass Solution:\n\n def oddEvenList(self, head: ListNode) -> ListNode:\n \"\"\"04/06/2020 23:13\"\"\"\n <|body_0|>\n\n def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \"\"\"Dec 09, 2021 10:13\"\"\"\n <|body_1|>\n\n def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \"\"\"Dec 11, 2022 16:08\"\"\"\n <|body_2|>\n\n def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \"\"\"Feb 18, 2023 19:23\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def oddEvenList(self, head: ListNode) -> ListNode:\n \"\"\"04/06/2020 23:13\"\"\"\n if not head or not head.next or (not head.next.next):\n return head\n even, odd = (head, head.next)\n even_tail, odd_tail = (even, odd)\n i = 2\n node = head.next.next\n while node:\n if i % 2 == 0:\n even_tail.next = node\n even_tail = node\n else:\n odd_tail.next = node\n odd_tail = node\n i += 1\n node = node.next\n even_tail.next = odd\n odd_tail.next = None\n return even\n\n def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \"\"\"Dec 09, 2021 10:13\"\"\"\n heads = [ListNode(), ListNode()]\n tails = heads[:]\n i = 0\n while head:\n tails[i % 2].next = head\n tails[i % 2] = tails[i % 2].next\n head = head.next\n i += 1\n tails[0].next = heads[1].next\n tails[1].next = None\n return heads[0].next\n\n def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \"\"\"Dec 11, 2022 16:08\"\"\"\n even = ListNode()\n odd = ListNode()\n pointers = [even, odd]\n i = 0\n while head:\n pointers[i % 2].next = pointers[i % 2] = head\n head = head.next\n i = i + 1\n pointers[0].next = odd.next\n pointers[1].next = None\n return even.next\n\n def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \"\"\"Feb 18, 2023 19:23\"\"\"\n eohead = [ListNode(), ListNode()]\n eotail = eohead[:]\n i = 0\n node = head\n while node:\n i += 1\n eotail[i % 2].next = node\n eotail[i % 2] = node\n node = node.next\n eotail[0].next = None\n eotail[1].next = eohead[0].next\n return eohead[1].next\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/solved/328_Odd_Even_Linked_List/solution.py", "source_repo": "sungminoh/algorithms", "split": "val", "star_events_count": 0} {"blob_id": "1c64ed9b5b9863a52944e5abddea62a9e264b8ce", "bodies": ["self.negword = 'moins '\nself.pointword = 'virgule'\nself.exclude_title = ['et', 'virgule', 'moins']\nself.mid_numwords = [(1000, 'mille'), (100, 'cent'), (80, 'quatre-vingts'), (60, 'soixante'), (50, 'cinquante'), (40, 'quarante'), (30, 'trente')]\nself.low_numwords = ['vingt', 'dix-neuf', 'dix-huit', 'dix-sept', 'seize', 'quinze', 'quatorze', 
'treize', 'douze', 'onze', 'dix', 'neuf', 'huit', 'sept', 'six', 'cinq', 'quatre', 'trois', 'deux', 'un', 'zéro']", "ctext, cnum, ntext, nnum = curr + next\nif cnum == 1:\n if nnum < 1000000:\n return next\nelse:\n if (not (cnum - 80) % 100 or not cnum % 100) and nnum < 1000000 and (ctext[-1] == 's'):\n ctext = ctext[:-1]\n if cnum < 1000 and nnum != 1000 and (ntext[-1] != 's') and (not nnum % 100):\n ntext += 's'\nif nnum < cnum < 100 and nnum % 10 == 1 and (cnum != 80):\n return ('%s-et-%s' % (ctext, ntext), cnum + nnum)\nif nnum >= 1000000 or cnum >= 1000000:\n return ('%s %s' % (ctext, ntext), cnum + nnum)\nreturn ('%s-%s' % (ctext, ntext), cnum + nnum)", "self._verify_ordinal(value)\nif value == 1:\n return 'premier'\nword = self.cardinal(value)\nif word[-1] == 'e' or word[-1] == 's':\n word = word[:-1]\nif word[-1] == 'f':\n word = word[:-1] + 'v'\nreturn word + 'ième'", "self._verify_ordinal(value)\nout = str(value)\nout += {'1': 'er'}.get(out[-1], 'me')\nreturn out", "hightxt = 'Euro/s'\nif old:\n hightxt = 'franc/s'\nreturn self._split(val, hightxt=hightxt, lowtxt='centime/s', jointxt='et', longval=longval)"], "bodies_text": "<|body_start_0|>\n self.negword = 'moins '\n self.pointword = 'virgule'\n self.exclude_title = ['et', 'virgule', 'moins']\n self.mid_numwords = [(1000, 'mille'), (100, 'cent'), (80, 'quatre-vingts'), (60, 'soixante'), (50, 'cinquante'), (40, 'quarante'), (30, 'trente')]\n self.low_numwords = ['vingt', 'dix-neuf', 'dix-huit', 'dix-sept', 'seize', 'quinze', 'quatorze', 'treize', 'douze', 'onze', 'dix', 'neuf', 'huit', 'sept', 'six', 'cinq', 'quatre', 'trois', 'deux', 'un', 'zéro']\n<|end_body_0|>\n\n<|body_start_1|>\n ctext, cnum, ntext, nnum = curr + next\n if cnum == 1:\n if nnum < 1000000:\n return next\n else:\n if (not (cnum - 80) % 100 or not cnum % 100) and nnum < 1000000 and (ctext[-1] == 's'):\n ctext = ctext[:-1]\n if cnum < 1000 and nnum != 1000 and (ntext[-1] != 's') and (not nnum % 100):\n ntext += 's'\n if nnum < cnum < 100 and nnum % 10 == 1 and (cnum != 80):\n return ('%s-et-%s' % (ctext, ntext), cnum + nnum)\n if nnum >= 1000000 or cnum >= 1000000:\n return ('%s %s' % (ctext, ntext), cnum + nnum)\n return ('%s-%s' % (ctext, ntext), cnum + nnum)\n<|end_body_1|>\n\n<|body_start_2|>\n self._verify_ordinal(value)\n if value == 1:\n return 'premier'\n word = self.cardinal(value)\n if word[-1] == 'e' or word[-1] == 's':\n word = word[:-1]\n if word[-1] == 'f':\n word = word[:-1] + 'v'\n return word + 'ième'\n<|end_body_2|>\n\n<|body_start_3|>\n self._verify_ordinal(value)\n out = str(value)\n out += {'1': 'er'}.get(out[-1], 'me')\n return out\n<|end_body_3|>\n\n<|body_start_4|>\n hightxt = 'Euro/s'\n if old:\n hightxt = 'franc/s'\n return self._split(val, hightxt=hightxt, lowtxt='centime/s', jointxt='et', longval=longval)\n<|end_body_4|>\n", "class_docstring": "NumWord FR", "class_name": "NumWordFR", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NumWordFR:\n \"\"\"NumWord FR\"\"\"\n\n def _setup(self):\n \"\"\"Setup\"\"\"\n <|body_0|>\n\n def _merge(self, curr, next):\n \"\"\"Merge\"\"\"\n <|body_1|>\n\n def ordinal(self, value):\n \"\"\"Convert to ordinal\"\"\"\n <|body_2|>\n\n def ordinal_number(self, value):\n \"\"\"Convert to ordinal number\"\"\"\n <|body_3|>\n\n def currency(self, val, longval=True, old=False):\n \"\"\"Convert to currency\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.negword = 'moins '\n self.pointword = 'virgule'\n self.exclude_title = 
['et', 'virgule', 'moins']\n self.mid_numwords = [(1000, 'mille'), (100, 'cent'), (80, 'quatre-vingts'), (60, 'soixante'), (50, 'cinquante'), (40, 'quarante'), (30, 'trente')]\n self.low_numwords = ['vingt', 'dix-neuf', 'dix-huit', 'dix-sept', 'seize', 'quinze', 'quatorze', 'treize', 'douze', 'onze', 'dix', 'neuf', 'huit', 'sept', 'six', 'cinq', 'quatre', 'trois', 'deux', 'un', 'zéro']\n<|end_body_0|>\n\n<|body_start_1|>\n ctext, cnum, ntext, nnum = curr + next\n if cnum == 1:\n if nnum < 1000000:\n return next\n else:\n if (not (cnum - 80) % 100 or not cnum % 100) and nnum < 1000000 and (ctext[-1] == 's'):\n ctext = ctext[:-1]\n if cnum < 1000 and nnum != 1000 and (ntext[-1] != 's') and (not nnum % 100):\n ntext += 's'\n if nnum < cnum < 100 and nnum % 10 == 1 and (cnum != 80):\n return ('%s-et-%s' % (ctext, ntext), cnum + nnum)\n if nnum >= 1000000 or cnum >= 1000000:\n return ('%s %s' % (ctext, ntext), cnum + nnum)\n return ('%s-%s' % (ctext, ntext), cnum + nnum)\n<|end_body_1|>\n\n<|body_start_2|>\n self._verify_ordinal(value)\n if value == 1:\n return 'premier'\n word = self.cardinal(value)\n if word[-1] == 'e' or word[-1] == 's':\n word = word[:-1]\n if word[-1] == 'f':\n word = word[:-1] + 'v'\n return word + 'ième'\n<|end_body_2|>\n\n<|body_start_3|>\n self._verify_ordinal(value)\n out = str(value)\n out += {'1': 'er'}.get(out[-1], 'me')\n return out\n<|end_body_3|>\n\n<|body_start_4|>\n hightxt = 'Euro/s'\n if old:\n hightxt = 'franc/s'\n return self._split(val, hightxt=hightxt, lowtxt='centime/s', jointxt='et', longval=longval)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000413", "length_bytes": 3430, "license_type": "permissive", "methods": [{"docstring": "Setup", "name": "_setup", "signature": "def _setup(self)"}, {"docstring": "Merge", "name": "_merge", "signature": "def _merge(self, curr, next)"}, {"docstring": "Convert to ordinal", "name": "ordinal", "signature": "def ordinal(self, value)"}, {"docstring": "Convert to ordinal number", "name": "ordinal_number", "signature": "def ordinal_number(self, value)"}, {"docstring": "Convert to currency", "name": "currency", "signature": "def currency(self, val, longval=True, old=False)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_005555", "prompt": "Implement the Python class `NumWordFR` described below.\n\nClass description:\nNumWord FR\n\nMethod signatures and docstrings:\n- def _setup(self): Setup\n- def _merge(self, curr, next): Merge\n- def ordinal(self, value): Convert to ordinal\n- def ordinal_number(self, value): Convert to ordinal number\n- def currency(self, val, longval=True, old=False): Convert to currency", "prompted_full_text": "Implement the Python class `NumWordFR` described below.\n\nClass description:\nNumWord FR\n\nMethod signatures and docstrings:\n- def _setup(self): Setup\n- def _merge(self, curr, next): Merge\n- def ordinal(self, value): Convert to ordinal\n- def ordinal_number(self, value): Convert to ordinal number\n- def currency(self, val, longval=True, old=False): Convert to currency\n\n<|skeleton|>\nclass NumWordFR:\n \"\"\"NumWord FR\"\"\"\n\n def _setup(self):\n \"\"\"Setup\"\"\"\n <|body_0|>\n\n def _merge(self, curr, next):\n \"\"\"Merge\"\"\"\n <|body_1|>\n\n def ordinal(self, value):\n \"\"\"Convert to ordinal\"\"\"\n <|body_2|>\n\n def ordinal_number(self, value):\n \"\"\"Convert to ordinal number\"\"\"\n <|body_3|>\n\n def currency(self, val, longval=True, old=False):\n \"\"\"Convert to currency\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n 
self.negword = 'moins '\n self.pointword = 'virgule'\n self.exclude_title = ['et', 'virgule', 'moins']\n self.mid_numwords = [(1000, 'mille'), (100, 'cent'), (80, 'quatre-vingts'), (60, 'soixante'), (50, 'cinquante'), (40, 'quarante'), (30, 'trente')]\n self.low_numwords = ['vingt', 'dix-neuf', 'dix-huit', 'dix-sept', 'seize', 'quinze', 'quatorze', 'treize', 'douze', 'onze', 'dix', 'neuf', 'huit', 'sept', 'six', 'cinq', 'quatre', 'trois', 'deux', 'un', 'zéro']\n<|end_body_0|>\n\n<|body_start_1|>\n ctext, cnum, ntext, nnum = curr + next\n if cnum == 1:\n if nnum < 1000000:\n return next\n else:\n if (not (cnum - 80) % 100 or not cnum % 100) and nnum < 1000000 and (ctext[-1] == 's'):\n ctext = ctext[:-1]\n if cnum < 1000 and nnum != 1000 and (ntext[-1] != 's') and (not nnum % 100):\n ntext += 's'\n if nnum < cnum < 100 and nnum % 10 == 1 and (cnum != 80):\n return ('%s-et-%s' % (ctext, ntext), cnum + nnum)\n if nnum >= 1000000 or cnum >= 1000000:\n return ('%s %s' % (ctext, ntext), cnum + nnum)\n return ('%s-%s' % (ctext, ntext), cnum + nnum)\n<|end_body_1|>\n\n<|body_start_2|>\n self._verify_ordinal(value)\n if value == 1:\n return 'premier'\n word = self.cardinal(value)\n if word[-1] == 'e' or word[-1] == 's':\n word = word[:-1]\n if word[-1] == 'f':\n word = word[:-1] + 'v'\n return word + 'ième'\n<|end_body_2|>\n\n<|body_start_3|>\n self._verify_ordinal(value)\n out = str(value)\n out += {'1': 'er'}.get(out[-1], 'me')\n return out\n<|end_body_3|>\n\n<|body_start_4|>\n hightxt = 'Euro/s'\n if old:\n hightxt = 'franc/s'\n return self._split(val, hightxt=hightxt, lowtxt='centime/s', jointxt='et', longval=longval)\n<|end_body_4|>\n", "revision_id": "bdf0d633663d289a6cb9ed10c1529afb086d410f", "skeleton": "<|skeleton|>\nclass NumWordFR:\n \"\"\"NumWord FR\"\"\"\n\n def _setup(self):\n \"\"\"Setup\"\"\"\n <|body_0|>\n\n def _merge(self, curr, next):\n \"\"\"Merge\"\"\"\n <|body_1|>\n\n def ordinal(self, value):\n \"\"\"Convert to ordinal\"\"\"\n <|body_2|>\n\n def ordinal_number(self, value):\n \"\"\"Convert to ordinal number\"\"\"\n <|body_3|>\n\n def currency(self, val, longval=True, old=False):\n \"\"\"Convert to currency\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NumWordFR:\n \"\"\"NumWord FR\"\"\"\n\n def _setup(self):\n \"\"\"Setup\"\"\"\n self.negword = 'moins '\n self.pointword = 'virgule'\n self.exclude_title = ['et', 'virgule', 'moins']\n self.mid_numwords = [(1000, 'mille'), (100, 'cent'), (80, 'quatre-vingts'), (60, 'soixante'), (50, 'cinquante'), (40, 'quarante'), (30, 'trente')]\n self.low_numwords = ['vingt', 'dix-neuf', 'dix-huit', 'dix-sept', 'seize', 'quinze', 'quatorze', 'treize', 'douze', 'onze', 'dix', 'neuf', 'huit', 'sept', 'six', 'cinq', 'quatre', 'trois', 'deux', 'un', 'zéro']\n\n def _merge(self, curr, next):\n \"\"\"Merge\"\"\"\n ctext, cnum, ntext, nnum = curr + next\n if cnum == 1:\n if nnum < 1000000:\n return next\n else:\n if (not (cnum - 80) % 100 or not cnum % 100) and nnum < 1000000 and (ctext[-1] == 's'):\n ctext = ctext[:-1]\n if cnum < 1000 and nnum != 1000 and (ntext[-1] != 's') and (not nnum % 100):\n ntext += 's'\n if nnum < cnum < 100 and nnum % 10 == 1 and (cnum != 80):\n return ('%s-et-%s' % (ctext, ntext), cnum + nnum)\n if nnum >= 1000000 or cnum >= 1000000:\n return ('%s %s' % (ctext, ntext), cnum + nnum)\n return ('%s-%s' % (ctext, ntext), cnum + nnum)\n\n def ordinal(self, value):\n \"\"\"Convert to ordinal\"\"\"\n 
self._verify_ordinal(value)\n        if value == 1:\n            return 'premier'\n        word = self.cardinal(value)\n        if word[-1] == 'e' or word[-1] == 's':\n            word = word[:-1]\n        if word[-1] == 'f':\n            word = word[:-1] + 'v'\n        return word + 'ième'\n\n    def ordinal_number(self, value):\n        \"\"\"Convert to ordinal number\"\"\"\n        self._verify_ordinal(value)\n        out = str(value)\n        out += {'1': 'er'}.get(out[-1], 'me')\n        return out\n\n    def currency(self, val, longval=True, old=False):\n        \"\"\"Convert to currency\"\"\"\n        hightxt = 'Euro/s'\n        if old:\n            hightxt = 'franc/s'\n        return self._split(val, hightxt=hightxt, lowtxt='centime/s', jointxt='et', longval=longval)\n", "source": "the_stack_v2_python_sparse", "source_path": "venv/lib/python3.7/site-packages/numword/numword_fr.py", "source_repo": "Spam-Slayers/Newsify", "split": "val", "star_events_count": 0}
{"blob_id": "e33c1d51dcdd56cec88fb87ea375a93e7bc3b41b", "bodies": ["from gui.main_form import MainForm\nself.main_form: MainForm = main_form\nself.menu = None\nself.file_menu = None\nself.report_menu = None\nself.scoring_menu = None", "self.menu = tkinter.Menu(self.main_form.root)\nself.main_form.root.config(menu=self.menu)\nself.file_menu = tkinter.Menu(self.menu, tearoff=False)\nself.menu.add_cascade(label=FILE_MENU, underline=0, menu=self.file_menu)\nself.file_menu.add_command(label=FILE_OPEN, command=self.main_form.on_open, accelerator='Strg+O')\nself.file_menu.add_command(label=FILE_REFRESH, command=self.main_form.on_refresh, accelerator='Strg+R')\nself.file_menu.add_separator()\nself.file_menu.add_command(label=FILE_QUIT, command=self.main_form.on_quit)\nself.report_menu = tkinter.Menu(self.menu, tearoff=False)\nself.menu.add_cascade(label=REPORT_MENU, underline=0, menu=self.report_menu)\nself.report_menu.add_command(label=REPORT_CLUB_TABLE, command=self.main_form.on_report_club)\nself.report_menu.add_command(label=REPORT_GROUP, command=self.main_form.on_report_group)\nself.report_menu.add_command(label=REPORT_STATIONS, command=self.main_form.on_report_stations)\nself.report_menu.add_command(label=REPORT_VALUES, command=self.main_form.on_create_value_tables)\nself.scoring_menu = tkinter.Menu(self.menu, tearoff=False)\nself.menu.add_cascade(label=SCORING_MENU, underline=0, menu=self.scoring_menu)\nself.scoring_menu.add_command(label=SCORING_REFRESH, command=self.main_form.on_scoring_refresh, accelerator='F5')\nself.scoring_menu.add_command(label=SCORING_CREATE, command=self.main_form.on_scoring_create)\nself.main_form.root.bind_all('<Control-o>', self.main_form.on_open)\nself.main_form.root.bind_all('<Control-r>', self.main_form.on_refresh)\nself.main_form.root.bind_all('<F5>', self.main_form.on_scoring_refresh)"], "bodies_text": "<|body_start_0|>\n    from gui.main_form import MainForm\n    self.main_form: MainForm = main_form\n    self.menu = None\n    self.file_menu = None\n    self.report_menu = None\n    self.scoring_menu = None\n<|end_body_0|>\n\n<|body_start_1|>\n    self.menu = tkinter.Menu(self.main_form.root)\n    self.main_form.root.config(menu=self.menu)\n    self.file_menu = tkinter.Menu(self.menu, tearoff=False)\n    self.menu.add_cascade(label=FILE_MENU, underline=0, menu=self.file_menu)\n    self.file_menu.add_command(label=FILE_OPEN, command=self.main_form.on_open, accelerator='Strg+O')\n    self.file_menu.add_command(label=FILE_REFRESH, command=self.main_form.on_refresh, accelerator='Strg+R')\n    self.file_menu.add_separator()\n    self.file_menu.add_command(label=FILE_QUIT, command=self.main_form.on_quit)\n    self.report_menu = tkinter.Menu(self.menu, tearoff=False)\n    self.menu.add_cascade(label=REPORT_MENU, underline=0, menu=self.report_menu)\n    self.report_menu.add_command(label=REPORT_CLUB_TABLE, command=self.main_form.on_report_club)\n    self.report_menu.add_command(label=REPORT_GROUP, command=self.main_form.on_report_group)\n    self.report_menu.add_command(label=REPORT_STATIONS, command=self.main_form.on_report_stations)\n    self.report_menu.add_command(label=REPORT_VALUES, command=self.main_form.on_create_value_tables)\n    self.scoring_menu = tkinter.Menu(self.menu, tearoff=False)\n    self.menu.add_cascade(label=SCORING_MENU, underline=0, menu=self.scoring_menu)\n    self.scoring_menu.add_command(label=SCORING_REFRESH, command=self.main_form.on_scoring_refresh, accelerator='F5')\n    self.scoring_menu.add_command(label=SCORING_CREATE, command=self.main_form.on_scoring_create)\n    self.main_form.root.bind_all('<Control-o>', self.main_form.on_open)\n    self.main_form.root.bind_all('<Control-r>', self.main_form.on_refresh)\n    self.main_form.root.bind_all('<F5>', self.main_form.on_scoring_refresh)\n<|end_body_1|>\n",
"class_docstring": "Menü-Leiste der MainForm", "class_name": "Menu", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Menu:\n    \"\"\"Menü-Leiste der MainForm\"\"\"\n\n    def __init__(self, main_form):\n        \"\"\"Konstruktor Args: main_form (MainForm): MainForm\"\"\"\n        <|body_0|>\n\n    def create(self):\n        \"\"\"Erstellen des Menüs\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    from gui.main_form import MainForm\n    self.main_form: MainForm = main_form\n    self.menu = None\n    self.file_menu = None\n    self.report_menu = None\n    self.scoring_menu = None\n<|end_body_0|>\n\n<|body_start_1|>\n    self.menu = tkinter.Menu(self.main_form.root)\n    self.main_form.root.config(menu=self.menu)\n    self.file_menu = tkinter.Menu(self.menu, tearoff=False)\n    self.menu.add_cascade(label=FILE_MENU, underline=0, menu=self.file_menu)\n    self.file_menu.add_command(label=FILE_OPEN, command=self.main_form.on_open, accelerator='Strg+O')\n    self.file_menu.add_command(label=FILE_REFRESH, command=self.main_form.on_refresh, accelerator='Strg+R')\n    self.file_menu.add_separator()\n    self.file_menu.add_command(label=FILE_QUIT, command=self.main_form.on_quit)\n    self.report_menu = tkinter.Menu(self.menu, tearoff=False)\n    self.menu.add_cascade(label=REPORT_MENU, underline=0, menu=self.report_menu)\n    self.report_menu.add_command(label=REPORT_CLUB_TABLE, command=self.main_form.on_report_club)\n    self.report_menu.add_command(label=REPORT_GROUP, command=self.main_form.on_report_group)\n    self.report_menu.add_command(label=REPORT_STATIONS, command=self.main_form.on_report_stations)\n    self.report_menu.add_command(label=REPORT_VALUES, command=self.main_form.on_create_value_tables)\n    self.scoring_menu = tkinter.Menu(self.menu, tearoff=False)\n    self.menu.add_cascade(label=SCORING_MENU, underline=0, menu=self.scoring_menu)\n    self.scoring_menu.add_command(label=SCORING_REFRESH, command=self.main_form.on_scoring_refresh, accelerator='F5')\n    self.scoring_menu.add_command(label=SCORING_CREATE, command=self.main_form.on_scoring_create)\n    self.main_form.root.bind_all('<Control-o>', self.main_form.on_open)\n    self.main_form.root.bind_all('<Control-r>', self.main_form.on_refresh)\n    self.main_form.root.bind_all('<F5>', self.main_form.on_scoring_refresh)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000414", "length_bytes": 3055, "license_type": "no_license", "methods": [{"docstring": "Konstruktor Args: main_form (MainForm): MainForm", "name": "__init__", "signature": "def __init__(self, main_form)"}, {"docstring": "Erstellen des Menüs", "name": "create", "signature": "def create(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006114", "prompt": "Implement the Python class `Menu` described below.\n\nClass description:\nMenü-Leiste der MainForm\n\nMethod signatures and docstrings:\n- def __init__(self, main_form): Konstruktor Args: main_form (MainForm): MainForm\n- def create(self): Erstellen des Menüs",
"prompted_full_text": "Implement the Python class `Menu` described below.\n\nClass description:\nMenü-Leiste der MainForm\n\nMethod signatures and docstrings:\n- def __init__(self, main_form): Konstruktor Args: main_form (MainForm): MainForm\n- def create(self): Erstellen des Menüs\n\n<|skeleton|>\nclass Menu:\n    \"\"\"Menü-Leiste der MainForm\"\"\"\n\n    def __init__(self, main_form):\n        \"\"\"Konstruktor Args: main_form (MainForm): MainForm\"\"\"\n        <|body_0|>\n\n    def create(self):\n        \"\"\"Erstellen des Menüs\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    from gui.main_form import MainForm\n    self.main_form: MainForm = main_form\n    self.menu = None\n    self.file_menu = None\n    self.report_menu = None\n    self.scoring_menu = None\n<|end_body_0|>\n\n<|body_start_1|>\n    self.menu = tkinter.Menu(self.main_form.root)\n    self.main_form.root.config(menu=self.menu)\n    self.file_menu = tkinter.Menu(self.menu, tearoff=False)\n    self.menu.add_cascade(label=FILE_MENU, underline=0, menu=self.file_menu)\n    self.file_menu.add_command(label=FILE_OPEN, command=self.main_form.on_open, accelerator='Strg+O')\n    self.file_menu.add_command(label=FILE_REFRESH, command=self.main_form.on_refresh, accelerator='Strg+R')\n    self.file_menu.add_separator()\n    self.file_menu.add_command(label=FILE_QUIT, command=self.main_form.on_quit)\n    self.report_menu = tkinter.Menu(self.menu, tearoff=False)\n    self.menu.add_cascade(label=REPORT_MENU, underline=0, menu=self.report_menu)\n    self.report_menu.add_command(label=REPORT_CLUB_TABLE, command=self.main_form.on_report_club)\n    self.report_menu.add_command(label=REPORT_GROUP, command=self.main_form.on_report_group)\n    self.report_menu.add_command(label=REPORT_STATIONS, command=self.main_form.on_report_stations)\n    self.report_menu.add_command(label=REPORT_VALUES, command=self.main_form.on_create_value_tables)\n    self.scoring_menu = tkinter.Menu(self.menu, tearoff=False)\n    self.menu.add_cascade(label=SCORING_MENU, underline=0, menu=self.scoring_menu)\n    self.scoring_menu.add_command(label=SCORING_REFRESH, command=self.main_form.on_scoring_refresh, accelerator='F5')\n    self.scoring_menu.add_command(label=SCORING_CREATE, command=self.main_form.on_scoring_create)\n    self.main_form.root.bind_all('<Control-o>', self.main_form.on_open)\n    self.main_form.root.bind_all('<Control-r>', self.main_form.on_refresh)\n    self.main_form.root.bind_all('<F5>', self.main_form.on_scoring_refresh)\n<|end_body_1|>\n",
"revision_id": "349aad3f5a71374f062a7a3b50d827dbf8e99bfe", "skeleton": "<|skeleton|>\nclass Menu:\n    \"\"\"Menü-Leiste der MainForm\"\"\"\n\n    def __init__(self, main_form):\n        \"\"\"Konstruktor Args: main_form (MainForm): MainForm\"\"\"\n        <|body_0|>\n\n    def create(self):\n        \"\"\"Erstellen des Menüs\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Menu:\n    \"\"\"Menü-Leiste der MainForm\"\"\"\n\n    def __init__(self, main_form):\n        \"\"\"Konstruktor Args: main_form (MainForm): MainForm\"\"\"\n        from gui.main_form import MainForm\n        self.main_form: MainForm = main_form\n        self.menu = None\n        self.file_menu = None\n        self.report_menu = None\n        self.scoring_menu = None\n\n    def create(self):\n        \"\"\"Erstellen des Menüs\"\"\"\n        self.menu = tkinter.Menu(self.main_form.root)\n        self.main_form.root.config(menu=self.menu)\n        self.file_menu = tkinter.Menu(self.menu, tearoff=False)\n        self.menu.add_cascade(label=FILE_MENU, underline=0, menu=self.file_menu)\n        self.file_menu.add_command(label=FILE_OPEN, command=self.main_form.on_open, accelerator='Strg+O')\n        self.file_menu.add_command(label=FILE_REFRESH, command=self.main_form.on_refresh, accelerator='Strg+R')\n        self.file_menu.add_separator()\n        self.file_menu.add_command(label=FILE_QUIT, command=self.main_form.on_quit)\n        self.report_menu = tkinter.Menu(self.menu, tearoff=False)\n        self.menu.add_cascade(label=REPORT_MENU, underline=0, menu=self.report_menu)\n        self.report_menu.add_command(label=REPORT_CLUB_TABLE, command=self.main_form.on_report_club)\n        self.report_menu.add_command(label=REPORT_GROUP, command=self.main_form.on_report_group)\n        self.report_menu.add_command(label=REPORT_STATIONS, command=self.main_form.on_report_stations)\n        self.report_menu.add_command(label=REPORT_VALUES, command=self.main_form.on_create_value_tables)\n        self.scoring_menu = tkinter.Menu(self.menu, tearoff=False)\n        self.menu.add_cascade(label=SCORING_MENU, underline=0, menu=self.scoring_menu)\n        self.scoring_menu.add_command(label=SCORING_REFRESH, command=self.main_form.on_scoring_refresh, accelerator='F5')\n        self.scoring_menu.add_command(label=SCORING_CREATE, command=self.main_form.on_scoring_create)\n        self.main_form.root.bind_all('<Control-o>', self.main_form.on_open)\n        self.main_form.root.bind_all('<Control-r>', self.main_form.on_refresh)\n        self.main_form.root.bind_all('<F5>', self.main_form.on_scoring_refresh)\n", "source": "the_stack_v2_python_sparse", "source_path": "gui/menu.py", "source_repo": "RobFro96/Talentiadeverwaltung", "split": "val", "star_events_count": 0}
{"blob_id": "66566ab3618fd3c6379d57a75c35dbcb253b8207", "bodies": ["self.src_type = source.type\nself.n_bits = source.n_bits\nself.received_text = None", "if len(received_bits) < self.n_bits:\n    sys.stderr.write('Warning: Received fewer bits than expected\\n')\nelse:\n    received_bits = received_bits[:self.n_bits]\nif self.src_type == Source.TEXT:\n    self.received_text = self.get_text(received_bits)\nreturn numpy.array(received_bits, dtype=int)", "text = []\nintbits = numpy.array([], dtype=numpy.uint8)\nfor i in range(len(bits) // 8):\n    intbits = numpy.append(intbits, self.bits2int(bits[i * 8:(i + 1) * 8]))\nfor c in intbits:\n    text.append(chr(c))\nreturn ''.join([t for t in text])", "out = 0\nfor ix in range(len(bits)):\n    out += bits[ix] * 2 ** (len(bits) - 1 - ix)\nreturn int(out)"], "bodies_text": "<|body_start_0|>\n    self.src_type = source.type\n    self.n_bits = source.n_bits\n    self.received_text = None\n<|end_body_0|>\n\n<|body_start_1|>\n    if len(received_bits) < self.n_bits:\n        sys.stderr.write('Warning: Received fewer bits than expected\\n')\n    else:\n        received_bits = received_bits[:self.n_bits]\n    if self.src_type == Source.TEXT:\n        self.received_text = self.get_text(received_bits)\n    return numpy.array(received_bits, dtype=int)\n<|end_body_1|>\n\n<|body_start_2|>\n    text = []\n    intbits = numpy.array([], dtype=numpy.uint8)\n    for i in range(len(bits) // 8):\n        intbits = numpy.append(intbits, self.bits2int(bits[i * 8:(i + 1) * 8]))\n    for c in intbits:\n        text.append(chr(c))\n    return ''.join([t for t in text])\n<|end_body_2|>\n\n<|body_start_3|>\n    out = 0\n    for ix in range(len(bits)):\n        out += bits[ix] * 2 ** (len(bits) - 1 - ix)\n    return int(out)\n<|end_body_3|>\n", 
"class_docstring": "", "class_name": "Sink", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Sink:\n\n def __init__(self, source):\n \"\"\"Source that this Sink is based on.\"\"\"\n <|body_0|>\n\n def process(self, received_bits):\n \"\"\"Process the received bits.\"\"\"\n <|body_1|>\n\n def get_text(self, bits):\n \"\"\"Returns the text represented by the array of bits (assumes that bits was created by a Source reading a text file).\"\"\"\n <|body_2|>\n\n def bits2int(self, bits):\n \"\"\"Converts a bit to an integer, so that we can get the ASCII encoding.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.src_type = source.type\n self.n_bits = source.n_bits\n self.received_text = None\n<|end_body_0|>\n\n<|body_start_1|>\n if len(received_bits) < self.n_bits:\n sys.stderr.write('Warning: Received fewer bits than expected\\n')\n else:\n received_bits = received_bits[:self.n_bits]\n if self.src_type == Source.TEXT:\n self.received_text = self.get_text(received_bits)\n return numpy.array(received_bits, dtype=int)\n<|end_body_1|>\n\n<|body_start_2|>\n text = []\n intbits = numpy.array([], dtype=numpy.uint8)\n for i in range(len(bits) // 8):\n intbits = numpy.append(intbits, self.bits2int(bits[i * 8:(i + 1) * 8]))\n for c in intbits:\n text.append(chr(c))\n return ''.join([t for t in text])\n<|end_body_2|>\n\n<|body_start_3|>\n out = 0\n for ix in range(len(bits)):\n out += bits[ix] * 2 ** (len(bits) - 1 - ix)\n return int(out)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000415", "length_bytes": 1754, "license_type": "no_license", "methods": [{"docstring": "Source that this Sink is based on.", "name": "__init__", "signature": "def __init__(self, source)"}, {"docstring": "Process the received bits.", "name": "process", "signature": "def process(self, received_bits)"}, {"docstring": "Returns the text represented by the array of bits (assumes that bits was created by a Source reading a text file).", "name": "get_text", "signature": "def get_text(self, bits)"}, {"docstring": "Converts a bit to an integer, so that we can get the ASCII encoding.", "name": "bits2int", "signature": "def bits2int(self, bits)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_006453", "prompt": "Implement the Python class `Sink` described below.\n\nClass description:\nImplement the Sink class.\n\nMethod signatures and docstrings:\n- def __init__(self, source): Source that this Sink is based on.\n- def process(self, received_bits): Process the received bits.\n- def get_text(self, bits): Returns the text represented by the array of bits (assumes that bits was created by a Source reading a text file).\n- def bits2int(self, bits): Converts a bit to an integer, so that we can get the ASCII encoding.", "prompted_full_text": "Implement the Python class `Sink` described below.\n\nClass description:\nImplement the Sink class.\n\nMethod signatures and docstrings:\n- def __init__(self, source): Source that this Sink is based on.\n- def process(self, received_bits): Process the received bits.\n- def get_text(self, bits): Returns the text represented by the array of bits (assumes that bits was created by a Source reading a text file).\n- def bits2int(self, bits): Converts a bit to an integer, so that we can get the ASCII encoding.\n\n<|skeleton|>\nclass Sink:\n\n def __init__(self, source):\n \"\"\"Source that this Sink is based on.\"\"\"\n <|body_0|>\n\n def process(self, received_bits):\n \"\"\"Process the received 
bits.\"\"\"\n <|body_1|>\n\n def get_text(self, bits):\n \"\"\"Returns the text represented by the array of bits (assumes that bits was created by a Source reading a text file).\"\"\"\n <|body_2|>\n\n def bits2int(self, bits):\n \"\"\"Converts a bit to an integer, so that we can get the ASCII encoding.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.src_type = source.type\n self.n_bits = source.n_bits\n self.received_text = None\n<|end_body_0|>\n\n<|body_start_1|>\n if len(received_bits) < self.n_bits:\n sys.stderr.write('Warning: Received fewer bits than expected\\n')\n else:\n received_bits = received_bits[:self.n_bits]\n if self.src_type == Source.TEXT:\n self.received_text = self.get_text(received_bits)\n return numpy.array(received_bits, dtype=int)\n<|end_body_1|>\n\n<|body_start_2|>\n text = []\n intbits = numpy.array([], dtype=numpy.uint8)\n for i in range(len(bits) // 8):\n intbits = numpy.append(intbits, self.bits2int(bits[i * 8:(i + 1) * 8]))\n for c in intbits:\n text.append(chr(c))\n return ''.join([t for t in text])\n<|end_body_2|>\n\n<|body_start_3|>\n out = 0\n for ix in range(len(bits)):\n out += bits[ix] * 2 ** (len(bits) - 1 - ix)\n return int(out)\n<|end_body_3|>\n", "revision_id": "3f672177be94ba98f65abd3b95df810573ceb9bb", "skeleton": "<|skeleton|>\nclass Sink:\n\n def __init__(self, source):\n \"\"\"Source that this Sink is based on.\"\"\"\n <|body_0|>\n\n def process(self, received_bits):\n \"\"\"Process the received bits.\"\"\"\n <|body_1|>\n\n def get_text(self, bits):\n \"\"\"Returns the text represented by the array of bits (assumes that bits was created by a Source reading a text file).\"\"\"\n <|body_2|>\n\n def bits2int(self, bits):\n \"\"\"Converts a bit to an integer, so that we can get the ASCII encoding.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Sink:\n def __init__(self, source):\n \"\"\"Source that this Sink is based on.\"\"\"\n self.src_type = source.type\n self.n_bits = source.n_bits\n self.received_text = None\n\n def process(self, received_bits):\n \"\"\"Process the received bits.\"\"\"\n if len(received_bits) < self.n_bits:\n sys.stderr.write('Warning: Received fewer bits than expected\\n')\n else:\n received_bits = received_bits[:self.n_bits]\n if self.src_type == Source.TEXT:\n self.received_text = self.get_text(received_bits)\n return numpy.array(received_bits, dtype=int)\n\n def get_text(self, bits):\n \"\"\"Returns the text represented by the array of bits (assumes that bits was created by a Source reading a text file).\"\"\"\n text = []\n intbits = numpy.array([], dtype=numpy.uint8)\n for i in range(len(bits) // 8):\n intbits = numpy.append(intbits, self.bits2int(bits[i * 8:(i + 1) * 8]))\n for c in intbits:\n text.append(chr(c))\n return ''.join([t for t in text])\n\n def bits2int(self, bits):\n \"\"\"Converts a bit to an integer, so that we can get the ASCII encoding.\"\"\"\n out = 0\n for ix in range(len(bits)):\n out += bits[ix] * 2 ** (len(bits) - 1 - ix)\n return int(out)\n", "source": "the_stack_v2_python_sparse", "source_path": "Psets/PS5/sink.py", "source_repo": "fabatef/6.02", "split": "val", "star_events_count": 0} {"blob_id": "bd8f123d8b33f42fb1aaa42e777d78eccb642692", "bodies": ["WrappingFactory.__init__(self, wrappedFactory)\nif isClient:\n creatorInterface = IOpenSSLClientConnectionCreator\nelse:\n creatorInterface = IOpenSSLServerConnectionCreator\nself._creatorInterface = 
creatorInterface\nif not creatorInterface.providedBy(contextFactory):\n contextFactory = _ContextFactoryToConnectionFactory(contextFactory)\nself._connectionCreator = contextFactory", "if ILoggingContext.providedBy(self.wrappedFactory):\n logPrefix = self.wrappedFactory.logPrefix()\nelse:\n logPrefix = self.wrappedFactory.__class__.__name__\nreturn '{} (TLS)'.format(logPrefix)", "if IProtocolNegotiationFactory.providedBy(self.wrappedFactory):\n protocols = self.wrappedFactory.acceptableProtocols()\n context = connection.get_context()\n _setAcceptableProtocols(context, protocols)\nreturn", "connectionCreator = self._connectionCreator\nif self._creatorInterface is IOpenSSLClientConnectionCreator:\n connection = connectionCreator.clientConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_connect_state()\nelse:\n connection = connectionCreator.serverConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_accept_state()\nreturn connection"], "bodies_text": "<|body_start_0|>\n WrappingFactory.__init__(self, wrappedFactory)\n if isClient:\n creatorInterface = IOpenSSLClientConnectionCreator\n else:\n creatorInterface = IOpenSSLServerConnectionCreator\n self._creatorInterface = creatorInterface\n if not creatorInterface.providedBy(contextFactory):\n contextFactory = _ContextFactoryToConnectionFactory(contextFactory)\n self._connectionCreator = contextFactory\n<|end_body_0|>\n\n<|body_start_1|>\n if ILoggingContext.providedBy(self.wrappedFactory):\n logPrefix = self.wrappedFactory.logPrefix()\n else:\n logPrefix = self.wrappedFactory.__class__.__name__\n return '{} (TLS)'.format(logPrefix)\n<|end_body_1|>\n\n<|body_start_2|>\n if IProtocolNegotiationFactory.providedBy(self.wrappedFactory):\n protocols = self.wrappedFactory.acceptableProtocols()\n context = connection.get_context()\n _setAcceptableProtocols(context, protocols)\n return\n<|end_body_2|>\n\n<|body_start_3|>\n connectionCreator = self._connectionCreator\n if self._creatorInterface is IOpenSSLClientConnectionCreator:\n connection = connectionCreator.clientConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_connect_state()\n else:\n connection = connectionCreator.serverConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_accept_state()\n return connection\n<|end_body_3|>\n", "class_docstring": "L{TLSMemoryBIOFactory} adds TLS to connections. @ivar _creatorInterface: the interface which L{_connectionCreator} is expected to implement. @type _creatorInterface: L{zope.interface.interfaces.IInterface} @ivar _connectionCreator: a callable which creates an OpenSSL Connection object. @type _connectionCreator: 1-argument callable taking L{TLSMemoryBIOProtocol} and returning L{OpenSSL.SSL.Connection}.", "class_name": "TLSMemoryBIOFactory", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TLSMemoryBIOFactory:\n \"\"\"L{TLSMemoryBIOFactory} adds TLS to connections. @ivar _creatorInterface: the interface which L{_connectionCreator} is expected to implement. @type _creatorInterface: L{zope.interface.interfaces.IInterface} @ivar _connectionCreator: a callable which creates an OpenSSL Connection object. 
@type _connectionCreator: 1-argument callable taking L{TLSMemoryBIOProtocol} and returning L{OpenSSL.SSL.Connection}.\"\"\"\n\n def __init__(self, contextFactory, isClient, wrappedFactory):\n \"\"\"Create a L{TLSMemoryBIOFactory}. @param contextFactory: Configuration parameters used to create an OpenSSL connection. In order of preference, what you should pass here should be: 1. L{twisted.internet.ssl.CertificateOptions} (if you're writing a server) or the result of L{twisted.internet.ssl.optionsForClientTLS} (if you're writing a client). If you want security you should really use one of these. 2. If you really want to implement something yourself, supply a provider of L{IOpenSSLClientConnectionCreator} or L{IOpenSSLServerConnectionCreator}. 3. If you really have to, supply a L{twisted.internet.ssl.ContextFactory}. This will likely be deprecated at some point so please upgrade to the ne\"\"\"\n <|body_0|>\n\n def logPrefix(self):\n \"\"\"Annotate the wrapped factory's log prefix with some text indicating TLS is in use. @rtype: C{str}\"\"\"\n <|body_1|>\n\n def _applyProtocolNegotiation(self, connection):\n \"\"\"Applies ALPN/NPN protocol neogitation to the connection, if the factory supports it. @param connection: The OpenSSL connection object to have ALPN/NPN added to it. @type connection: L{OpenSSL.SSL.Connection} @return: Nothing @rtype: L{None}\"\"\"\n <|body_2|>\n\n def _createConnection(self, tlsProtocol):\n \"\"\"Create an OpenSSL connection and set it up good. @param tlsProtocol: The protocol which is establishing the connection. @type tlsProtocol: L{TLSMemoryBIOProtocol} @return: an OpenSSL connection object for C{tlsProtocol} to use @rtype: L{OpenSSL.SSL.Connection}\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n WrappingFactory.__init__(self, wrappedFactory)\n if isClient:\n creatorInterface = IOpenSSLClientConnectionCreator\n else:\n creatorInterface = IOpenSSLServerConnectionCreator\n self._creatorInterface = creatorInterface\n if not creatorInterface.providedBy(contextFactory):\n contextFactory = _ContextFactoryToConnectionFactory(contextFactory)\n self._connectionCreator = contextFactory\n<|end_body_0|>\n\n<|body_start_1|>\n if ILoggingContext.providedBy(self.wrappedFactory):\n logPrefix = self.wrappedFactory.logPrefix()\n else:\n logPrefix = self.wrappedFactory.__class__.__name__\n return '{} (TLS)'.format(logPrefix)\n<|end_body_1|>\n\n<|body_start_2|>\n if IProtocolNegotiationFactory.providedBy(self.wrappedFactory):\n protocols = self.wrappedFactory.acceptableProtocols()\n context = connection.get_context()\n _setAcceptableProtocols(context, protocols)\n return\n<|end_body_2|>\n\n<|body_start_3|>\n connectionCreator = self._connectionCreator\n if self._creatorInterface is IOpenSSLClientConnectionCreator:\n connection = connectionCreator.clientConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_connect_state()\n else:\n connection = connectionCreator.serverConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_accept_state()\n return connection\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000416", "length_bytes": 32500, "license_type": "permissive", "methods": [{"docstring": "Create a L{TLSMemoryBIOFactory}. @param contextFactory: Configuration parameters used to create an OpenSSL connection. In order of preference, what you should pass here should be: 1. 
L{twisted.internet.ssl.CertificateOptions} (if you're writing a server) or the result of L{twisted.internet.ssl.optionsForClientTLS} (if you're writing a client). If you want security you should really use one of these. 2. If you really want to implement something yourself, supply a provider of L{IOpenSSLClientConnectionCreator} or L{IOpenSSLServerConnectionCreator}. 3. If you really have to, supply a L{twisted.internet.ssl.ContextFactory}. This will likely be deprecated at some point so please upgrade to the ne", "name": "__init__", "signature": "def __init__(self, contextFactory, isClient, wrappedFactory)"}, {"docstring": "Annotate the wrapped factory's log prefix with some text indicating TLS is in use. @rtype: C{str}", "name": "logPrefix", "signature": "def logPrefix(self)"}, {"docstring": "Applies ALPN/NPN protocol neogitation to the connection, if the factory supports it. @param connection: The OpenSSL connection object to have ALPN/NPN added to it. @type connection: L{OpenSSL.SSL.Connection} @return: Nothing @rtype: L{None}", "name": "_applyProtocolNegotiation", "signature": "def _applyProtocolNegotiation(self, connection)"}, {"docstring": "Create an OpenSSL connection and set it up good. @param tlsProtocol: The protocol which is establishing the connection. @type tlsProtocol: L{TLSMemoryBIOProtocol} @return: an OpenSSL connection object for C{tlsProtocol} to use @rtype: L{OpenSSL.SSL.Connection}", "name": "_createConnection", "signature": "def _createConnection(self, tlsProtocol)"}], "n_methods": 4, "prompt": "Implement the Python class `TLSMemoryBIOFactory` described below.\n\nClass description:\nL{TLSMemoryBIOFactory} adds TLS to connections. @ivar _creatorInterface: the interface which L{_connectionCreator} is expected to implement. @type _creatorInterface: L{zope.interface.interfaces.IInterface} @ivar _connectionCreator: a callable which creates an OpenSSL Connection object. @type _connectionCreator: 1-argument callable taking L{TLSMemoryBIOProtocol} and returning L{OpenSSL.SSL.Connection}.\n\nMethod signatures and docstrings:\n- def __init__(self, contextFactory, isClient, wrappedFactory): Create a L{TLSMemoryBIOFactory}. @param contextFactory: Configuration parameters used to create an OpenSSL connection. In order of preference, what you should pass here should be: 1. L{twisted.internet.ssl.CertificateOptions} (if you're writing a server) or the result of L{twisted.internet.ssl.optionsForClientTLS} (if you're writing a client). If you want security you should really use one of these. 2. If you really want to implement something yourself, supply a provider of L{IOpenSSLClientConnectionCreator} or L{IOpenSSLServerConnectionCreator}. 3. If you really have to, supply a L{twisted.internet.ssl.ContextFactory}. This will likely be deprecated at some point so please upgrade to the ne\n- def logPrefix(self): Annotate the wrapped factory's log prefix with some text indicating TLS is in use. @rtype: C{str}\n- def _applyProtocolNegotiation(self, connection): Applies ALPN/NPN protocol neogitation to the connection, if the factory supports it. @param connection: The OpenSSL connection object to have ALPN/NPN added to it. @type connection: L{OpenSSL.SSL.Connection} @return: Nothing @rtype: L{None}\n- def _createConnection(self, tlsProtocol): Create an OpenSSL connection and set it up good. @param tlsProtocol: The protocol which is establishing the connection. 
@type tlsProtocol: L{TLSMemoryBIOProtocol} @return: an OpenSSL connection object for C{tlsProtocol} to use @rtype: L{OpenSSL.SSL.Connection}", "prompted_full_text": "Implement the Python class `TLSMemoryBIOFactory` described below.\n\nClass description:\nL{TLSMemoryBIOFactory} adds TLS to connections. @ivar _creatorInterface: the interface which L{_connectionCreator} is expected to implement. @type _creatorInterface: L{zope.interface.interfaces.IInterface} @ivar _connectionCreator: a callable which creates an OpenSSL Connection object. @type _connectionCreator: 1-argument callable taking L{TLSMemoryBIOProtocol} and returning L{OpenSSL.SSL.Connection}.\n\nMethod signatures and docstrings:\n- def __init__(self, contextFactory, isClient, wrappedFactory): Create a L{TLSMemoryBIOFactory}. @param contextFactory: Configuration parameters used to create an OpenSSL connection. In order of preference, what you should pass here should be: 1. L{twisted.internet.ssl.CertificateOptions} (if you're writing a server) or the result of L{twisted.internet.ssl.optionsForClientTLS} (if you're writing a client). If you want security you should really use one of these. 2. If you really want to implement something yourself, supply a provider of L{IOpenSSLClientConnectionCreator} or L{IOpenSSLServerConnectionCreator}. 3. If you really have to, supply a L{twisted.internet.ssl.ContextFactory}. This will likely be deprecated at some point so please upgrade to the ne\n- def logPrefix(self): Annotate the wrapped factory's log prefix with some text indicating TLS is in use. @rtype: C{str}\n- def _applyProtocolNegotiation(self, connection): Applies ALPN/NPN protocol neogitation to the connection, if the factory supports it. @param connection: The OpenSSL connection object to have ALPN/NPN added to it. @type connection: L{OpenSSL.SSL.Connection} @return: Nothing @rtype: L{None}\n- def _createConnection(self, tlsProtocol): Create an OpenSSL connection and set it up good. @param tlsProtocol: The protocol which is establishing the connection. @type tlsProtocol: L{TLSMemoryBIOProtocol} @return: an OpenSSL connection object for C{tlsProtocol} to use @rtype: L{OpenSSL.SSL.Connection}\n\n<|skeleton|>\nclass TLSMemoryBIOFactory:\n \"\"\"L{TLSMemoryBIOFactory} adds TLS to connections. @ivar _creatorInterface: the interface which L{_connectionCreator} is expected to implement. @type _creatorInterface: L{zope.interface.interfaces.IInterface} @ivar _connectionCreator: a callable which creates an OpenSSL Connection object. @type _connectionCreator: 1-argument callable taking L{TLSMemoryBIOProtocol} and returning L{OpenSSL.SSL.Connection}.\"\"\"\n\n def __init__(self, contextFactory, isClient, wrappedFactory):\n \"\"\"Create a L{TLSMemoryBIOFactory}. @param contextFactory: Configuration parameters used to create an OpenSSL connection. In order of preference, what you should pass here should be: 1. L{twisted.internet.ssl.CertificateOptions} (if you're writing a server) or the result of L{twisted.internet.ssl.optionsForClientTLS} (if you're writing a client). If you want security you should really use one of these. 2. If you really want to implement something yourself, supply a provider of L{IOpenSSLClientConnectionCreator} or L{IOpenSSLServerConnectionCreator}. 3. If you really have to, supply a L{twisted.internet.ssl.ContextFactory}. 
This will likely be deprecated at some point so please upgrade to the ne\"\"\"\n <|body_0|>\n\n def logPrefix(self):\n \"\"\"Annotate the wrapped factory's log prefix with some text indicating TLS is in use. @rtype: C{str}\"\"\"\n <|body_1|>\n\n def _applyProtocolNegotiation(self, connection):\n \"\"\"Applies ALPN/NPN protocol neogitation to the connection, if the factory supports it. @param connection: The OpenSSL connection object to have ALPN/NPN added to it. @type connection: L{OpenSSL.SSL.Connection} @return: Nothing @rtype: L{None}\"\"\"\n <|body_2|>\n\n def _createConnection(self, tlsProtocol):\n \"\"\"Create an OpenSSL connection and set it up good. @param tlsProtocol: The protocol which is establishing the connection. @type tlsProtocol: L{TLSMemoryBIOProtocol} @return: an OpenSSL connection object for C{tlsProtocol} to use @rtype: L{OpenSSL.SSL.Connection}\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n WrappingFactory.__init__(self, wrappedFactory)\n if isClient:\n creatorInterface = IOpenSSLClientConnectionCreator\n else:\n creatorInterface = IOpenSSLServerConnectionCreator\n self._creatorInterface = creatorInterface\n if not creatorInterface.providedBy(contextFactory):\n contextFactory = _ContextFactoryToConnectionFactory(contextFactory)\n self._connectionCreator = contextFactory\n<|end_body_0|>\n\n<|body_start_1|>\n if ILoggingContext.providedBy(self.wrappedFactory):\n logPrefix = self.wrappedFactory.logPrefix()\n else:\n logPrefix = self.wrappedFactory.__class__.__name__\n return '{} (TLS)'.format(logPrefix)\n<|end_body_1|>\n\n<|body_start_2|>\n if IProtocolNegotiationFactory.providedBy(self.wrappedFactory):\n protocols = self.wrappedFactory.acceptableProtocols()\n context = connection.get_context()\n _setAcceptableProtocols(context, protocols)\n return\n<|end_body_2|>\n\n<|body_start_3|>\n connectionCreator = self._connectionCreator\n if self._creatorInterface is IOpenSSLClientConnectionCreator:\n connection = connectionCreator.clientConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_connect_state()\n else:\n connection = connectionCreator.serverConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_accept_state()\n return connection\n<|end_body_3|>\n", "revision_id": "5cee0a8c4180a3108538b4e4ce945a18726595a6", "skeleton": "<|skeleton|>\nclass TLSMemoryBIOFactory:\n \"\"\"L{TLSMemoryBIOFactory} adds TLS to connections. @ivar _creatorInterface: the interface which L{_connectionCreator} is expected to implement. @type _creatorInterface: L{zope.interface.interfaces.IInterface} @ivar _connectionCreator: a callable which creates an OpenSSL Connection object. @type _connectionCreator: 1-argument callable taking L{TLSMemoryBIOProtocol} and returning L{OpenSSL.SSL.Connection}.\"\"\"\n\n def __init__(self, contextFactory, isClient, wrappedFactory):\n \"\"\"Create a L{TLSMemoryBIOFactory}. @param contextFactory: Configuration parameters used to create an OpenSSL connection. In order of preference, what you should pass here should be: 1. L{twisted.internet.ssl.CertificateOptions} (if you're writing a server) or the result of L{twisted.internet.ssl.optionsForClientTLS} (if you're writing a client). If you want security you should really use one of these. 2. If you really want to implement something yourself, supply a provider of L{IOpenSSLClientConnectionCreator} or L{IOpenSSLServerConnectionCreator}. 3. If you really have to, supply a L{twisted.internet.ssl.ContextFactory}. 
This will likely be deprecated at some point so please upgrade to the ne\"\"\"\n <|body_0|>\n\n def logPrefix(self):\n \"\"\"Annotate the wrapped factory's log prefix with some text indicating TLS is in use. @rtype: C{str}\"\"\"\n <|body_1|>\n\n def _applyProtocolNegotiation(self, connection):\n \"\"\"Applies ALPN/NPN protocol neogitation to the connection, if the factory supports it. @param connection: The OpenSSL connection object to have ALPN/NPN added to it. @type connection: L{OpenSSL.SSL.Connection} @return: Nothing @rtype: L{None}\"\"\"\n <|body_2|>\n\n def _createConnection(self, tlsProtocol):\n \"\"\"Create an OpenSSL connection and set it up good. @param tlsProtocol: The protocol which is establishing the connection. @type tlsProtocol: L{TLSMemoryBIOProtocol} @return: an OpenSSL connection object for C{tlsProtocol} to use @rtype: L{OpenSSL.SSL.Connection}\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TLSMemoryBIOFactory:\n \"\"\"L{TLSMemoryBIOFactory} adds TLS to connections. @ivar _creatorInterface: the interface which L{_connectionCreator} is expected to implement. @type _creatorInterface: L{zope.interface.interfaces.IInterface} @ivar _connectionCreator: a callable which creates an OpenSSL Connection object. @type _connectionCreator: 1-argument callable taking L{TLSMemoryBIOProtocol} and returning L{OpenSSL.SSL.Connection}.\"\"\"\n\n def __init__(self, contextFactory, isClient, wrappedFactory):\n \"\"\"Create a L{TLSMemoryBIOFactory}. @param contextFactory: Configuration parameters used to create an OpenSSL connection. In order of preference, what you should pass here should be: 1. L{twisted.internet.ssl.CertificateOptions} (if you're writing a server) or the result of L{twisted.internet.ssl.optionsForClientTLS} (if you're writing a client). If you want security you should really use one of these. 2. If you really want to implement something yourself, supply a provider of L{IOpenSSLClientConnectionCreator} or L{IOpenSSLServerConnectionCreator}. 3. If you really have to, supply a L{twisted.internet.ssl.ContextFactory}. This will likely be deprecated at some point so please upgrade to the ne\"\"\"\n WrappingFactory.__init__(self, wrappedFactory)\n if isClient:\n creatorInterface = IOpenSSLClientConnectionCreator\n else:\n creatorInterface = IOpenSSLServerConnectionCreator\n self._creatorInterface = creatorInterface\n if not creatorInterface.providedBy(contextFactory):\n contextFactory = _ContextFactoryToConnectionFactory(contextFactory)\n self._connectionCreator = contextFactory\n\n def logPrefix(self):\n \"\"\"Annotate the wrapped factory's log prefix with some text indicating TLS is in use. @rtype: C{str}\"\"\"\n if ILoggingContext.providedBy(self.wrappedFactory):\n logPrefix = self.wrappedFactory.logPrefix()\n else:\n logPrefix = self.wrappedFactory.__class__.__name__\n return '{} (TLS)'.format(logPrefix)\n\n def _applyProtocolNegotiation(self, connection):\n \"\"\"Applies ALPN/NPN protocol neogitation to the connection, if the factory supports it. @param connection: The OpenSSL connection object to have ALPN/NPN added to it. 
@type connection: L{OpenSSL.SSL.Connection} @return: Nothing @rtype: L{None}\"\"\"\n if IProtocolNegotiationFactory.providedBy(self.wrappedFactory):\n protocols = self.wrappedFactory.acceptableProtocols()\n context = connection.get_context()\n _setAcceptableProtocols(context, protocols)\n return\n\n def _createConnection(self, tlsProtocol):\n \"\"\"Create an OpenSSL connection and set it up good. @param tlsProtocol: The protocol which is establishing the connection. @type tlsProtocol: L{TLSMemoryBIOProtocol} @return: an OpenSSL connection object for C{tlsProtocol} to use @rtype: L{OpenSSL.SSL.Connection}\"\"\"\n connectionCreator = self._connectionCreator\n if self._creatorInterface is IOpenSSLClientConnectionCreator:\n connection = connectionCreator.clientConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_connect_state()\n else:\n connection = connectionCreator.serverConnectionForTLS(tlsProtocol)\n self._applyProtocolNegotiation(connection)\n connection.set_accept_state()\n return connection\n", "source": "the_stack_v2_python_sparse", "source_path": "venv/Lib/site-packages/twisted/protocols/tls.py", "source_repo": "zoelesv/Smathchat", "split": "val", "star_events_count": 9} {"blob_id": "e013bb92a9e26aa4e07c4bf60dbae05284a1b481", "bodies": ["if storage_type == 'sql':\n loader = cls._get_sql_loader(provider, **kwargs)\nelif storage_type == 's3':\n loader = cls._get_s3_loader(provider)\nelse:\n raise ValueError('Storage type %s is not supported' % storage_type)\nreturn loader", "loader: icdlab.AbstractS3DataLoader\nif provider == 'kibot':\n import im.kibot.data.load.kibot_s3_data_loader as ikdlki3\n loader = ikdlki3.KibotS3DataLoader()\nelif provider == 'ib':\n import im.ib.data.load.ib_s3_data_loader as iidlib3\n loader = iidlib3.IbS3DataLoader()\nelse:\n raise ValueError('S3 loader for %s is not implemented' % provider)\nreturn loader", "loader: icdlab.AbstractSqlDataLoader\nif provider == 'kibot':\n import im.kibot.data.load.kibot_sql_data_loader as ikdlki\n loader = ikdlki.KibotSqlDataLoader(dbname=dbname, user=user, password=password, host=host, port=port)\nelif provider == 'ib':\n import im.ib.data.load.ib_sql_data_loader as iidlib\n loader = iidlib.IbSqlDataLoader(dbname=dbname, user=user, password=password, host=host, port=port)\nelse:\n raise ValueError('SQL loader for %s is not implemented' % provider)\nreturn loader"], "bodies_text": "<|body_start_0|>\n if storage_type == 'sql':\n loader = cls._get_sql_loader(provider, **kwargs)\n elif storage_type == 's3':\n loader = cls._get_s3_loader(provider)\n else:\n raise ValueError('Storage type %s is not supported' % storage_type)\n return loader\n<|end_body_0|>\n\n<|body_start_1|>\n loader: icdlab.AbstractS3DataLoader\n if provider == 'kibot':\n import im.kibot.data.load.kibot_s3_data_loader as ikdlki3\n loader = ikdlki3.KibotS3DataLoader()\n elif provider == 'ib':\n import im.ib.data.load.ib_s3_data_loader as iidlib3\n loader = iidlib3.IbS3DataLoader()\n else:\n raise ValueError('S3 loader for %s is not implemented' % provider)\n return loader\n<|end_body_1|>\n\n<|body_start_2|>\n loader: icdlab.AbstractSqlDataLoader\n if provider == 'kibot':\n import im.kibot.data.load.kibot_sql_data_loader as ikdlki\n loader = ikdlki.KibotSqlDataLoader(dbname=dbname, user=user, password=password, host=host, port=port)\n elif provider == 'ib':\n import im.ib.data.load.ib_sql_data_loader as iidlib\n loader = iidlib.IbSqlDataLoader(dbname=dbname, user=user, password=password, host=host, port=port)\n else:\n raise 
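For the TLSMemoryBIOFactory record that closes above, a minimal client-side usage sketch (assumes Twisted is installed; 'example.com' is a placeholder hostname, and the printed prefix is indicative rather than guaranteed):

from twisted.internet import protocol, ssl
from twisted.protocols.tls import TLSMemoryBIOFactory

# optionsForClientTLS returns an IOpenSSLClientConnectionCreator, i.e. the
# preferred kind of contextFactory per the record's __init__ docstring, so
# no _ContextFactoryToConnectionFactory wrapping is needed.
creator = ssl.optionsForClientTLS('example.com')
plain = protocol.Factory.forProtocol(protocol.Protocol)
tls_factory = TLSMemoryBIOFactory(creator, isClient=True, wrappedFactory=plain)
print(tls_factory.logPrefix())  # e.g. 'Factory (TLS)'

Passing isClient=True selects IOpenSSLClientConnectionCreator, so _createConnection will call clientConnectionForTLS and put the resulting connection into connect state.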
ValueError('SQL loader for %s is not implemented' % provider)\n return loader\n<|end_body_2|>\n", "class_docstring": "Builds AbstractDataLoader objects based on different criteria (e.g., provider and storage type).", "class_name": "LoaderFactory", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LoaderFactory:\n \"\"\"Builds AbstractDataLoader objects based on different criteria (e.g., provider and storage type).\"\"\"\n\n def get_loader(cls, storage_type: str, provider: str, **kwargs: Any) -> icdlab.AbstractDataLoader:\n \"\"\"Return a data loader for the requested `storage_type` and `provider`. :param storage_type: load from where (e.g., s3, sql) :param provider: provider (e.g., kibot, ib) :param kwargs: additional parameters for loader instantiation :raises ValueError: `storage_type` loader is not implemented for provider\"\"\"\n <|body_0|>\n\n def _get_s3_loader(provider: str) -> icdlab.AbstractS3DataLoader:\n \"\"\"Return a data loader from S3 for the requested `provider`. :param provider: provider (e.g., kibot) :raises ValueError: if loader is not implemented for provider\"\"\"\n <|body_1|>\n\n def _get_sql_loader(provider: str, dbname: str, user: str, password: str, host: str, port: int) -> icdlab.AbstractSqlDataLoader:\n \"\"\"Return a data loader from SQL for the requested `provider`. :param provider: provider (e.g., kibot) :param dbname: database name to connect :param user: authorization user :param password: authorization password :param host: database host :param port: database port :raises ValueError: if SQL loader is not implemented for provider\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if storage_type == 'sql':\n loader = cls._get_sql_loader(provider, **kwargs)\n elif storage_type == 's3':\n loader = cls._get_s3_loader(provider)\n else:\n raise ValueError('Storage type %s is not supported' % storage_type)\n return loader\n<|end_body_0|>\n\n<|body_start_1|>\n loader: icdlab.AbstractS3DataLoader\n if provider == 'kibot':\n import im.kibot.data.load.kibot_s3_data_loader as ikdlki3\n loader = ikdlki3.KibotS3DataLoader()\n elif provider == 'ib':\n import im.ib.data.load.ib_s3_data_loader as iidlib3\n loader = iidlib3.IbS3DataLoader()\n else:\n raise ValueError('S3 loader for %s is not implemented' % provider)\n return loader\n<|end_body_1|>\n\n<|body_start_2|>\n loader: icdlab.AbstractSqlDataLoader\n if provider == 'kibot':\n import im.kibot.data.load.kibot_sql_data_loader as ikdlki\n loader = ikdlki.KibotSqlDataLoader(dbname=dbname, user=user, password=password, host=host, port=port)\n elif provider == 'ib':\n import im.ib.data.load.ib_sql_data_loader as iidlib\n loader = iidlib.IbSqlDataLoader(dbname=dbname, user=user, password=password, host=host, port=port)\n else:\n raise ValueError('SQL loader for %s is not implemented' % provider)\n return loader\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000417", "length_bytes": 3109, "license_type": "permissive", "methods": [{"docstring": "Return a data loader for the requested `storage_type` and `provider`. 
:param storage_type: load from where (e.g., s3, sql) :param provider: provider (e.g., kibot, ib) :param kwargs: additional parameters for loader instantiation :raises ValueError: `storage_type` loader is not implemented for provider", "name": "get_loader", "signature": "def get_loader(cls, storage_type: str, provider: str, **kwargs: Any) -> icdlab.AbstractDataLoader"}, {"docstring": "Return a data loader from S3 for the requested `provider`. :param provider: provider (e.g., kibot) :raises ValueError: if loader is not implemented for provider", "name": "_get_s3_loader", "signature": "def _get_s3_loader(provider: str) -> icdlab.AbstractS3DataLoader"}, {"docstring": "Return a data loader from SQL for the requested `provider`. :param provider: provider (e.g., kibot) :param dbname: database name to connect :param user: authorization user :param password: authorization password :param host: database host :param port: database port :raises ValueError: if SQL loader is not implemented for provider", "name": "_get_sql_loader", "signature": "def _get_sql_loader(provider: str, dbname: str, user: str, password: str, host: str, port: int) -> icdlab.AbstractSqlDataLoader"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002108", "prompt": "Implement the Python class `LoaderFactory` described below.\n\nClass description:\nBuilds AbstractDataLoader objects based on different criteria (e.g., provider and storage type).\n\nMethod signatures and docstrings:\n- def get_loader(cls, storage_type: str, provider: str, **kwargs: Any) -> icdlab.AbstractDataLoader: Return a data loader for the requested `storage_type` and `provider`. :param storage_type: load from where (e.g., s3, sql) :param provider: provider (e.g., kibot, ib) :param kwargs: additional parameters for loader instantiation :raises ValueError: `storage_type` loader is not implemented for provider\n- def _get_s3_loader(provider: str) -> icdlab.AbstractS3DataLoader: Return a data loader from S3 for the requested `provider`. :param provider: provider (e.g., kibot) :raises ValueError: if loader is not implemented for provider\n- def _get_sql_loader(provider: str, dbname: str, user: str, password: str, host: str, port: int) -> icdlab.AbstractSqlDataLoader: Return a data loader from SQL for the requested `provider`. :param provider: provider (e.g., kibot) :param dbname: database name to connect :param user: authorization user :param password: authorization password :param host: database host :param port: database port :raises ValueError: if SQL loader is not implemented for provider", "prompted_full_text": "Implement the Python class `LoaderFactory` described below.\n\nClass description:\nBuilds AbstractDataLoader objects based on different criteria (e.g., provider and storage type).\n\nMethod signatures and docstrings:\n- def get_loader(cls, storage_type: str, provider: str, **kwargs: Any) -> icdlab.AbstractDataLoader: Return a data loader for the requested `storage_type` and `provider`. :param storage_type: load from where (e.g., s3, sql) :param provider: provider (e.g., kibot, ib) :param kwargs: additional parameters for loader instantiation :raises ValueError: `storage_type` loader is not implemented for provider\n- def _get_s3_loader(provider: str) -> icdlab.AbstractS3DataLoader: Return a data loader from S3 for the requested `provider`. 
:param provider: provider (e.g., kibot) :raises ValueError: if loader is not implemented for provider\n- def _get_sql_loader(provider: str, dbname: str, user: str, password: str, host: str, port: int) -> icdlab.AbstractSqlDataLoader: Return a data loader from SQL for the requested `provider`. :param provider: provider (e.g., kibot) :param dbname: database name to connect :param user: authorization user :param password: authorization password :param host: database host :param port: database port :raises ValueError: if SQL loader is not implemented for provider\n\n<|skeleton|>\nclass LoaderFactory:\n \"\"\"Builds AbstractDataLoader objects based on different criteria (e.g., provider and storage type).\"\"\"\n\n def get_loader(cls, storage_type: str, provider: str, **kwargs: Any) -> icdlab.AbstractDataLoader:\n \"\"\"Return a data loader for the requested `storage_type` and `provider`. :param storage_type: load from where (e.g., s3, sql) :param provider: provider (e.g., kibot, ib) :param kwargs: additional parameters for loader instantiation :raises ValueError: `storage_type` loader is not implemented for provider\"\"\"\n <|body_0|>\n\n def _get_s3_loader(provider: str) -> icdlab.AbstractS3DataLoader:\n \"\"\"Return a data loader from S3 for the requested `provider`. :param provider: provider (e.g., kibot) :raises ValueError: if loader is not implemented for provider\"\"\"\n <|body_1|>\n\n def _get_sql_loader(provider: str, dbname: str, user: str, password: str, host: str, port: int) -> icdlab.AbstractSqlDataLoader:\n \"\"\"Return a data loader from SQL for the requested `provider`. :param provider: provider (e.g., kibot) :param dbname: database name to connect :param user: authorization user :param password: authorization password :param host: database host :param port: database port :raises ValueError: if SQL loader is not implemented for provider\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if storage_type == 'sql':\n loader = cls._get_sql_loader(provider, **kwargs)\n elif storage_type == 's3':\n loader = cls._get_s3_loader(provider)\n else:\n raise ValueError('Storage type %s is not supported' % storage_type)\n return loader\n<|end_body_0|>\n\n<|body_start_1|>\n loader: icdlab.AbstractS3DataLoader\n if provider == 'kibot':\n import im.kibot.data.load.kibot_s3_data_loader as ikdlki3\n loader = ikdlki3.KibotS3DataLoader()\n elif provider == 'ib':\n import im.ib.data.load.ib_s3_data_loader as iidlib3\n loader = iidlib3.IbS3DataLoader()\n else:\n raise ValueError('S3 loader for %s is not implemented' % provider)\n return loader\n<|end_body_1|>\n\n<|body_start_2|>\n loader: icdlab.AbstractSqlDataLoader\n if provider == 'kibot':\n import im.kibot.data.load.kibot_sql_data_loader as ikdlki\n loader = ikdlki.KibotSqlDataLoader(dbname=dbname, user=user, password=password, host=host, port=port)\n elif provider == 'ib':\n import im.ib.data.load.ib_sql_data_loader as iidlib\n loader = iidlib.IbSqlDataLoader(dbname=dbname, user=user, password=password, host=host, port=port)\n else:\n raise ValueError('SQL loader for %s is not implemented' % provider)\n return loader\n<|end_body_2|>\n", "revision_id": "363c59fa29df2ba2719cbad2f8a19ae12cc54a92", "skeleton": "<|skeleton|>\nclass LoaderFactory:\n \"\"\"Builds AbstractDataLoader objects based on different criteria (e.g., provider and storage type).\"\"\"\n\n def get_loader(cls, storage_type: str, provider: str, **kwargs: Any) -> icdlab.AbstractDataLoader:\n \"\"\"Return a data loader for the requested `storage_type` and `provider`. 
:param storage_type: load from where (e.g., s3, sql) :param provider: provider (e.g., kibot, ib) :param kwargs: additional parameters for loader instantiation :raises ValueError: `storage_type` loader is not implemented for provider\"\"\"\n <|body_0|>\n\n def _get_s3_loader(provider: str) -> icdlab.AbstractS3DataLoader:\n \"\"\"Return a data loader from S3 for the requested `provider`. :param provider: provider (e.g., kibot) :raises ValueError: if loader is not implemented for provider\"\"\"\n <|body_1|>\n\n def _get_sql_loader(provider: str, dbname: str, user: str, password: str, host: str, port: int) -> icdlab.AbstractSqlDataLoader:\n \"\"\"Return a data loader from SQL for the requested `provider`. :param provider: provider (e.g., kibot) :param dbname: database name to connect :param user: authorization user :param password: authorization password :param host: database host :param port: database port :raises ValueError: if SQL loader is not implemented for provider\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LoaderFactory:\n \"\"\"Builds AbstractDataLoader objects based on different criteria (e.g., provider and storage type).\"\"\"\n\n def get_loader(cls, storage_type: str, provider: str, **kwargs: Any) -> icdlab.AbstractDataLoader:\n \"\"\"Return a data loader for the requested `storage_type` and `provider`. :param storage_type: load from where (e.g., s3, sql) :param provider: provider (e.g., kibot, ib) :param kwargs: additional parameters for loader instantiation :raises ValueError: `storage_type` loader is not implemented for provider\"\"\"\n if storage_type == 'sql':\n loader = cls._get_sql_loader(provider, **kwargs)\n elif storage_type == 's3':\n loader = cls._get_s3_loader(provider)\n else:\n raise ValueError('Storage type %s is not supported' % storage_type)\n return loader\n\n def _get_s3_loader(provider: str) -> icdlab.AbstractS3DataLoader:\n \"\"\"Return a data loader from S3 for the requested `provider`. :param provider: provider (e.g., kibot) :raises ValueError: if loader is not implemented for provider\"\"\"\n loader: icdlab.AbstractS3DataLoader\n if provider == 'kibot':\n import im.kibot.data.load.kibot_s3_data_loader as ikdlki3\n loader = ikdlki3.KibotS3DataLoader()\n elif provider == 'ib':\n import im.ib.data.load.ib_s3_data_loader as iidlib3\n loader = iidlib3.IbS3DataLoader()\n else:\n raise ValueError('S3 loader for %s is not implemented' % provider)\n return loader\n\n def _get_sql_loader(provider: str, dbname: str, user: str, password: str, host: str, port: int) -> icdlab.AbstractSqlDataLoader:\n \"\"\"Return a data loader from SQL for the requested `provider`. 
:param provider: provider (e.g., kibot) :param dbname: database name to connect :param user: authorization user :param password: authorization password :param host: database host :param port: database port :raises ValueError: if SQL loader is not implemented for provider\"\"\"\n loader: icdlab.AbstractSqlDataLoader\n if provider == 'kibot':\n import im.kibot.data.load.kibot_sql_data_loader as ikdlki\n loader = ikdlki.KibotSqlDataLoader(dbname=dbname, user=user, password=password, host=host, port=port)\n elif provider == 'ib':\n import im.ib.data.load.ib_sql_data_loader as iidlib\n loader = iidlib.IbSqlDataLoader(dbname=dbname, user=user, password=password, host=host, port=port)\n else:\n raise ValueError('SQL loader for %s is not implemented' % provider)\n return loader\n", "source": "the_stack_v2_python_sparse", "source_path": "im/app/services/loader_factory.py", "source_repo": "srlindemann/amp", "split": "val", "star_events_count": 0} {"blob_id": "d289e5547441ca375772b60e966cb0cf439913fb", "bodies": ["super().__init__()\nself.size = size\nself.trg_trg_att = MultiHeadedAttention(num_heads, size, dropout=dropout)\nself.src_trg_att = MultiHeadedAttention(num_heads, size, dropout=dropout)\nself.feed_forward = PositionwiseFeedForward(size, ff_size=ff_size, dropout=dropout, alpha=alpha, layer_norm=layer_norm, activation=activation)\nself.x_layer_norm = nn.LayerNorm(size, eps=1e-06)\nself.dec_layer_norm = nn.LayerNorm(size, eps=1e-06)\nself.dropout = nn.Dropout(dropout)\nself.alpha = alpha\nself._layer_norm_position = layer_norm\nassert self._layer_norm_position in {'pre', 'post'}", "residual = x\nif self._layer_norm_position == 'pre':\n x = self.x_layer_norm(x)\nh1, _ = self.trg_trg_att(x, x, x, mask=trg_mask)\nh1 = self.dropout(h1) + self.alpha * residual\nif self._layer_norm_position == 'post':\n h1 = self.x_layer_norm(h1)\nh1_residual = h1\nif self._layer_norm_position == 'pre':\n h1 = self.dec_layer_norm(h1)\nh2, att = self.src_trg_att(memory, memory, h1, mask=src_mask, return_weights=return_attention)\nh2 = self.dropout(h2) + self.alpha * h1_residual\nif self._layer_norm_position == 'post':\n h2 = self.dec_layer_norm(h2)\nout = self.feed_forward(h2)\nif return_attention:\n return (out, att)\nreturn (out, None)"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.size = size\n self.trg_trg_att = MultiHeadedAttention(num_heads, size, dropout=dropout)\n self.src_trg_att = MultiHeadedAttention(num_heads, size, dropout=dropout)\n self.feed_forward = PositionwiseFeedForward(size, ff_size=ff_size, dropout=dropout, alpha=alpha, layer_norm=layer_norm, activation=activation)\n self.x_layer_norm = nn.LayerNorm(size, eps=1e-06)\n self.dec_layer_norm = nn.LayerNorm(size, eps=1e-06)\n self.dropout = nn.Dropout(dropout)\n self.alpha = alpha\n self._layer_norm_position = layer_norm\n assert self._layer_norm_position in {'pre', 'post'}\n<|end_body_0|>\n\n<|body_start_1|>\n residual = x\n if self._layer_norm_position == 'pre':\n x = self.x_layer_norm(x)\n h1, _ = self.trg_trg_att(x, x, x, mask=trg_mask)\n h1 = self.dropout(h1) + self.alpha * residual\n if self._layer_norm_position == 'post':\n h1 = self.x_layer_norm(h1)\n h1_residual = h1\n if self._layer_norm_position == 'pre':\n h1 = self.dec_layer_norm(h1)\n h2, att = self.src_trg_att(memory, memory, h1, mask=src_mask, return_weights=return_attention)\n h2 = self.dropout(h2) + self.alpha * h1_residual\n if self._layer_norm_position == 'post':\n h2 = self.dec_layer_norm(h2)\n out = self.feed_forward(h2)\n if return_attention:\n return (out, 
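The LoaderFactory record that closes above dispatches on storage type and provider with nested if/elif ladders plus lazy imports. A generic registry sketch of the same dispatch (stand-in lambdas, not the record's im.* loader classes) shows the usual alternative:

from typing import Any, Callable, Dict, Tuple

_REGISTRY: Dict[Tuple[str, str], Callable[..., Any]] = {
    # Stand-ins for the record's KibotS3DataLoader / KibotSqlDataLoader etc.
    ('s3', 'kibot'): lambda **kwargs: 'KibotS3DataLoader()',
    ('sql', 'kibot'): lambda **kwargs: 'KibotSqlDataLoader(**kwargs)',
}

def get_loader(storage_type: str, provider: str, **kwargs: Any) -> Any:
    try:
        factory = _REGISTRY[storage_type, provider]
    except KeyError:
        raise ValueError(
            '%s loader for %s is not implemented' % (storage_type, provider))
    return factory(**kwargs)

print(get_loader('s3', 'kibot'))  # -> KibotS3DataLoader()

One reason to keep the record's if/elif form is exactly its in-branch imports: heavy provider modules are imported only when requested, which a module-level registry of real classes would defeat.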
att)\n return (out, None)\n<|end_body_1|>\n", "class_docstring": "Transformer decoder layer. Consists of self-attention, source-attention, and feed-forward.", "class_name": "TransformerDecoderLayer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TransformerDecoderLayer:\n \"\"\"Transformer decoder layer. Consists of self-attention, source-attention, and feed-forward.\"\"\"\n\n def __init__(self, size: int=0, ff_size: int=0, num_heads: int=0, dropout: float=0.1, alpha: float=1.0, layer_norm: str='post', activation: str='relu') -> None:\n \"\"\"Represents a single Transformer decoder layer. It attends to the source representation and the previous decoder states. Note: don't change the name or the order of members! otherwise pretrained models cannot be loaded correctly. :param size: model dimensionality :param ff_size: size of the feed-forward intermediate layer :param num_heads: number of heads :param dropout: dropout to apply to input :param alpha: weight factor for residual connection :param layer_norm: either \"pre\" or \"post\" :param activation: activation function\"\"\"\n <|body_0|>\n\n def forward(self, x: Tensor, memory: Tensor, src_mask: Tensor, trg_mask: Tensor, return_attention: bool=False, **kwargs) -> Tensor:\n \"\"\"Forward pass of a single Transformer decoder layer. First applies target-target self-attention, dropout with residual connection (adding the input to the result), and layer norm. Second computes source-target cross-attention, dropout with residual connection (adding the self-attention to the result), and layer norm. Finally goes through a position-wise feed-forward layer. :param x: inputs :param memory: source representations :param src_mask: source mask :param trg_mask: target mask (so as to not condition on future steps) :param return_attention: whether to return the attention weights :return: - output tensor - attention weights\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.size = size\n self.trg_trg_att = MultiHeadedAttention(num_heads, size, dropout=dropout)\n self.src_trg_att = MultiHeadedAttention(num_heads, size, dropout=dropout)\n self.feed_forward = PositionwiseFeedForward(size, ff_size=ff_size, dropout=dropout, alpha=alpha, layer_norm=layer_norm, activation=activation)\n self.x_layer_norm = nn.LayerNorm(size, eps=1e-06)\n self.dec_layer_norm = nn.LayerNorm(size, eps=1e-06)\n self.dropout = nn.Dropout(dropout)\n self.alpha = alpha\n self._layer_norm_position = layer_norm\n assert self._layer_norm_position in {'pre', 'post'}\n<|end_body_0|>\n\n<|body_start_1|>\n residual = x\n if self._layer_norm_position == 'pre':\n x = self.x_layer_norm(x)\n h1, _ = self.trg_trg_att(x, x, x, mask=trg_mask)\n h1 = self.dropout(h1) + self.alpha * residual\n if self._layer_norm_position == 'post':\n h1 = self.x_layer_norm(h1)\n h1_residual = h1\n if self._layer_norm_position == 'pre':\n h1 = self.dec_layer_norm(h1)\n h2, att = self.src_trg_att(memory, memory, h1, mask=src_mask, return_weights=return_attention)\n h2 = self.dropout(h2) + self.alpha * h1_residual\n if self._layer_norm_position == 'post':\n h2 = self.dec_layer_norm(h2)\n out = self.feed_forward(h2)\n if return_attention:\n return (out, att)\n return (out, None)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000418", "length_bytes": 13169, "license_type": "permissive", "methods": [{"docstring": "Represents a single Transformer decoder layer. 
It attends to the source representation and the previous decoder states. Note: don't change the name or the order of members! otherwise pretrained models cannot be loaded correctly. :param size: model dimensionality :param ff_size: size of the feed-forward intermediate layer :param num_heads: number of heads :param dropout: dropout to apply to input :param alpha: weight factor for residual connection :param layer_norm: either \"pre\" or \"post\" :param activation: activation function", "name": "__init__", "signature": "def __init__(self, size: int=0, ff_size: int=0, num_heads: int=0, dropout: float=0.1, alpha: float=1.0, layer_norm: str='post', activation: str='relu') -> None"}, {"docstring": "Forward pass of a single Transformer decoder layer. First applies target-target self-attention, dropout with residual connection (adding the input to the result), and layer norm. Second computes source-target cross-attention, dropout with residual connection (adding the self-attention to the result), and layer norm. Finally goes through a position-wise feed-forward layer. :param x: inputs :param memory: source representations :param src_mask: source mask :param trg_mask: target mask (so as to not condition on future steps) :param return_attention: whether to return the attention weights :return: - output tensor - attention weights", "name": "forward", "signature": "def forward(self, x: Tensor, memory: Tensor, src_mask: Tensor, trg_mask: Tensor, return_attention: bool=False, **kwargs) -> Tensor"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001946", "prompt": "Implement the Python class `TransformerDecoderLayer` described below.\n\nClass description:\nTransformer decoder layer. Consists of self-attention, source-attention, and feed-forward.\n\nMethod signatures and docstrings:\n- def __init__(self, size: int=0, ff_size: int=0, num_heads: int=0, dropout: float=0.1, alpha: float=1.0, layer_norm: str='post', activation: str='relu') -> None: Represents a single Transformer decoder layer. It attends to the source representation and the previous decoder states. Note: don't change the name or the order of members! otherwise pretrained models cannot be loaded correctly. :param size: model dimensionality :param ff_size: size of the feed-forward intermediate layer :param num_heads: number of heads :param dropout: dropout to apply to input :param alpha: weight factor for residual connection :param layer_norm: either \"pre\" or \"post\" :param activation: activation function\n- def forward(self, x: Tensor, memory: Tensor, src_mask: Tensor, trg_mask: Tensor, return_attention: bool=False, **kwargs) -> Tensor: Forward pass of a single Transformer decoder layer. First applies target-target self-attention, dropout with residual connection (adding the input to the result), and layer norm. Second computes source-target cross-attention, dropout with residual connection (adding the self-attention to the result), and layer norm. Finally goes through a position-wise feed-forward layer. :param x: inputs :param memory: source representations :param src_mask: source mask :param trg_mask: target mask (so as to not condition on future steps) :param return_attention: whether to return the attention weights :return: - output tensor - attention weights", "prompted_full_text": "Implement the Python class `TransformerDecoderLayer` described below.\n\nClass description:\nTransformer decoder layer. 
Consists of self-attention, source-attention, and feed-forward.\n\nMethod signatures and docstrings:\n- def __init__(self, size: int=0, ff_size: int=0, num_heads: int=0, dropout: float=0.1, alpha: float=1.0, layer_norm: str='post', activation: str='relu') -> None: Represents a single Transformer decoder layer. It attends to the source representation and the previous decoder states. Note: don't change the name or the order of members! otherwise pretrained models cannot be loaded correctly. :param size: model dimensionality :param ff_size: size of the feed-forward intermediate layer :param num_heads: number of heads :param dropout: dropout to apply to input :param alpha: weight factor for residual connection :param layer_norm: either \"pre\" or \"post\" :param activation: activation function\n- def forward(self, x: Tensor, memory: Tensor, src_mask: Tensor, trg_mask: Tensor, return_attention: bool=False, **kwargs) -> Tensor: Forward pass of a single Transformer decoder layer. First applies target-target self-attention, dropout with residual connection (adding the input to the result), and layer norm. Second computes source-target cross-attention, dropout with residual connection (adding the self-attention to the result), and layer norm. Finally goes through a position-wise feed-forward layer. :param x: inputs :param memory: source representations :param src_mask: source mask :param trg_mask: target mask (so as to not condition on future steps) :param return_attention: whether to return the attention weights :return: - output tensor - attention weights\n\n<|skeleton|>\nclass TransformerDecoderLayer:\n \"\"\"Transformer decoder layer. Consists of self-attention, source-attention, and feed-forward.\"\"\"\n\n def __init__(self, size: int=0, ff_size: int=0, num_heads: int=0, dropout: float=0.1, alpha: float=1.0, layer_norm: str='post', activation: str='relu') -> None:\n \"\"\"Represents a single Transformer decoder layer. It attends to the source representation and the previous decoder states. Note: don't change the name or the order of members! otherwise pretrained models cannot be loaded correctly. :param size: model dimensionality :param ff_size: size of the feed-forward intermediate layer :param num_heads: number of heads :param dropout: dropout to apply to input :param alpha: weight factor for residual connection :param layer_norm: either \"pre\" or \"post\" :param activation: activation function\"\"\"\n <|body_0|>\n\n def forward(self, x: Tensor, memory: Tensor, src_mask: Tensor, trg_mask: Tensor, return_attention: bool=False, **kwargs) -> Tensor:\n \"\"\"Forward pass of a single Transformer decoder layer. First applies target-target self-attention, dropout with residual connection (adding the input to the result), and layer norm. Second computes source-target cross-attention, dropout with residual connection (adding the self-attention to the result), and layer norm. Finally goes through a position-wise feed-forward layer. 
:param x: inputs :param memory: source representations :param src_mask: source mask :param trg_mask: target mask (so as to not condition on future steps) :param return_attention: whether to return the attention weights :return: - output tensor - attention weights\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.size = size\n self.trg_trg_att = MultiHeadedAttention(num_heads, size, dropout=dropout)\n self.src_trg_att = MultiHeadedAttention(num_heads, size, dropout=dropout)\n self.feed_forward = PositionwiseFeedForward(size, ff_size=ff_size, dropout=dropout, alpha=alpha, layer_norm=layer_norm, activation=activation)\n self.x_layer_norm = nn.LayerNorm(size, eps=1e-06)\n self.dec_layer_norm = nn.LayerNorm(size, eps=1e-06)\n self.dropout = nn.Dropout(dropout)\n self.alpha = alpha\n self._layer_norm_position = layer_norm\n assert self._layer_norm_position in {'pre', 'post'}\n<|end_body_0|>\n\n<|body_start_1|>\n residual = x\n if self._layer_norm_position == 'pre':\n x = self.x_layer_norm(x)\n h1, _ = self.trg_trg_att(x, x, x, mask=trg_mask)\n h1 = self.dropout(h1) + self.alpha * residual\n if self._layer_norm_position == 'post':\n h1 = self.x_layer_norm(h1)\n h1_residual = h1\n if self._layer_norm_position == 'pre':\n h1 = self.dec_layer_norm(h1)\n h2, att = self.src_trg_att(memory, memory, h1, mask=src_mask, return_weights=return_attention)\n h2 = self.dropout(h2) + self.alpha * h1_residual\n if self._layer_norm_position == 'post':\n h2 = self.dec_layer_norm(h2)\n out = self.feed_forward(h2)\n if return_attention:\n return (out, att)\n return (out, None)\n<|end_body_1|>\n", "revision_id": "0968187ac0968007cabebed5e5cb6587c08dff78", "skeleton": "<|skeleton|>\nclass TransformerDecoderLayer:\n \"\"\"Transformer decoder layer. Consists of self-attention, source-attention, and feed-forward.\"\"\"\n\n def __init__(self, size: int=0, ff_size: int=0, num_heads: int=0, dropout: float=0.1, alpha: float=1.0, layer_norm: str='post', activation: str='relu') -> None:\n \"\"\"Represents a single Transformer decoder layer. It attends to the source representation and the previous decoder states. Note: don't change the name or the order of members! otherwise pretrained models cannot be loaded correctly. :param size: model dimensionality :param ff_size: size of the feed-forward intermediate layer :param num_heads: number of heads :param dropout: dropout to apply to input :param alpha: weight factor for residual connection :param layer_norm: either \"pre\" or \"post\" :param activation: activation function\"\"\"\n <|body_0|>\n\n def forward(self, x: Tensor, memory: Tensor, src_mask: Tensor, trg_mask: Tensor, return_attention: bool=False, **kwargs) -> Tensor:\n \"\"\"Forward pass of a single Transformer decoder layer. First applies target-target self-attention, dropout with residual connection (adding the input to the result), and layer norm. Second computes source-target cross-attention, dropout with residual connection (adding the self-attention to the result), and layer norm. Finally goes through a position-wise feed-forward layer. 
:param x: inputs :param memory: source representations :param src_mask: source mask :param trg_mask: target mask (so as to not condition on future steps) :param return_attention: whether to return the attention weights :return: - output tensor - attention weights\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TransformerDecoderLayer:\n \"\"\"Transformer decoder layer. Consists of self-attention, source-attention, and feed-forward.\"\"\"\n\n def __init__(self, size: int=0, ff_size: int=0, num_heads: int=0, dropout: float=0.1, alpha: float=1.0, layer_norm: str='post', activation: str='relu') -> None:\n \"\"\"Represents a single Transformer decoder layer. It attends to the source representation and the previous decoder states. Note: don't change the name or the order of members! otherwise pretrained models cannot be loaded correctly. :param size: model dimensionality :param ff_size: size of the feed-forward intermediate layer :param num_heads: number of heads :param dropout: dropout to apply to input :param alpha: weight factor for residual connection :param layer_norm: either \"pre\" or \"post\" :param activation: activation function\"\"\"\n super().__init__()\n self.size = size\n self.trg_trg_att = MultiHeadedAttention(num_heads, size, dropout=dropout)\n self.src_trg_att = MultiHeadedAttention(num_heads, size, dropout=dropout)\n self.feed_forward = PositionwiseFeedForward(size, ff_size=ff_size, dropout=dropout, alpha=alpha, layer_norm=layer_norm, activation=activation)\n self.x_layer_norm = nn.LayerNorm(size, eps=1e-06)\n self.dec_layer_norm = nn.LayerNorm(size, eps=1e-06)\n self.dropout = nn.Dropout(dropout)\n self.alpha = alpha\n self._layer_norm_position = layer_norm\n assert self._layer_norm_position in {'pre', 'post'}\n\n def forward(self, x: Tensor, memory: Tensor, src_mask: Tensor, trg_mask: Tensor, return_attention: bool=False, **kwargs) -> Tensor:\n \"\"\"Forward pass of a single Transformer decoder layer. First applies target-target self-attention, dropout with residual connection (adding the input to the result), and layer norm. Second computes source-target cross-attention, dropout with residual connection (adding the self-attention to the result), and layer norm. Finally goes through a position-wise feed-forward layer. 
:param x: inputs :param memory: source representations :param src_mask: source mask :param trg_mask: target mask (so as to not condition on future steps) :param return_attention: whether to return the attention weights :return: - output tensor - attention weights\"\"\"\n residual = x\n if self._layer_norm_position == 'pre':\n x = self.x_layer_norm(x)\n h1, _ = self.trg_trg_att(x, x, x, mask=trg_mask)\n h1 = self.dropout(h1) + self.alpha * residual\n if self._layer_norm_position == 'post':\n h1 = self.x_layer_norm(h1)\n h1_residual = h1\n if self._layer_norm_position == 'pre':\n h1 = self.dec_layer_norm(h1)\n h2, att = self.src_trg_att(memory, memory, h1, mask=src_mask, return_weights=return_attention)\n h2 = self.dropout(h2) + self.alpha * h1_residual\n if self._layer_norm_position == 'post':\n h2 = self.dec_layer_norm(h2)\n out = self.feed_forward(h2)\n if return_attention:\n return (out, att)\n return (out, None)\n", "source": "the_stack_v2_python_sparse", "source_path": "joeynmt/transformer_layers.py", "source_repo": "joeynmt/joeynmt", "split": "val", "star_events_count": 668} {"blob_id": "14388c43e0808f12454f282c0f52bea3563b8e96", "bodies": ["log.info('Setup Section verifyProcessorDetails')\nhost_ip = classparam['host_ip']\nboot_order_obj = classparam['boot_order_obj']\nself.host_serial_handle = classparam['host_serial_handle']\nself.host_serial_handle.connect_to_host_serial()\nlog.info('Create boot device from CIMC config and boot from it')\nif boot_order_obj.create_and_config_localhdd_boot_device('HDD') is False:\n self.failed('Failed to create boot device from CIMC')\nlog.info('Waiting for host to boot into respective boot device')\ncimc_util_obj.power_cycle_host()\nres = cimc_util_obj.verify_host_up(hostname=host_ip, wait_for_ping_fail=False)\nif res is False:\n log.error('Failed to boot from cimc configured boot device')\nelse:\n log.info('Successfully booted from cimc configured boot device')", "expected_out = classparam['expected_out']\nvalidation_string = classparam['validation_string']\nbootdev = parameter\noptions = 'persistent'\ncmd_out = configure_boot_device_ipmi(config, bootdev, options)\nif cmd_out == expected_out + bootdev:\n log.info('Successfully executed, and got expected output: ' + str(cmd_out))\nelse:\n log.error('Failed to get expected command output. 
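The TransformerDecoderLayer record that closes above toggles pre- vs post-layer-norm placement via _layer_norm_position. A runnable schematic of just that ordering (PyTorch; nn.Identity stands in for the record's MultiHeadedAttention / feed-forward sublayers):

import torch
from torch import nn

def sublayer_step(x, sublayer, norm, position='post', alpha=1.0, p_drop=0.1):
    # Mirrors the record's residual pattern around one sublayer.
    residual = x
    if position == 'pre':            # normalize the sublayer *input*
        x = norm(x)
    h = nn.functional.dropout(sublayer(x), p=p_drop) + alpha * residual
    if position == 'post':           # normalize *after* the residual add
        h = norm(h)
    return h

x = torch.randn(2, 5, 8)                     # (batch, length, size)
norm = nn.LayerNorm(8, eps=1e-6)
print(sublayer_step(x, nn.Identity(), norm, position='pre').shape)
# -> torch.Size([2, 5, 8])

The record applies this pattern twice (self-attention with x_layer_norm, then cross-attention with dec_layer_norm) before the position-wise feed-forward.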
Expected was %s, but got as %s' % (expected_out + bootdev, cmd_out))\nlog.info('Validation: connecting host over telnet and verify console logs')\nlog.info('Verify that host boots to expected boot device %s' % parameter)\ncimc_util_obj.power_cycle_host()\ncmd = validation_string[parameter]\nlog.info('Expected string is: ' + str(cmd.encode()))\nresult = self.host_serial_handle.validate_host_console_output(exp_string=cmd.encode())\nif result == 'Fail':\n self.failed('Failed to validate non-persistent boot for boot device %s' % parameter)", "expected_out = classparam['expected_out']\nvalidation_string = classparam['validation_string']\nself.host_serial_handle.disconnect()\nremove_consistent_ipmi_boot(cimc_util_obj, config)\nlog.info('Cleanup section passed')"], "bodies_text": "<|body_start_0|>\n log.info('Setup Section verifyProcessorDetails')\n host_ip = classparam['host_ip']\n boot_order_obj = classparam['boot_order_obj']\n self.host_serial_handle = classparam['host_serial_handle']\n self.host_serial_handle.connect_to_host_serial()\n log.info('Create boot device from CIMC config and boot from it')\n if boot_order_obj.create_and_config_localhdd_boot_device('HDD') is False:\n self.failed('Failed to create boot device from CIMC')\n log.info('Waiting for host to boot into respective boot device')\n cimc_util_obj.power_cycle_host()\n res = cimc_util_obj.verify_host_up(hostname=host_ip, wait_for_ping_fail=False)\n if res is False:\n log.error('Failed to boot from cimc configured boot device')\n else:\n log.info('Successfully booted from cimc configured boot device')\n<|end_body_0|>\n\n<|body_start_1|>\n expected_out = classparam['expected_out']\n validation_string = classparam['validation_string']\n bootdev = parameter\n options = 'persistent'\n cmd_out = configure_boot_device_ipmi(config, bootdev, options)\n if cmd_out == expected_out + bootdev:\n log.info('Successfully executed, and got expected output: ' + str(cmd_out))\n else:\n log.error('Failed to get expected command output. 
Expected was %s, but got as %s' % (expected_out + bootdev, cmd_out))\n log.info('Validation: connecting host over telnet and verify console logs')\n log.info('Verify that host boots to expected boot device %s' % parameter)\n cimc_util_obj.power_cycle_host()\n cmd = validation_string[parameter]\n log.info('Expected string is: ' + str(cmd.encode()))\n result = self.host_serial_handle.validate_host_console_output(exp_string=cmd.encode())\n if result == 'Fail':\n self.failed('Failed to validate non-persistent boot for boot device %s' % parameter)\n<|end_body_1|>\n\n<|body_start_2|>\n expected_out = classparam['expected_out']\n validation_string = classparam['validation_string']\n self.host_serial_handle.disconnect()\n remove_consistent_ipmi_boot(cimc_util_obj, config)\n log.info('Cleanup section passed')\n<|end_body_2|>\n", "class_docstring": "Configure boot device to bios, pxe, hdd, cdrom, floppy drive options in non persistent mode when boot device set from CIMC config and booted from it", "class_name": "CimcConfigIPMICmdNonPersistentBootDevice", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CimcConfigIPMICmdNonPersistentBootDevice:\n \"\"\"Configure boot device to bios, pxe, hdd, cdrom, floppy drive options in non persistent mode when boot device set from CIMC config and booted from it\"\"\"\n\n def setup(self, cimc_util_obj):\n \"\"\"Test Case Setup\"\"\"\n <|body_0|>\n\n def test(self, cimc_util_obj, config, parameter):\n \"\"\"ipmi command to set boot to bios, pxe, hdd, cdrom, floppy drive options in non-persistent mode\"\"\"\n <|body_1|>\n\n def cleanup(self, cimc_util_obj, config):\n \"\"\"Test Case Cleanup\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n log.info('Setup Section verifyProcessorDetails')\n host_ip = classparam['host_ip']\n boot_order_obj = classparam['boot_order_obj']\n self.host_serial_handle = classparam['host_serial_handle']\n self.host_serial_handle.connect_to_host_serial()\n log.info('Create boot device from CIMC config and boot from it')\n if boot_order_obj.create_and_config_localhdd_boot_device('HDD') is False:\n self.failed('Failed to create boot device from CIMC')\n log.info('Waiting for host to boot into respective boot device')\n cimc_util_obj.power_cycle_host()\n res = cimc_util_obj.verify_host_up(hostname=host_ip, wait_for_ping_fail=False)\n if res is False:\n log.error('Failed to boot from cimc configured boot device')\n else:\n log.info('Successfully booted from cimc configured boot device')\n<|end_body_0|>\n\n<|body_start_1|>\n expected_out = classparam['expected_out']\n validation_string = classparam['validation_string']\n bootdev = parameter\n options = 'persistent'\n cmd_out = configure_boot_device_ipmi(config, bootdev, options)\n if cmd_out == expected_out + bootdev:\n log.info('Successfully executed, and got expected output: ' + str(cmd_out))\n else:\n log.error('Failed to get expected command output. 
Expected was %s, but got as %s' % (expected_out + bootdev, cmd_out))\n log.info('Validation: connecting host over telnet and verify console logs')\n log.info('Verify that host boots to expected boot device %s' % parameter)\n cimc_util_obj.power_cycle_host()\n cmd = validation_string[parameter]\n log.info('Expected string is: ' + str(cmd.encode()))\n result = self.host_serial_handle.validate_host_console_output(exp_string=cmd.encode())\n if result == 'Fail':\n self.failed('Failed to validate non-persistent boot for boot device %s' % parameter)\n<|end_body_1|>\n\n<|body_start_2|>\n expected_out = classparam['expected_out']\n validation_string = classparam['validation_string']\n self.host_serial_handle.disconnect()\n remove_consistent_ipmi_boot(cimc_util_obj, config)\n log.info('Cleanup section passed')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000419", "length_bytes": 19363, "license_type": "no_license", "methods": [{"docstring": "Test Case Setup", "name": "setup", "signature": "def setup(self, cimc_util_obj)"}, {"docstring": "ipmi command to set boot to bios, pxe, hdd, cdrom, floppy drive options in non-persistent mode", "name": "test", "signature": "def test(self, cimc_util_obj, config, parameter)"}, {"docstring": "Test Case Cleanup", "name": "cleanup", "signature": "def cleanup(self, cimc_util_obj, config)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004582", "prompt": "Implement the Python class `CimcConfigIPMICmdNonPersistentBootDevice` described below.\n\nClass description:\nConfigure boot device to bios, pxe, hdd, cdrom, floppy drive options in non persistent mode when boot device set from CIMC config and booted from it\n\nMethod signatures and docstrings:\n- def setup(self, cimc_util_obj): Test Case Setup\n- def test(self, cimc_util_obj, config, parameter): ipmi command to set boot to bios, pxe, hdd, cdrom, floppy drive options in non-persistent mode\n- def cleanup(self, cimc_util_obj, config): Test Case Cleanup", "prompted_full_text": "Implement the Python class `CimcConfigIPMICmdNonPersistentBootDevice` described below.\n\nClass description:\nConfigure boot device to bios, pxe, hdd, cdrom, floppy drive options in non persistent mode when boot device set from CIMC config and booted from it\n\nMethod signatures and docstrings:\n- def setup(self, cimc_util_obj): Test Case Setup\n- def test(self, cimc_util_obj, config, parameter): ipmi command to set boot to bios, pxe, hdd, cdrom, floppy drive options in non-persistent mode\n- def cleanup(self, cimc_util_obj, config): Test Case Cleanup\n\n<|skeleton|>\nclass CimcConfigIPMICmdNonPersistentBootDevice:\n \"\"\"Configure boot device to bios, pxe, hdd, cdrom, floppy drive options in non persistent mode when boot device set from CIMC config and booted from it\"\"\"\n\n def setup(self, cimc_util_obj):\n \"\"\"Test Case Setup\"\"\"\n <|body_0|>\n\n def test(self, cimc_util_obj, config, parameter):\n \"\"\"ipmi command to set boot to bios, pxe, hdd, cdrom, floppy drive options in non-persistent mode\"\"\"\n <|body_1|>\n\n def cleanup(self, cimc_util_obj, config):\n \"\"\"Test Case Cleanup\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n log.info('Setup Section verifyProcessorDetails')\n host_ip = classparam['host_ip']\n boot_order_obj = classparam['boot_order_obj']\n self.host_serial_handle = classparam['host_serial_handle']\n self.host_serial_handle.connect_to_host_serial()\n log.info('Create boot device from CIMC config and boot from it')\n if 
boot_order_obj.create_and_config_localhdd_boot_device('HDD') is False:\n self.failed('Failed to create boot device from CIMC')\n log.info('Waiting for host to boot into respective boot device')\n cimc_util_obj.power_cycle_host()\n res = cimc_util_obj.verify_host_up(hostname=host_ip, wait_for_ping_fail=False)\n if res is False:\n log.error('Failed to boot from cimc configured boot device')\n else:\n log.info('Successfully booted from cimc configured boot device')\n<|end_body_0|>\n\n<|body_start_1|>\n expected_out = classparam['expected_out']\n validation_string = classparam['validation_string']\n bootdev = parameter\n options = 'persistent'\n cmd_out = configure_boot_device_ipmi(config, bootdev, options)\n if cmd_out == expected_out + bootdev:\n log.info('Successfully executed, and got expected output: ' + str(cmd_out))\n else:\n log.error('Failed to get expected command output. Expected was %s, but got as %s' % (expected_out + bootdev, cmd_out))\n log.info('Validation: connecting host over telnet and verify console logs')\n log.info('Verify that host boots to expected boot device %s' % parameter)\n cimc_util_obj.power_cycle_host()\n cmd = validation_string[parameter]\n log.info('Expected string is: ' + str(cmd.encode()))\n result = self.host_serial_handle.validate_host_console_output(exp_string=cmd.encode())\n if result == 'Fail':\n self.failed('Failed to validate non-persistent boot for boot device %s' % parameter)\n<|end_body_1|>\n\n<|body_start_2|>\n expected_out = classparam['expected_out']\n validation_string = classparam['validation_string']\n self.host_serial_handle.disconnect()\n remove_consistent_ipmi_boot(cimc_util_obj, config)\n log.info('Cleanup section passed')\n<|end_body_2|>\n", "revision_id": "c255e045a4950a0d8868a10012d5ce6e5c6a9c23", "skeleton": "<|skeleton|>\nclass CimcConfigIPMICmdNonPersistentBootDevice:\n \"\"\"Configure boot device to bios, pxe, hdd, cdrom, floppy drive options in non persistent mode when boot device set from CIMC config and booted from it\"\"\"\n\n def setup(self, cimc_util_obj):\n \"\"\"Test Case Setup\"\"\"\n <|body_0|>\n\n def test(self, cimc_util_obj, config, parameter):\n \"\"\"ipmi command to set boot to bios, pxe, hdd, cdrom, floppy drive options in non-persistent mode\"\"\"\n <|body_1|>\n\n def cleanup(self, cimc_util_obj, config):\n \"\"\"Test Case Cleanup\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CimcConfigIPMICmdNonPersistentBootDevice:\n \"\"\"Configure boot device to bios, pxe, hdd, cdrom, floppy drive options in non persistent mode when boot device set from CIMC config and booted from it\"\"\"\n\n def setup(self, cimc_util_obj):\n \"\"\"Test Case Setup\"\"\"\n log.info('Setup Section verifyProcessorDetails')\n host_ip = classparam['host_ip']\n boot_order_obj = classparam['boot_order_obj']\n self.host_serial_handle = classparam['host_serial_handle']\n self.host_serial_handle.connect_to_host_serial()\n log.info('Create boot device from CIMC config and boot from it')\n if boot_order_obj.create_and_config_localhdd_boot_device('HDD') is False:\n self.failed('Failed to create boot device from CIMC')\n log.info('Waiting for host to boot into respective boot device')\n cimc_util_obj.power_cycle_host()\n res = cimc_util_obj.verify_host_up(hostname=host_ip, wait_for_ping_fail=False)\n if res is False:\n log.error('Failed to boot from cimc configured boot device')\n else:\n log.info('Successfully booted from cimc configured 
boot device')\n\n def test(self, cimc_util_obj, config, parameter):\n \"\"\"ipmi command to set boot to bios, pxe, hdd, cdrom, floppy drive options in non-persistent mode\"\"\"\n expected_out = classparam['expected_out']\n validation_string = classparam['validation_string']\n bootdev = parameter\n options = 'persistent'\n cmd_out = configure_boot_device_ipmi(config, bootdev, options)\n if cmd_out == expected_out + bootdev:\n log.info('Successfully executed, and got expected output: ' + str(cmd_out))\n else:\n log.error('Failed to get expected command output. Expected was %s, but got as %s' % (expected_out + bootdev, cmd_out))\n log.info('Validation: connecting host over telnet and verify console logs')\n log.info('Verify that host boots to expected boot device %s' % parameter)\n cimc_util_obj.power_cycle_host()\n cmd = validation_string[parameter]\n log.info('Expected string is: ' + str(cmd.encode()))\n result = self.host_serial_handle.validate_host_console_output(exp_string=cmd.encode())\n if result == 'Fail':\n self.failed('Failed to validate non-persistent boot for boot device %s' % parameter)\n\n def cleanup(self, cimc_util_obj, config):\n \"\"\"Test Case Cleanup\"\"\"\n expected_out = classparam['expected_out']\n validation_string = classparam['validation_string']\n self.host_serial_handle.disconnect()\n remove_consistent_ipmi_boot(cimc_util_obj, config)\n log.info('Cleanup section passed')\n", "source": "the_stack_v2_python_sparse", "source_path": "ipmi_cmnd_bootorder.py", "source_repo": "jrchanda/MyRepo", "split": "val", "star_events_count": 0} {"blob_id": "9f44c8abe0207208145d35d42f21e4ddc455aa7e", "bodies": ["formattedResult = DBFormatter.formatDict(self, result)\nfor entry in formattedResult:\n if entry['bool_outcome'] == 0:\n entry['outcome'] = 'failure'\n else:\n entry['outcome'] = 'success'\n del entry['bool_outcome']\nif len(formattedResult) == 1:\n return formattedResult[0]\nelse:\n return formattedResult", "if isinstance(jobID, list):\n binds = jobID\nelse:\n binds = {'jobid': jobID}\nresult = self.dbi.processData(self.sql, binds, conn=conn, transaction=transaction)\nreturn self.formatDict(result)"], "bodies_text": "<|body_start_0|>\n formattedResult = DBFormatter.formatDict(self, result)\n for entry in formattedResult:\n if entry['bool_outcome'] == 0:\n entry['outcome'] = 'failure'\n else:\n entry['outcome'] = 'success'\n del entry['bool_outcome']\n if len(formattedResult) == 1:\n return formattedResult[0]\n else:\n return formattedResult\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(jobID, list):\n binds = jobID\n else:\n binds = {'jobid': jobID}\n result = self.dbi.processData(self.sql, binds, conn=conn, transaction=transaction)\n return self.formatDict(result)\n<|end_body_1|>\n", "class_docstring": "_LoadFromID_ Retrieve meta data for a job given it's ID. This includes the name, job group and last update time.", "class_name": "LoadFromID", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LoadFromID:\n \"\"\"_LoadFromID_ Retrieve meta data for a job given it's ID. 
This includes the name, job group and last update time.\"\"\"\n\n def formatDict(self, result):\n \"\"\"_formatDict_ Cast the id, jobgroup and last_update columns to integers because formatDict() turns everything into strings.\"\"\"\n <|body_0|>\n\n def execute(self, jobID, conn=None, transaction=False):\n \"\"\"_execute_ Execute the SQL for the given job ID and then format and return the result.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n formattedResult = DBFormatter.formatDict(self, result)\n for entry in formattedResult:\n if entry['bool_outcome'] == 0:\n entry['outcome'] = 'failure'\n else:\n entry['outcome'] = 'success'\n del entry['bool_outcome']\n if len(formattedResult) == 1:\n return formattedResult[0]\n else:\n return formattedResult\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(jobID, list):\n binds = jobID\n else:\n binds = {'jobid': jobID}\n result = self.dbi.processData(self.sql, binds, conn=conn, transaction=transaction)\n return self.formatDict(result)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000420", "length_bytes": 1993, "license_type": "permissive", "methods": [{"docstring": "_formatDict_ Cast the id, jobgroup and last_update columns to integers because formatDict() turns everything into strings.", "name": "formatDict", "signature": "def formatDict(self, result)"}, {"docstring": "_execute_ Execute the SQL for the given job ID and then format and return the result.", "name": "execute", "signature": "def execute(self, jobID, conn=None, transaction=False)"}], "n_methods": 2, "prompt": "Implement the Python class `LoadFromID` described below.\n\nClass description:\n_LoadFromID_ Retrieve meta data for a job given it's ID. This includes the name, job group and last update time.\n\nMethod signatures and docstrings:\n- def formatDict(self, result): _formatDict_ Cast the id, jobgroup and last_update columns to integers because formatDict() turns everything into strings.\n- def execute(self, jobID, conn=None, transaction=False): _execute_ Execute the SQL for the given job ID and then format and return the result.", "prompted_full_text": "Implement the Python class `LoadFromID` described below.\n\nClass description:\n_LoadFromID_ Retrieve meta data for a job given it's ID. This includes the name, job group and last update time.\n\nMethod signatures and docstrings:\n- def formatDict(self, result): _formatDict_ Cast the id, jobgroup and last_update columns to integers because formatDict() turns everything into strings.\n- def execute(self, jobID, conn=None, transaction=False): _execute_ Execute the SQL for the given job ID and then format and return the result.\n\n<|skeleton|>\nclass LoadFromID:\n \"\"\"_LoadFromID_ Retrieve meta data for a job given it's ID. 
This includes the name, job group and last update time.\"\"\"\n\n def formatDict(self, result):\n \"\"\"_formatDict_ Cast the id, jobgroup and last_update columns to integers because formatDict() turns everything into strings.\"\"\"\n <|body_0|>\n\n def execute(self, jobID, conn=None, transaction=False):\n \"\"\"_execute_ Execute the SQL for the given job ID and then format and return the result.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n formattedResult = DBFormatter.formatDict(self, result)\n for entry in formattedResult:\n if entry['bool_outcome'] == 0:\n entry['outcome'] = 'failure'\n else:\n entry['outcome'] = 'success'\n del entry['bool_outcome']\n if len(formattedResult) == 1:\n return formattedResult[0]\n else:\n return formattedResult\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(jobID, list):\n binds = jobID\n else:\n binds = {'jobid': jobID}\n result = self.dbi.processData(self.sql, binds, conn=conn, transaction=transaction)\n return self.formatDict(result)\n<|end_body_1|>\n", "revision_id": "de110ccf6fc63ef5589b4e871ef4d51d5bce7a25", "skeleton": "<|skeleton|>\nclass LoadFromID:\n \"\"\"_LoadFromID_ Retrieve meta data for a job given it's ID. This includes the name, job group and last update time.\"\"\"\n\n def formatDict(self, result):\n \"\"\"_formatDict_ Cast the id, jobgroup and last_update columns to integers because formatDict() turns everything into strings.\"\"\"\n <|body_0|>\n\n def execute(self, jobID, conn=None, transaction=False):\n \"\"\"_execute_ Execute the SQL for the given job ID and then format and return the result.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LoadFromID:\n \"\"\"_LoadFromID_ Retrieve meta data for a job given it's ID. 
This includes the name, job group and last update time.\"\"\"\n\n def formatDict(self, result):\n \"\"\"_formatDict_ Cast the id, jobgroup and last_update columns to integers because formatDict() turns everything into strings.\"\"\"\n formattedResult = DBFormatter.formatDict(self, result)\n for entry in formattedResult:\n if entry['bool_outcome'] == 0:\n entry['outcome'] = 'failure'\n else:\n entry['outcome'] = 'success'\n del entry['bool_outcome']\n if len(formattedResult) == 1:\n return formattedResult[0]\n else:\n return formattedResult\n\n def execute(self, jobID, conn=None, transaction=False):\n \"\"\"_execute_ Execute the SQL for the given job ID and then format and return the result.\"\"\"\n if isinstance(jobID, list):\n binds = jobID\n else:\n binds = {'jobid': jobID}\n result = self.dbi.processData(self.sql, binds, conn=conn, transaction=transaction)\n return self.formatDict(result)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/python/WMCore/WMBS/MySQL/Jobs/LoadFromID.py", "source_repo": "vkuznet/WMCore", "split": "val", "star_events_count": 0} {"blob_id": "54125502f388c1768a01670557979b5686089a45", "bodies": ["self.input = img\nself.siftDetector = cv2.FeatureDetector_create('SIFT')\nself.siftDetector.setInt('nOctaveLayers', nOctaveLayers)\nself.siftDetector.setDouble('contrastThreshold', contrastThreshold)\nself.siftDetector.setInt('edgeThreshold', edgeThreshold)\nself.siftDetector.setDouble('sigma', sigma)\nself.siftExtractor = cv2.DescriptorExtractor_create('SIFT')\nself.keyPoints = None\nself.descriptors = None\nself.coordinates = None", "keyPoints = self.siftDetector.detect(self.input, None)\nkeyPoints, descriptors = self.siftExtractor.compute(self.input, keyPoints)\nself.keyPoints = keyPoints\nself.descriptors = descriptors\nm = size(self.keyPoints)\nsiftCoord = np.zeros([m, 2])\nfor i in range(m):\n siftCoord[i][0] = self.keyPoints[i].pt[1]\n siftCoord[i][1] = self.keyPoints[i].pt[0]\nif plot_flag == True:\n figure()\n gray()\n imshow(self.input)\n plot([p[1] for p in siftCoord], [p[0] for p in siftCoord], '*')\n axis('off')\n show()\nself.coordinates = siftCoord"], "bodies_text": "<|body_start_0|>\n self.input = img\n self.siftDetector = cv2.FeatureDetector_create('SIFT')\n self.siftDetector.setInt('nOctaveLayers', nOctaveLayers)\n self.siftDetector.setDouble('contrastThreshold', contrastThreshold)\n self.siftDetector.setInt('edgeThreshold', edgeThreshold)\n self.siftDetector.setDouble('sigma', sigma)\n self.siftExtractor = cv2.DescriptorExtractor_create('SIFT')\n self.keyPoints = None\n self.descriptors = None\n self.coordinates = None\n<|end_body_0|>\n\n<|body_start_1|>\n keyPoints = self.siftDetector.detect(self.input, None)\n keyPoints, descriptors = self.siftExtractor.compute(self.input, keyPoints)\n self.keyPoints = keyPoints\n self.descriptors = descriptors\n m = size(self.keyPoints)\n siftCoord = np.zeros([m, 2])\n for i in range(m):\n siftCoord[i][0] = self.keyPoints[i].pt[1]\n siftCoord[i][1] = self.keyPoints[i].pt[0]\n if plot_flag == True:\n figure()\n gray()\n imshow(self.input)\n plot([p[1] for p in siftCoord], [p[0] for p in siftCoord], '*')\n axis('off')\n show()\n self.coordinates = siftCoord\n<|end_body_1|>\n", "class_docstring": "", "class_name": "SIFT_Obj", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SIFT_Obj:\n\n def __init__(self, img, nOctaveLayers=3, contrastThreshold=0.04, edgeThreshold=10, sigma=1.6):\n \"\"\"**Definition**: SIFT_Obj(img, upright = False) SIFT 
object class. Implementation of SIFT detector and SIFT descriptor. **Inputs**: * img: the source image for which the descriptors are computed * nOctaveLayers (optional): default *3*. See OpenCV documentation * contrastThreshold (optional): default *0.04*. See OpenCV documentation * edgeThreshold (optional): default *10*. See OpenCV documentation * sigma (optional): default *1.6*. See OpenCV documentation **Outputs - Class members**: * keyPoints: extracted SIFT keypoints for the image *img*. Type: KeyPoint class of OpenCV. * descriptors: extracted SIFT descriptors for image *img*. Type: numpy array of dimensions *number_of_keypoints x 128*.\"\"\"\n <|body_0|>\n\n def SIFT_Keypoints_Descriptors(self, plot_flag=True):\n \"\"\"**Definition**: SIFT_Keypoints_Descriptors(plot_flag = True) Implements keypoint detection and descriptors computation. Detected keypoints are stored in **self.keyPoints** and computed descriptors are stored in **self.descriptors**. Additionally, it extracts keypoints coordinates. Coordinates are stored in **self.coordinates**. **Inputs**: * plot_flag (optional): default *True*. If *True* this method plots keypoints on source image *img*. **Outputs**: * *None*\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.input = img\n self.siftDetector = cv2.FeatureDetector_create('SIFT')\n self.siftDetector.setInt('nOctaveLayers', nOctaveLayers)\n self.siftDetector.setDouble('contrastThreshold', contrastThreshold)\n self.siftDetector.setInt('edgeThreshold', edgeThreshold)\n self.siftDetector.setDouble('sigma', sigma)\n self.siftExtractor = cv2.DescriptorExtractor_create('SIFT')\n self.keyPoints = None\n self.descriptors = None\n self.coordinates = None\n<|end_body_0|>\n\n<|body_start_1|>\n keyPoints = self.siftDetector.detect(self.input, None)\n keyPoints, descriptors = self.siftExtractor.compute(self.input, keyPoints)\n self.keyPoints = keyPoints\n self.descriptors = descriptors\n m = size(self.keyPoints)\n siftCoord = np.zeros([m, 2])\n for i in range(m):\n siftCoord[i][0] = self.keyPoints[i].pt[1]\n siftCoord[i][1] = self.keyPoints[i].pt[0]\n if plot_flag == True:\n figure()\n gray()\n imshow(self.input)\n plot([p[1] for p in siftCoord], [p[0] for p in siftCoord], '*')\n axis('off')\n show()\n self.coordinates = siftCoord\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000421", "length_bytes": 6133, "license_type": "no_license", "methods": [{"docstring": "**Definition**: SIFT_Obj(img, upright = False) SIFT object class. Implementation of SIFT detector and SIFT descriptor. **Inputs**: * img: the source image for which the descriptors are computed * nOctaveLayers (optional): default *3*. See OpenCV documentation * contrastThreshold (optional): default *0.04*. See OpenCV documentation * edgeThreshold (optional): default *10*. See OpenCV documentation * sigma (optional): default *1.6*. See OpenCV documentation **Outputs - Class members**: * keyPoints: extracted SIFT keypoints for the image *img*. Type: KeyPoint class of OpenCV. * descriptors: extracted SIFT descriptors for image *img*. Type: numpy array of dimensions *number_of_keypoints x 128*.", "name": "__init__", "signature": "def __init__(self, img, nOctaveLayers=3, contrastThreshold=0.04, edgeThreshold=10, sigma=1.6)"}, {"docstring": "**Definition**: SIFT_Keypoints_Descriptors(plot_flag = True) Implements keypoint detection and descriptors computation. Detected keypoints are stored in **self.keyPoints** and computed descriptors are stored in **self.descriptors**. 
Additionally, it extracts keypoints coordinates. Coordinates are stored in **self.coordinates**. **Inputs**: * plot_flag (optional): default *True*. If *True* this method plots keypoints on source image *img*. **Outputs**: * *None*", "name": "SIFT_Keypoints_Descriptors", "signature": "def SIFT_Keypoints_Descriptors(self, plot_flag=True)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000004", "prompt": "Implement the Python class `SIFT_Obj` described below.\n\nClass description:\nImplement the SIFT_Obj class.\n\nMethod signatures and docstrings:\n- def __init__(self, img, nOctaveLayers=3, contrastThreshold=0.04, edgeThreshold=10, sigma=1.6): **Definition**: SIFT_Obj(img, upright = False) SIFT object class. Implementation of SIFT detector and SIFT descriptor. **Inputs**: * img: the source image for which the descriptors are computed * nOctaveLayers (optional): default *3*. See OpenCV documentation * contrastThreshold (optional): default *0.04*. See OpenCV documentation * edgeThreshold (optional): default *10*. See OpenCV documentation * sigma (optional): default *1.6*. See OpenCV documentation **Outputs - Class members**: * keyPoints: extracted SIFT keypoints for the image *img*. Type: KeyPoint class of OpenCV. * descriptors: extracted SIFT descriptors for image *img*. Type: numpy array of dimensions *number_of_keypoints x 128*.\n- def SIFT_Keypoints_Descriptors(self, plot_flag=True): **Definition**: SIFT_Keypoints_Descriptors(plot_flag = True) Implements keypoint detection and descriptors computation. Detected keypoints are stored in **self.keyPoints** and computed descriptors are stored in **self.descriptors**. Additionally, it extracts keypoints coordinates. Coordinates are stored in **self.coordinates**. **Inputs**: * plot_flag (optional): default *True*. If *True* this method plots keypoints on source image *img*. **Outputs**: * *None*", "prompted_full_text": "Implement the Python class `SIFT_Obj` described below.\n\nClass description:\nImplement the SIFT_Obj class.\n\nMethod signatures and docstrings:\n- def __init__(self, img, nOctaveLayers=3, contrastThreshold=0.04, edgeThreshold=10, sigma=1.6): **Definition**: SIFT_Obj(img, upright = False) SIFT object class. Implementation of SIFT detector and SIFT descriptor. **Inputs**: * img: the source image for which the descriptors are computed * nOctaveLayers (optional): default *3*. See OpenCV documentation * contrastThreshold (optional): default *0.04*. See OpenCV documentation * edgeThreshold (optional): default *10*. See OpenCV documentation * sigma (optional): default *1.6*. See OpenCV documentation **Outputs - Class members**: * keyPoints: extracted SIFT keypoints for the image *img*. Type: KeyPoint class of OpenCV. * descriptors: extracted SIFT descriptors for image *img*. Type: numpy array of dimensions *number_of_keypoints x 128*.\n- def SIFT_Keypoints_Descriptors(self, plot_flag=True): **Definition**: SIFT_Keypoints_Descriptors(plot_flag = True) Implements keypoint detection and descriptors computation. Detected keypoints are stored in **self.keyPoints** and computed descriptors are stored in **self.descriptors**. Additionally, it extracts keypoints coordinates. Coordinates are stored in **self.coordinates**. **Inputs**: * plot_flag (optional): default *True*. If *True* this method plots keypoints on source image *img*. 
**Outputs**: * *None*\n\n<|skeleton|>\nclass SIFT_Obj:\n\n def __init__(self, img, nOctaveLayers=3, contrastThreshold=0.04, edgeThreshold=10, sigma=1.6):\n \"\"\"**Definition**: SIFT_Obj(img, upright = False) SIFT object class. Implementation of SIFT detector and SIFT descriptor. **Inputs**: * img: the source image for which the descriptors are computed * nOctaveLayers (optional): default *3*. See OpenCV documentation * contrastThreshold (optional): default *0.04*. See OpenCV documentation * edgeThreshold (optional): default *10*. See OpenCV documentation * sigma (optional): default *1.6*. See OpenCV documentation **Outputs - Class members**: * keyPoints: extracted SIFT keypoints for the image *img*. Type: KeyPoint class of OpenCV. * descriptors: extracted SIFT descriptors for image *img*. Type: numpy array of dimensions *number_of_keypoints x 128*.\"\"\"\n <|body_0|>\n\n def SIFT_Keypoints_Descriptors(self, plot_flag=True):\n \"\"\"**Definition**: SIFT_Keypoints_Descriptors(plot_flag = True) Implements keypoint detection and descriptors computation. Detected keypoints are stored in **self.keyPoints** and computed descriptors are stored in **self.descriptors**. Additionally, it extracts keypoints coordinates. Coordinates are stored in **self.coordinates**. **Inputs**: * plot_flag (optional): default *True*. If *True* this method plots keypoints on source image *img*. **Outputs**: * *None*\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.input = img\n self.siftDetector = cv2.FeatureDetector_create('SIFT')\n self.siftDetector.setInt('nOctaveLayers', nOctaveLayers)\n self.siftDetector.setDouble('contrastThreshold', contrastThreshold)\n self.siftDetector.setInt('edgeThreshold', edgeThreshold)\n self.siftDetector.setDouble('sigma', sigma)\n self.siftExtractor = cv2.DescriptorExtractor_create('SIFT')\n self.keyPoints = None\n self.descriptors = None\n self.coordinates = None\n<|end_body_0|>\n\n<|body_start_1|>\n keyPoints = self.siftDetector.detect(self.input, None)\n keyPoints, descriptors = self.siftExtractor.compute(self.input, keyPoints)\n self.keyPoints = keyPoints\n self.descriptors = descriptors\n m = size(self.keyPoints)\n siftCoord = np.zeros([m, 2])\n for i in range(m):\n siftCoord[i][0] = self.keyPoints[i].pt[1]\n siftCoord[i][1] = self.keyPoints[i].pt[0]\n if plot_flag == True:\n figure()\n gray()\n imshow(self.input)\n plot([p[1] for p in siftCoord], [p[0] for p in siftCoord], '*')\n axis('off')\n show()\n self.coordinates = siftCoord\n<|end_body_1|>\n", "revision_id": "90531055691a094dd271966b53c40b7a097df375", "skeleton": "<|skeleton|>\nclass SIFT_Obj:\n\n def __init__(self, img, nOctaveLayers=3, contrastThreshold=0.04, edgeThreshold=10, sigma=1.6):\n \"\"\"**Definition**: SIFT_Obj(img, upright = False) SIFT object class. Implementation of SIFT detector and SIFT descriptor. **Inputs**: * img: the source image for which the descriptors are computed * nOctaveLayers (optional): default *3*. See OpenCV documentation * contrastThreshold (optional): default *0.04*. See OpenCV documentation * edgeThreshold (optional): default *10*. See OpenCV documentation * sigma (optional): default *1.6*. See OpenCV documentation **Outputs - Class members**: * keyPoints: extracted SIFT keypoints for the image *img*. Type: KeyPoint class of OpenCV. * descriptors: extracted SIFT descriptors for image *img*. 
Type: numpy array of dimensions *number_of_keypoints x 128*.\"\"\"\n <|body_0|>\n\n def SIFT_Keypoints_Descriptors(self, plot_flag=True):\n \"\"\"**Definition**: SIFT_Keypoints_Descriptors(plot_flag = True) Implements keypoint detection and descriptors computation. Detected keypoints are stored in **self.keyPoints** and computed descriptors are stored in **self.descriptors**. Additionally, it extracts keypoints coordinates. Coordinates are stored in **self.coordinates**. **Inputs**: * plot_flag (optional): default *True*. If *True* this method plots keypoints on source image *img*. **Outputs**: * *None*\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SIFT_Obj:\n def __init__(self, img, nOctaveLayers=3, contrastThreshold=0.04, edgeThreshold=10, sigma=1.6):\n \"\"\"**Definition**: SIFT_Obj(img, upright = False) SIFT object class. Implementation of SIFT detector and SIFT descriptor. **Inputs**: * img: the source image for which the descriptors are computed * nOctaveLayers (optional): default *3*. See OpenCV documentation * contrastThreshold (optional): default *0.04*. See OpenCV documentation * edgeThreshold (optional): default *10*. See OpenCV documentation * sigma (optional): default *1.6*. See OpenCV documentation **Outputs - Class members**: * keyPoints: extracted SIFT keypoints for the image *img*. Type: KeyPoint class of OpenCV. * descriptors: extracted SIFT descriptors for image *img*. Type: numpy array of dimensions *number_of_keypoints x 128*.\"\"\"\n self.input = img\n self.siftDetector = cv2.FeatureDetector_create('SIFT')\n self.siftDetector.setInt('nOctaveLayers', nOctaveLayers)\n self.siftDetector.setDouble('contrastThreshold', contrastThreshold)\n self.siftDetector.setInt('edgeThreshold', edgeThreshold)\n self.siftDetector.setDouble('sigma', sigma)\n self.siftExtractor = cv2.DescriptorExtractor_create('SIFT')\n self.keyPoints = None\n self.descriptors = None\n self.coordinates = None\n\n def SIFT_Keypoints_Descriptors(self, plot_flag=True):\n \"\"\"**Definition**: SIFT_Keypoints_Descriptors(plot_flag = True) Implements keypoint detection and descriptors computation. Detected keypoints are stored in **self.keyPoints** and computed descriptors are stored in **self.descriptors**. Additionally, it extracts keypoints coordinates. Coordinates are stored in **self.coordinates**. **Inputs**: * plot_flag (optional): default *True*. If *True* this method plots keypoints on source image *img*. 
**Outputs**: * *None*\"\"\"\n keyPoints = self.siftDetector.detect(self.input, None)\n keyPoints, descriptors = self.siftExtractor.compute(self.input, keyPoints)\n self.keyPoints = keyPoints\n self.descriptors = descriptors\n m = size(self.keyPoints)\n siftCoord = np.zeros([m, 2])\n for i in range(m):\n siftCoord[i][0] = self.keyPoints[i].pt[1]\n siftCoord[i][1] = self.keyPoints[i].pt[0]\n if plot_flag == True:\n figure()\n gray()\n imshow(self.input)\n plot([p[1] for p in siftCoord], [p[0] for p in siftCoord], '*')\n axis('off')\n show()\n self.coordinates = siftCoord\n", "source": "the_stack_v2_python_sparse", "source_path": "Descriptors/SIFT/SIFT.py", "source_repo": "kmakantasis/CV-Tools", "split": "val", "star_events_count": 0} {"blob_id": "13cba416c87f67fb9e755b41b823511d7a8db3fa", "bodies": ["self.host = host\nself.port = port\nself.data = None", "try:\n connection = Telnet(host=self.host, port=self.port, timeout=DEFAULT_TIMEOUT)\n data = connection.read_all().decode('ascii').lstrip('|').rstrip('|').split('||')\n self.data = {data[i].split('|')[0]: data[i] for i in range(0, len(data), 1)}\nexcept ConnectionRefusedError:\n _LOGGER.error('HDDTemp is not available at %s:%s', self.host, self.port)\n self.data = None\nexcept socket.gaierror:\n _LOGGER.error('HDDTemp host not found %s:%s', self.host, self.port)\n self.data = None"], "bodies_text": "<|body_start_0|>\n self.host = host\n self.port = port\n self.data = None\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n connection = Telnet(host=self.host, port=self.port, timeout=DEFAULT_TIMEOUT)\n data = connection.read_all().decode('ascii').lstrip('|').rstrip('|').split('||')\n self.data = {data[i].split('|')[0]: data[i] for i in range(0, len(data), 1)}\n except ConnectionRefusedError:\n _LOGGER.error('HDDTemp is not available at %s:%s', self.host, self.port)\n self.data = None\n except socket.gaierror:\n _LOGGER.error('HDDTemp host not found %s:%s', self.host, self.port)\n self.data = None\n<|end_body_1|>\n", "class_docstring": "Get the latest data from HDDTemp and update the states.", "class_name": "HddTempData", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HddTempData:\n \"\"\"Get the latest data from HDDTemp and update the states.\"\"\"\n\n def __init__(self, host, port):\n \"\"\"Initialize the data object.\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Get the latest data from HDDTemp running as daemon.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.host = host\n self.port = port\n self.data = None\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n connection = Telnet(host=self.host, port=self.port, timeout=DEFAULT_TIMEOUT)\n data = connection.read_all().decode('ascii').lstrip('|').rstrip('|').split('||')\n self.data = {data[i].split('|')[0]: data[i] for i in range(0, len(data), 1)}\n except ConnectionRefusedError:\n _LOGGER.error('HDDTemp is not available at %s:%s', self.host, self.port)\n self.data = None\n except socket.gaierror:\n _LOGGER.error('HDDTemp host not found %s:%s', self.host, self.port)\n self.data = None\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000422", "length_bytes": 4109, "license_type": "permissive", "methods": [{"docstring": "Initialize the data object.", "name": "__init__", "signature": "def __init__(self, host, port)"}, {"docstring": "Get the latest data from HDDTemp running as daemon.", "name": "update", "signature": "def update(self)"}], "n_methods": 2, "prompt": "Implement the Python class 
`HddTempData` described below.\n\nClass description:\nGet the latest data from HDDTemp and update the states.\n\nMethod signatures and docstrings:\n- def __init__(self, host, port): Initialize the data object.\n- def update(self): Get the latest data from HDDTemp running as daemon.", "prompted_full_text": "Implement the Python class `HddTempData` described below.\n\nClass description:\nGet the latest data from HDDTemp and update the states.\n\nMethod signatures and docstrings:\n- def __init__(self, host, port): Initialize the data object.\n- def update(self): Get the latest data from HDDTemp running as daemon.\n\n<|skeleton|>\nclass HddTempData:\n \"\"\"Get the latest data from HDDTemp and update the states.\"\"\"\n\n def __init__(self, host, port):\n \"\"\"Initialize the data object.\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Get the latest data from HDDTemp running as daemon.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.host = host\n self.port = port\n self.data = None\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n connection = Telnet(host=self.host, port=self.port, timeout=DEFAULT_TIMEOUT)\n data = connection.read_all().decode('ascii').lstrip('|').rstrip('|').split('||')\n self.data = {data[i].split('|')[0]: data[i] for i in range(0, len(data), 1)}\n except ConnectionRefusedError:\n _LOGGER.error('HDDTemp is not available at %s:%s', self.host, self.port)\n self.data = None\n except socket.gaierror:\n _LOGGER.error('HDDTemp host not found %s:%s', self.host, self.port)\n self.data = None\n<|end_body_1|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass HddTempData:\n \"\"\"Get the latest data from HDDTemp and update the states.\"\"\"\n\n def __init__(self, host, port):\n \"\"\"Initialize the data object.\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Get the latest data from HDDTemp running as daemon.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class HddTempData:\n \"\"\"Get the latest data from HDDTemp and update the states.\"\"\"\n\n def __init__(self, host, port):\n \"\"\"Initialize the data object.\"\"\"\n self.host = host\n self.port = port\n self.data = None\n\n def update(self):\n \"\"\"Get the latest data from HDDTemp running as daemon.\"\"\"\n try:\n connection = Telnet(host=self.host, port=self.port, timeout=DEFAULT_TIMEOUT)\n data = connection.read_all().decode('ascii').lstrip('|').rstrip('|').split('||')\n self.data = {data[i].split('|')[0]: data[i] for i in range(0, len(data), 1)}\n except ConnectionRefusedError:\n _LOGGER.error('HDDTemp is not available at %s:%s', self.host, self.port)\n self.data = None\n except socket.gaierror:\n _LOGGER.error('HDDTemp host not found %s:%s', self.host, self.port)\n self.data = None\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/hddtemp/sensor.py", "source_repo": "home-assistant/core", "split": "val", "star_events_count": 35501} {"blob_id": "e460c6c04f5be29b526ca49e9aafada0b018b608", "bodies": ["if i >= len(word):\n return True\nres = False\nneighbors = [(row - 1, col), (row, col + 1), (row + 1, col), (row, col - 1)]\nfor index in neighbors:\n if -1 < index[0] < len(board) and -1 < index[1] < len(board[0]):\n if board[index[0]][index[1]] == word[i] and (not used[index[0]][index[1]]):\n used[index[0]][index[1]] = True\n res = res or self._dfs(board, used, index[0], index[1], word, i + 1)\n used[index[0]][index[1]] = 
False\nreturn res", "if not board or not board[0]:\n if not word:\n return True\n else:\n return False\nelif not word:\n return False\nm, n = (len(board), len(board[0]))\nused = [[False for j in range(n)] for i in range(m)]\nres = False\nfor i in range(m):\n for j in range(n):\n if board[i][j] == word[0]:\n used[i][j] = True\n res = res or self._dfs(board, used, i, j, word, 1)\n used[i][j] = False\nreturn res"], "bodies_text": "<|body_start_0|>\n if i >= len(word):\n return True\n res = False\n neighbors = [(row - 1, col), (row, col + 1), (row + 1, col), (row, col - 1)]\n for index in neighbors:\n if -1 < index[0] < len(board) and -1 < index[1] < len(board[0]):\n if board[index[0]][index[1]] == word[i] and (not used[index[0]][index[1]]):\n used[index[0]][index[1]] = True\n res = res or self._dfs(board, used, index[0], index[1], word, i + 1)\n used[index[0]][index[1]] = False\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if not board or not board[0]:\n if not word:\n return True\n else:\n return False\n elif not word:\n return False\n m, n = (len(board), len(board[0]))\n used = [[False for j in range(n)] for i in range(m)]\n res = False\n for i in range(m):\n for j in range(n):\n if board[i][j] == word[0]:\n used[i][j] = True\n res = res or self._dfs(board, used, i, j, word, 1)\n used[i][j] = False\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def _dfs(self, board, used, row, col, word, i):\n \"\"\"row, col: search neighboring cells of [row][col] i: index of the character in word to be searched\"\"\"\n <|body_0|>\n\n def exist(self, board, word):\n \"\"\":type board: List[List[str]] :type word: str :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if i >= len(word):\n return True\n res = False\n neighbors = [(row - 1, col), (row, col + 1), (row + 1, col), (row, col - 1)]\n for index in neighbors:\n if -1 < index[0] < len(board) and -1 < index[1] < len(board[0]):\n if board[index[0]][index[1]] == word[i] and (not used[index[0]][index[1]]):\n used[index[0]][index[1]] = True\n res = res or self._dfs(board, used, index[0], index[1], word, i + 1)\n used[index[0]][index[1]] = False\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if not board or not board[0]:\n if not word:\n return True\n else:\n return False\n elif not word:\n return False\n m, n = (len(board), len(board[0]))\n used = [[False for j in range(n)] for i in range(m)]\n res = False\n for i in range(m):\n for j in range(n):\n if board[i][j] == word[0]:\n used[i][j] = True\n res = res or self._dfs(board, used, i, j, word, 1)\n used[i][j] = False\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000423", "length_bytes": 2381, "license_type": "no_license", "methods": [{"docstring": "row, col: search neighboring cells of [row][col] i: index of the character in word to be searched", "name": "_dfs", "signature": "def _dfs(self, board, used, row, col, word, i)"}, {"docstring": ":type board: List[List[str]] :type word: str :rtype: bool", "name": "exist", "signature": "def exist(self, board, word)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006329", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def _dfs(self, board, used, row, col, word, i): row, col: search neighboring cells of [row][col] i: 
index of the character in word to be searched\n- def exist(self, board, word): :type board: List[List[str]] :type word: str :rtype: bool", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def _dfs(self, board, used, row, col, word, i): row, col: search neighboring cells of [row][col] i: index of the character in word to be searched\n- def exist(self, board, word): :type board: List[List[str]] :type word: str :rtype: bool\n\n<|skeleton|>\nclass Solution:\n\n def _dfs(self, board, used, row, col, word, i):\n \"\"\"row, col: search neighboring cells of [row][col] i: index of the character in word to be searched\"\"\"\n <|body_0|>\n\n def exist(self, board, word):\n \"\"\":type board: List[List[str]] :type word: str :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if i >= len(word):\n return True\n res = False\n neighbors = [(row - 1, col), (row, col + 1), (row + 1, col), (row, col - 1)]\n for index in neighbors:\n if -1 < index[0] < len(board) and -1 < index[1] < len(board[0]):\n if board[index[0]][index[1]] == word[i] and (not used[index[0]][index[1]]):\n used[index[0]][index[1]] = True\n res = res or self._dfs(board, used, index[0], index[1], word, i + 1)\n used[index[0]][index[1]] = False\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if not board or not board[0]:\n if not word:\n return True\n else:\n return False\n elif not word:\n return False\n m, n = (len(board), len(board[0]))\n used = [[False for j in range(n)] for i in range(m)]\n res = False\n for i in range(m):\n for j in range(n):\n if board[i][j] == word[0]:\n used[i][j] = True\n res = res or self._dfs(board, used, i, j, word, 1)\n used[i][j] = False\n return res\n<|end_body_1|>\n", "revision_id": "635af6e22aa8eef8e7920a585d43a45a891a8157", "skeleton": "<|skeleton|>\nclass Solution:\n\n def _dfs(self, board, used, row, col, word, i):\n \"\"\"row, col: search neighboring cells of [row][col] i: index of the character in word to be searched\"\"\"\n <|body_0|>\n\n def exist(self, board, word):\n \"\"\":type board: List[List[str]] :type word: str :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def _dfs(self, board, used, row, col, word, i):\n \"\"\"row, col: search neighboring cells of [row][col] i: index of the character in word to be searched\"\"\"\n if i >= len(word):\n return True\n res = False\n neighbors = [(row - 1, col), (row, col + 1), (row + 1, col), (row, col - 1)]\n for index in neighbors:\n if -1 < index[0] < len(board) and -1 < index[1] < len(board[0]):\n if board[index[0]][index[1]] == word[i] and (not used[index[0]][index[1]]):\n used[index[0]][index[1]] = True\n res = res or self._dfs(board, used, index[0], index[1], word, i + 1)\n used[index[0]][index[1]] = False\n return res\n\n def exist(self, board, word):\n \"\"\":type board: List[List[str]] :type word: str :rtype: bool\"\"\"\n if not board or not board[0]:\n if not word:\n return True\n else:\n return False\n elif not word:\n return False\n m, n = (len(board), len(board[0]))\n used = [[False for j in range(n)] for i in range(m)]\n res = False\n for i in range(m):\n for j in range(n):\n if board[i][j] == word[0]:\n used[i][j] = True\n res = res or self._dfs(board, used, i, j, word, 1)\n used[i][j] = False\n return res\n", "source": "the_stack_v2_python_sparse", 
"source_path": "code79WordSearch.py", "source_repo": "cybelewang/leetcode-python", "split": "val", "star_events_count": 0} {"blob_id": "2c30f6e911df4529d8787f10afb498bc8bd6a3c7", "bodies": ["if not root:\n return ''\nresult = []\nnodes = [root]\nwhile nodes:\n node = nodes.pop()\n result.append(str(node.val))\n if node.right:\n nodes.append(node.right)\n if node.left:\n nodes.append(node.left)\nreturn ' '.join(result)", "if not data:\n return None\npreorder = [int(val) for val in data.split()]\nroot = TreeNode(preorder[0])\ns = [root]\nfor i in range(1, len(preorder)):\n node = None\n while s and preorder[i] > s[-1].val:\n node = s.pop()\n if node:\n node.right = TreeNode(preorder[i])\n s.append(node.right)\n else:\n s[-1].left = TreeNode(preorder[i])\n s.append(s[-1].left)\nreturn root"], "bodies_text": "<|body_start_0|>\n if not root:\n return ''\n result = []\n nodes = [root]\n while nodes:\n node = nodes.pop()\n result.append(str(node.val))\n if node.right:\n nodes.append(node.right)\n if node.left:\n nodes.append(node.left)\n return ' '.join(result)\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return None\n preorder = [int(val) for val in data.split()]\n root = TreeNode(preorder[0])\n s = [root]\n for i in range(1, len(preorder)):\n node = None\n while s and preorder[i] > s[-1].val:\n node = s.pop()\n if node:\n node.right = TreeNode(preorder[i])\n s.append(node.right)\n else:\n s[-1].left = TreeNode(preorder[i])\n s.append(s[-1].left)\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return ''\n result = []\n nodes = [root]\n while nodes:\n node = nodes.pop()\n result.append(str(node.val))\n if node.right:\n nodes.append(node.right)\n if node.left:\n nodes.append(node.left)\n return ' '.join(result)\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return None\n preorder = [int(val) for val in data.split()]\n root = TreeNode(preorder[0])\n s = [root]\n for i in range(1, len(preorder)):\n node = None\n while s and preorder[i] > s[-1].val:\n node = s.pop()\n if node:\n node.right = TreeNode(preorder[i])\n s.append(node.right)\n else:\n s[-1].left = TreeNode(preorder[i])\n s.append(s[-1].left)\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000424", "length_bytes": 1324, "license_type": "permissive", "methods": [{"docstring": "Encodes a tree to a single string.", "name": "serialize", "signature": "def serialize(self, root: TreeNode) -> str"}, {"docstring": "Decodes your encoded data to tree.", "name": "deserialize", "signature": "def deserialize(self, data: str) -> TreeNode"}], "n_methods": 2, "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: TreeNode) -> str: Encodes a tree to a single string.\n- def deserialize(self, data: str) -> TreeNode: Decodes your encoded data to tree.", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: TreeNode) -> str: Encodes a tree 
to a single string.\n- def deserialize(self, data: str) -> TreeNode: Decodes your encoded data to tree.\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return ''\n result = []\n nodes = [root]\n while nodes:\n node = nodes.pop()\n result.append(str(node.val))\n if node.right:\n nodes.append(node.right)\n if node.left:\n nodes.append(node.left)\n return ' '.join(result)\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return None\n preorder = [int(val) for val in data.split()]\n root = TreeNode(preorder[0])\n s = [root]\n for i in range(1, len(preorder)):\n node = None\n while s and preorder[i] > s[-1].val:\n node = s.pop()\n if node:\n node.right = TreeNode(preorder[i])\n s.append(node.right)\n else:\n s[-1].left = TreeNode(preorder[i])\n s.append(s[-1].left)\n return root\n<|end_body_1|>\n", "revision_id": "8a10b23335d8e9f080e5c39715b38bcc2916ff00", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\"\"\"\n if not root:\n return ''\n result = []\n nodes = [root]\n while nodes:\n node = nodes.pop()\n result.append(str(node.val))\n if node.right:\n nodes.append(node.right)\n if node.left:\n nodes.append(node.left)\n return ' '.join(result)\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\"\"\"\n if not data:\n return None\n preorder = [int(val) for val in data.split()]\n root = TreeNode(preorder[0])\n s = [root]\n for i in range(1, len(preorder)):\n node = None\n while s and preorder[i] > s[-1].val:\n node = s.pop()\n if node:\n node.right = TreeNode(preorder[i])\n s.append(node.right)\n else:\n s[-1].left = TreeNode(preorder[i])\n s.append(s[-1].left)\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "Leetcode/449. Serialize and Deserialize BST/solution2.py", "source_repo": "hi0t/Outtalent", "split": "val", "star_events_count": 0} {"blob_id": "fef27cd72b16850aa12729c390e3c1b70364db94", "bodies": ["site = coupon.get_site()\ntry:\n twitter_account = TwitterAccount.objects.get(site=site)\n twitter_name = twitter_account.twitter_name\nexcept TwitterAccount.DoesNotExist:\n twitter_name = None\nqualifier = coupon.offer.qualifier\nif add_url and len(qualifier) > 31:\n qualifier = qualifier[:31] + '...'\nmessage = '%s %s - %s' % (coupon.offer.headline, qualifier, coupon.offer.business.short_business_name)\nif twitter_name:\n message += ' @%s.' 
% twitter_name\nif add_url:\n message += ' Go to: '\n long_url = 'http://%s%s' % (site.domain, reverse('view-single-coupon', kwargs={'slug': coupon.slug(), 'coupon_id': coupon.id}))\n message += shorten_url(long_url)\nLOG.info('build tweet message = %s ' % message)\nreturn message", "site = coupon.get_site()\nLOG.info('twitter_connect')\ntry:\n twitter_account = TwitterAccount.objects.get(site=site)\nexcept TwitterAccount.DoesNotExist:\n return\nif twitter_account.consumer_key and twitter_account.consumer_secret and twitter_account.access_key and twitter_account.access_secret and (twitter_account.consumer_key != '') and (twitter_account.consumer_secret != '') and (twitter_account.access_key != '') and (twitter_account.access_secret != ''):\n try:\n api = twitter.Api(consumer_key=twitter_account.consumer_key, consumer_secret=twitter_account.consumer_secret, access_token_key=twitter_account.access_key, access_token_secret=twitter_account.access_secret)\n if message:\n api.PostUpdate(message)\n LOG.debug('Success: tweet coupon id: %s' % str(coupon.id))\n return\n else:\n LOG.debug('Returning status')\n try:\n statuses = api.GetUserTimeline(twitter_account.twitter_name)\n status = str(statuses[0].text)\n except ValueError:\n status = None\n return status\n except twitter.TwitterError:\n LOG.error('Twitter connect failed for this site: %s' % site)\nreturn"], "bodies_text": "<|body_start_0|>\n site = coupon.get_site()\n try:\n twitter_account = TwitterAccount.objects.get(site=site)\n twitter_name = twitter_account.twitter_name\n except TwitterAccount.DoesNotExist:\n twitter_name = None\n qualifier = coupon.offer.qualifier\n if add_url and len(qualifier) > 31:\n qualifier = qualifier[:31] + '...'\n message = '%s %s - %s' % (coupon.offer.headline, qualifier, coupon.offer.business.short_business_name)\n if twitter_name:\n message += ' @%s.' 
% twitter_name\n if add_url:\n message += ' Go to: '\n long_url = 'http://%s%s' % (site.domain, reverse('view-single-coupon', kwargs={'slug': coupon.slug(), 'coupon_id': coupon.id}))\n message += shorten_url(long_url)\n LOG.info('build tweet message = %s ' % message)\n return message\n<|end_body_0|>\n\n<|body_start_1|>\n site = coupon.get_site()\n LOG.info('twitter_connect')\n try:\n twitter_account = TwitterAccount.objects.get(site=site)\n except TwitterAccount.DoesNotExist:\n return\n if twitter_account.consumer_key and twitter_account.consumer_secret and twitter_account.access_key and twitter_account.access_secret and (twitter_account.consumer_key != '') and (twitter_account.consumer_secret != '') and (twitter_account.access_key != '') and (twitter_account.access_secret != ''):\n try:\n api = twitter.Api(consumer_key=twitter_account.consumer_key, consumer_secret=twitter_account.consumer_secret, access_token_key=twitter_account.access_key, access_token_secret=twitter_account.access_secret)\n if message:\n api.PostUpdate(message)\n LOG.debug('Success: tweet coupon id: %s' % str(coupon.id))\n return\n else:\n LOG.debug('Returning status')\n try:\n statuses = api.GetUserTimeline(twitter_account.twitter_name)\n status = str(statuses[0].text)\n except ValueError:\n status = None\n return status\n except twitter.TwitterError:\n LOG.error('Twitter connect failed for this site: %s' % site)\n return\n<|end_body_1|>\n", "class_docstring": "Class that helps deal with single coupon instances and things related to a single coupon.", "class_name": "TwitterService", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TwitterService:\n \"\"\"Class that helps deal with single coupon instances and things related to a single coupon.\"\"\"\n\n def build_tweet_message(coupon, add_url=False):\n \"\"\"Create Tweet (Twitter Status) for this coupon.\"\"\"\n <|body_0|>\n\n def twitter_connect(coupon, message=None):\n \"\"\"Connect to Twitter then either update or return latest status.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n site = coupon.get_site()\n try:\n twitter_account = TwitterAccount.objects.get(site=site)\n twitter_name = twitter_account.twitter_name\n except TwitterAccount.DoesNotExist:\n twitter_name = None\n qualifier = coupon.offer.qualifier\n if add_url and len(qualifier) > 31:\n qualifier = qualifier[:31] + '...'\n message = '%s %s - %s' % (coupon.offer.headline, qualifier, coupon.offer.business.short_business_name)\n if twitter_name:\n message += ' @%s.' 
% twitter_name\n if add_url:\n message += ' Go to: '\n long_url = 'http://%s%s' % (site.domain, reverse('view-single-coupon', kwargs={'slug': coupon.slug(), 'coupon_id': coupon.id}))\n message += shorten_url(long_url)\n LOG.info('build tweet message = %s ' % message)\n return message\n<|end_body_0|>\n\n<|body_start_1|>\n site = coupon.get_site()\n LOG.info('twitter_connect')\n try:\n twitter_account = TwitterAccount.objects.get(site=site)\n except TwitterAccount.DoesNotExist:\n return\n if twitter_account.consumer_key and twitter_account.consumer_secret and twitter_account.access_key and twitter_account.access_secret and (twitter_account.consumer_key != '') and (twitter_account.consumer_secret != '') and (twitter_account.access_key != '') and (twitter_account.access_secret != ''):\n try:\n api = twitter.Api(consumer_key=twitter_account.consumer_key, consumer_secret=twitter_account.consumer_secret, access_token_key=twitter_account.access_key, access_token_secret=twitter_account.access_secret)\n if message:\n api.PostUpdate(message)\n LOG.debug('Success: tweet coupon id: %s' % str(coupon.id))\n return\n else:\n LOG.debug('Returning status')\n try:\n statuses = api.GetUserTimeline(twitter_account.twitter_name)\n status = str(statuses[0].text)\n except ValueError:\n status = None\n return status\n except twitter.TwitterError:\n LOG.error('Twitter connect failed for this site: %s' % site)\n return\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000425", "length_bytes": 3319, "license_type": "no_license", "methods": [{"docstring": "Create Tweet (Twitter Status) for this coupon.", "name": "build_tweet_message", "signature": "def build_tweet_message(coupon, add_url=False)"}, {"docstring": "Connect to Twitter then either update or return latest status.", "name": "twitter_connect", "signature": "def twitter_connect(coupon, message=None)"}], "n_methods": 2, "prompt": "Implement the Python class `TwitterService` described below.\n\nClass description:\nClass that helps deal with single coupon instances and things related to a single coupon.\n\nMethod signatures and docstrings:\n- def build_tweet_message(coupon, add_url=False): Create Tweet (Twitter Status) for this coupon.\n- def twitter_connect(coupon, message=None): Connect to Twitter then either update or return latest status.", "prompted_full_text": "Implement the Python class `TwitterService` described below.\n\nClass description:\nClass that helps deal with single coupon instances and things related to a single coupon.\n\nMethod signatures and docstrings:\n- def build_tweet_message(coupon, add_url=False): Create Tweet (Twitter Status) for this coupon.\n- def twitter_connect(coupon, message=None): Connect to Twitter then either update or return latest status.\n\n<|skeleton|>\nclass TwitterService:\n \"\"\"Class that helps deal with single coupon instances and things related to a single coupon.\"\"\"\n\n def build_tweet_message(coupon, add_url=False):\n \"\"\"Create Tweet (Twitter Status) for this coupon.\"\"\"\n <|body_0|>\n\n def twitter_connect(coupon, message=None):\n \"\"\"Connect to Twitter then either update or return latest status.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n site = coupon.get_site()\n try:\n twitter_account = TwitterAccount.objects.get(site=site)\n twitter_name = twitter_account.twitter_name\n except TwitterAccount.DoesNotExist:\n twitter_name = None\n qualifier = coupon.offer.qualifier\n if add_url and len(qualifier) > 31:\n qualifier = qualifier[:31] + '...'\n message = '%s %s - %s' % 
(coupon.offer.headline, qualifier, coupon.offer.business.short_business_name)\n if twitter_name:\n message += ' @%s.' % twitter_name\n if add_url:\n message += ' Go to: '\n long_url = 'http://%s%s' % (site.domain, reverse('view-single-coupon', kwargs={'slug': coupon.slug(), 'coupon_id': coupon.id}))\n message += shorten_url(long_url)\n LOG.info('build tweet message = %s ' % message)\n return message\n<|end_body_0|>\n\n<|body_start_1|>\n site = coupon.get_site()\n LOG.info('twitter_connect')\n try:\n twitter_account = TwitterAccount.objects.get(site=site)\n except TwitterAccount.DoesNotExist:\n return\n if twitter_account.consumer_key and twitter_account.consumer_secret and twitter_account.access_key and twitter_account.access_secret and (twitter_account.consumer_key != '') and (twitter_account.consumer_secret != '') and (twitter_account.access_key != '') and (twitter_account.access_secret != ''):\n try:\n api = twitter.Api(consumer_key=twitter_account.consumer_key, consumer_secret=twitter_account.consumer_secret, access_token_key=twitter_account.access_key, access_token_secret=twitter_account.access_secret)\n if message:\n api.PostUpdate(message)\n LOG.debug('Success: tweet coupon id: %s' % str(coupon.id))\n return\n else:\n LOG.debug('Returning status')\n try:\n statuses = api.GetUserTimeline(twitter_account.twitter_name)\n status = str(statuses[0].text)\n except ValueError:\n status = None\n return status\n except twitter.TwitterError:\n LOG.error('Twitter connect failed for this site: %s' % site)\n return\n<|end_body_1|>\n", "revision_id": "a780ccdc3350d4b5c7990c65d1af8d71060c62cc", "skeleton": "<|skeleton|>\nclass TwitterService:\n \"\"\"Class that helps deal with single coupon instances and things related to a single coupon.\"\"\"\n\n def build_tweet_message(coupon, add_url=False):\n \"\"\"Create Tweet (Twitter Status) for this coupon.\"\"\"\n <|body_0|>\n\n def twitter_connect(coupon, message=None):\n \"\"\"Connect to Twitter then either update or return latest status.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TwitterService:\n \"\"\"Class that helps deal with single coupon instances and things related to a single coupon.\"\"\"\n\n def build_tweet_message(coupon, add_url=False):\n \"\"\"Create Tweet (Twitter Status) for this coupon.\"\"\"\n site = coupon.get_site()\n try:\n twitter_account = TwitterAccount.objects.get(site=site)\n twitter_name = twitter_account.twitter_name\n except TwitterAccount.DoesNotExist:\n twitter_name = None\n qualifier = coupon.offer.qualifier\n if add_url and len(qualifier) > 31:\n qualifier = qualifier[:31] + '...'\n message = '%s %s - %s' % (coupon.offer.headline, qualifier, coupon.offer.business.short_business_name)\n if twitter_name:\n message += ' @%s.' 
% twitter_name\n if add_url:\n message += ' Go to: '\n long_url = 'http://%s%s' % (site.domain, reverse('view-single-coupon', kwargs={'slug': coupon.slug(), 'coupon_id': coupon.id}))\n message += shorten_url(long_url)\n LOG.info('build tweet message = %s ' % message)\n return message\n\n def twitter_connect(coupon, message=None):\n \"\"\"Connect to Twitter then either update or return latest status.\"\"\"\n site = coupon.get_site()\n LOG.info('twitter_connect')\n try:\n twitter_account = TwitterAccount.objects.get(site=site)\n except TwitterAccount.DoesNotExist:\n return\n if twitter_account.consumer_key and twitter_account.consumer_secret and twitter_account.access_key and twitter_account.access_secret and (twitter_account.consumer_key != '') and (twitter_account.consumer_secret != '') and (twitter_account.access_key != '') and (twitter_account.access_secret != ''):\n try:\n api = twitter.Api(consumer_key=twitter_account.consumer_key, consumer_secret=twitter_account.consumer_secret, access_token_key=twitter_account.access_key, access_token_secret=twitter_account.access_secret)\n if message:\n api.PostUpdate(message)\n LOG.debug('Success: tweet coupon id: %s' % str(coupon.id))\n return\n else:\n LOG.debug('Returning status')\n try:\n statuses = api.GetUserTimeline(twitter_account.twitter_name)\n status = str(statuses[0].text)\n except ValueError:\n status = None\n return status\n except twitter.TwitterError:\n LOG.error('Twitter connect failed for this site: %s' % site)\n return\n", "source": "the_stack_v2_python_sparse", "source_path": "coupon/service/twitter_service.py", "source_repo": "wcirillo/ten", "split": "val", "star_events_count": 0} {"blob_id": "c1cd41f0c9f2b559e3a5b65ee403196ed4936e17", "bodies": ["\"\"\"\n Treat each node as root, calculate their depths, return the minimum roots.\n This method will get TLE\n \"\"\"\ngraph = [[] for _ in range(n)]\nfor a, b in edges:\n graph[a].append(b)\n graph[b].append(a)\n\ndef get_height(root, visited):\n visited.add(root)\n height = 0\n for adj in graph[root]:\n if adj not in visited:\n height = max(height, get_height(adj, visited))\n return height + 1\nmin_height = float('+inf')\nheights = [0] * n\nfor root in range(n):\n height = get_height(root, set())\n heights[root] = height\n min_height = min(min_height, height)\nreturn [node for node, height in enumerate(heights) if height == min_height]", "from collections import deque\nif n <= 2:\n return [i for i in range(n)]\ngraph = [[] for _ in range(n)]\nfor a, b in edges:\n graph[a].append(b)\n graph[b].append(a)\nq = deque()\nfor node in range(n):\n if len(graph[node]) == 1:\n q.append(node)\nnum_remain_node = n\nwhile num_remain_node > 2:\n sz = len(q)\n for _ in range(sz):\n node = q.popleft()\n for adj in graph[node]:\n graph[adj].remove(node)\n if len(graph[adj]) == 1:\n q.append(adj)\n num_remain_node -= 1\nreturn [node for node in q]"], "bodies_text": "<|body_start_0|>\n \"\"\"\n Treat each node as root, calculate their depths, return the minimum roots.\n This method will get TLE\n \"\"\"\n graph = [[] for _ in range(n)]\n for a, b in edges:\n graph[a].append(b)\n graph[b].append(a)\n\n def get_height(root, visited):\n visited.add(root)\n height = 0\n for adj in graph[root]:\n if adj not in visited:\n height = max(height, get_height(adj, visited))\n return height + 1\n min_height = float('+inf')\n heights = [0] * n\n for root in range(n):\n height = get_height(root, set())\n heights[root] = height\n min_height = min(min_height, height)\n return [node for node, height in 
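[Editor's aside on the `TwitterService` record above: `build_tweet_message` trims long qualifiers to 31 characters plus an ellipsis, but only when a URL will be appended, since the link is what pushes the tweet over its length budget. A standalone sketch of just that trimming rule; the 31-character cutoff comes from the record, everything else here is illustrative:]

def trim_qualifier(qualifier: str, add_url: bool, limit: int = 31) -> str:
    # Mirror the record's rule: trim only when a URL will be appended.
    if add_url and len(qualifier) > limit:
        return qualifier[:limit] + '...'
    return qualifier

assert trim_qualifier('x' * 40, add_url=True) == 'x' * 31 + '...'
assert trim_qualifier('x' * 40, add_url=False) == 'x' * 40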
enumerate(heights) if height == min_height]\n<|end_body_0|>\n\n<|body_start_1|>\n from collections import deque\n if n <= 2:\n return [i for i in range(n)]\n graph = [[] for _ in range(n)]\n for a, b in edges:\n graph[a].append(b)\n graph[b].append(a)\n q = deque()\n for node in range(n):\n if len(graph[node]) == 1:\n q.append(node)\n num_remain_node = n\n while num_remain_node > 2:\n sz = len(q)\n for _ in range(sz):\n node = q.popleft()\n for adj in graph[node]:\n graph[adj].remove(node)\n if len(graph[adj]) == 1:\n q.append(adj)\n num_remain_node -= 1\n return [node for node in q]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]:\n \"\"\"Brute Force, Time: O(V^2), Space: O(V)\"\"\"\n <|body_0|>\n\n def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]:\n \"\"\"BFS, Time: O(V), Space: O(V)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n \"\"\"\n Treat each node as root, calculate their depths, return the minimum roots.\n This method will get TLE\n \"\"\"\n graph = [[] for _ in range(n)]\n for a, b in edges:\n graph[a].append(b)\n graph[b].append(a)\n\n def get_height(root, visited):\n visited.add(root)\n height = 0\n for adj in graph[root]:\n if adj not in visited:\n height = max(height, get_height(adj, visited))\n return height + 1\n min_height = float('+inf')\n heights = [0] * n\n for root in range(n):\n height = get_height(root, set())\n heights[root] = height\n min_height = min(min_height, height)\n return [node for node, height in enumerate(heights) if height == min_height]\n<|end_body_0|>\n\n<|body_start_1|>\n from collections import deque\n if n <= 2:\n return [i for i in range(n)]\n graph = [[] for _ in range(n)]\n for a, b in edges:\n graph[a].append(b)\n graph[b].append(a)\n q = deque()\n for node in range(n):\n if len(graph[node]) == 1:\n q.append(node)\n num_remain_node = n\n while num_remain_node > 2:\n sz = len(q)\n for _ in range(sz):\n node = q.popleft()\n for adj in graph[node]:\n graph[adj].remove(node)\n if len(graph[adj]) == 1:\n q.append(adj)\n num_remain_node -= 1\n return [node for node in q]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000426", "length_bytes": 3079, "license_type": "no_license", "methods": [{"docstring": "Brute Force, Time: O(V^2), Space: O(V)", "name": "findMinHeightTrees", "signature": "def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]"}, {"docstring": "BFS, Time: O(V), Space: O(V)", "name": "findMinHeightTrees", "signature": "def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]: Brute Force, Time: O(V^2), Space: O(V)\n- def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]: BFS, Time: O(V), Space: O(V)", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]: Brute Force, Time: O(V^2), Space: O(V)\n- def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]: BFS, 
Time: O(V), Space: O(V)\n\n<|skeleton|>\nclass Solution:\n\n def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]:\n \"\"\"Brute Force, Time: O(V^2), Space: O(V)\"\"\"\n <|body_0|>\n\n def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]:\n \"\"\"BFS, Time: O(V), Space: O(V)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n \"\"\"\n Treat each node as root, calculate their depths, return the minimum roots.\n This method will get TLE\n \"\"\"\n graph = [[] for _ in range(n)]\n for a, b in edges:\n graph[a].append(b)\n graph[b].append(a)\n\n def get_height(root, visited):\n visited.add(root)\n height = 0\n for adj in graph[root]:\n if adj not in visited:\n height = max(height, get_height(adj, visited))\n return height + 1\n min_height = float('+inf')\n heights = [0] * n\n for root in range(n):\n height = get_height(root, set())\n heights[root] = height\n min_height = min(min_height, height)\n return [node for node, height in enumerate(heights) if height == min_height]\n<|end_body_0|>\n\n<|body_start_1|>\n from collections import deque\n if n <= 2:\n return [i for i in range(n)]\n graph = [[] for _ in range(n)]\n for a, b in edges:\n graph[a].append(b)\n graph[b].append(a)\n q = deque()\n for node in range(n):\n if len(graph[node]) == 1:\n q.append(node)\n num_remain_node = n\n while num_remain_node > 2:\n sz = len(q)\n for _ in range(sz):\n node = q.popleft()\n for adj in graph[node]:\n graph[adj].remove(node)\n if len(graph[adj]) == 1:\n q.append(adj)\n num_remain_node -= 1\n return [node for node in q]\n<|end_body_1|>\n", "revision_id": "72136e3487d239f5b37e2d6393e034262a6bf599", "skeleton": "<|skeleton|>\nclass Solution:\n\n def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]:\n \"\"\"Brute Force, Time: O(V^2), Space: O(V)\"\"\"\n <|body_0|>\n\n def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]:\n \"\"\"BFS, Time: O(V), Space: O(V)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]:\n \"\"\"Brute Force, Time: O(V^2), Space: O(V)\"\"\"\n \"\"\"\n Treat each node as root, calculate their depths, return the minimum roots.\n This method will get TLE\n \"\"\"\n graph = [[] for _ in range(n)]\n for a, b in edges:\n graph[a].append(b)\n graph[b].append(a)\n\n def get_height(root, visited):\n visited.add(root)\n height = 0\n for adj in graph[root]:\n if adj not in visited:\n height = max(height, get_height(adj, visited))\n return height + 1\n min_height = float('+inf')\n heights = [0] * n\n for root in range(n):\n height = get_height(root, set())\n heights[root] = height\n min_height = min(min_height, height)\n return [node for node, height in enumerate(heights) if height == min_height]\n\n def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]:\n \"\"\"BFS, Time: O(V), Space: O(V)\"\"\"\n from collections import deque\n if n <= 2:\n return [i for i in range(n)]\n graph = [[] for _ in range(n)]\n for a, b in edges:\n graph[a].append(b)\n graph[b].append(a)\n q = deque()\n for node in range(n):\n if len(graph[node]) == 1:\n q.append(node)\n num_remain_node = n\n while num_remain_node > 2:\n sz = len(q)\n for _ in range(sz):\n node = q.popleft()\n for adj in graph[node]:\n graph[adj].remove(node)\n if len(graph[adj]) == 1:\n q.append(adj)\n num_remain_node -= 1\n return [node for node 
in q]\n", "source": "the_stack_v2_python_sparse", "source_path": "python/310-Minimum Height Trees.py", "source_repo": "cwza/leetcode", "split": "val", "star_events_count": 0} {"blob_id": "b864bdebbd1b5da5684a8cdce57640e5fecbda88", "bodies": ["counter = Counter(deck)\ngcd = None\nfor v in counter.values():\n if gcd is None:\n gcd = v\n gcd = self.gcd(gcd, v)\n if gcd == 1:\n return False\nreturn True", "while b:\n a, b = (b, a % b)\nreturn a"], "bodies_text": "<|body_start_0|>\n counter = Counter(deck)\n gcd = None\n for v in counter.values():\n if gcd is None:\n gcd = v\n gcd = self.gcd(gcd, v)\n if gcd == 1:\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n while b:\n a, b = (b, a % b)\n return a\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def hasGroupsSizeX(self, deck: List[int]) -> bool:\n \"\"\"gcd of all > 2\"\"\"\n <|body_0|>\n\n def gcd(self, a, b):\n \"\"\"a = k * b + r gcd(a, b) = gcd(b, r)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n counter = Counter(deck)\n gcd = None\n for v in counter.values():\n if gcd is None:\n gcd = v\n gcd = self.gcd(gcd, v)\n if gcd == 1:\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n while b:\n a, b = (b, a % b)\n return a\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000427", "length_bytes": 1394, "license_type": "no_license", "methods": [{"docstring": "gcd of all > 2", "name": "hasGroupsSizeX", "signature": "def hasGroupsSizeX(self, deck: List[int]) -> bool"}, {"docstring": "a = k * b + r gcd(a, b) = gcd(b, r)", "name": "gcd", "signature": "def gcd(self, a, b)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005939", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def hasGroupsSizeX(self, deck: List[int]) -> bool: gcd of all > 2\n- def gcd(self, a, b): a = k * b + r gcd(a, b) = gcd(b, r)", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def hasGroupsSizeX(self, deck: List[int]) -> bool: gcd of all > 2\n- def gcd(self, a, b): a = k * b + r gcd(a, b) = gcd(b, r)\n\n<|skeleton|>\nclass Solution:\n\n def hasGroupsSizeX(self, deck: List[int]) -> bool:\n \"\"\"gcd of all > 2\"\"\"\n <|body_0|>\n\n def gcd(self, a, b):\n \"\"\"a = k * b + r gcd(a, b) = gcd(b, r)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n counter = Counter(deck)\n gcd = None\n for v in counter.values():\n if gcd is None:\n gcd = v\n gcd = self.gcd(gcd, v)\n if gcd == 1:\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n while b:\n a, b = (b, a % b)\n return a\n<|end_body_1|>\n", "revision_id": "929dde1723fb2f54870c8a9badc80fc23e8400d3", "skeleton": "<|skeleton|>\nclass Solution:\n\n def hasGroupsSizeX(self, deck: List[int]) -> bool:\n \"\"\"gcd of all > 2\"\"\"\n <|body_0|>\n\n def gcd(self, a, b):\n \"\"\"a = k * b + r gcd(a, b) = gcd(b, r)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def hasGroupsSizeX(self, deck: List[int]) -> bool:\n \"\"\"gcd of all > 2\"\"\"\n counter = Counter(deck)\n gcd = None\n for v in counter.values():\n if gcd is None:\n gcd = v\n 
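[Editor's aside on the `findMinHeightTrees` record above: the second body is the classic leaf-peeling approach, which repeatedly strips degree-1 nodes layer by layer until at most two remain; those survivors are the tree's centroids and the minimum-height roots. A self-contained restatement with a quick check; the example tree is illustrative:]

from collections import deque
from typing import List

def find_min_height_trees(n: int, edges: List[List[int]]) -> List[int]:
    if n <= 2:
        return list(range(n))
    graph = [set() for _ in range(n)]            # sets make leaf removal O(1)
    for a, b in edges:
        graph[a].add(b)
        graph[b].add(a)
    leaves = deque(v for v in range(n) if len(graph[v]) == 1)
    remaining = n
    while remaining > 2:
        for _ in range(len(leaves)):             # peel one full layer of leaves
            leaf = leaves.popleft()
            remaining -= 1
            for adj in graph[leaf]:
                graph[adj].discard(leaf)
                if len(graph[adj]) == 1:
                    leaves.append(adj)
    return list(leaves)

# Path 0-1-2-3: either middle node gives a height-2 tree.
assert sorted(find_min_height_trees(4, [[0, 1], [1, 2], [2, 3]])) == [1, 2]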
gcd = self.gcd(gcd, v)\n if gcd == 1:\n return False\n return True\n\n def gcd(self, a, b):\n \"\"\"a = k * b + r gcd(a, b) = gcd(b, r)\"\"\"\n while b:\n a, b = (b, a % b)\n return a\n", "source": "the_stack_v2_python_sparse", "source_path": "_algorithms_challenges/leetcode/LeetCode/914 X of a Kind in a Deck of Cards.py", "source_repo": "syurskyi/Algorithms_and_Data_Structure", "split": "val", "star_events_count": 4} {"blob_id": "766ce05099a1e5082c298c2dded4178b4c11a6fd", "bodies": ["parameters = dict()\nparameters['page'] = GraphQLParam(page, 'PageInput', False)\nparameters['filter'] = GraphQLParam(dc_filter, 'DataCenterFilter', False)\nparameters['sort'] = GraphQLParam(sort, 'DataCenterSort', False)\nresponse = self._query(name='getDataCenters', params=parameters, fields=DataCenterList.fields())\nreturn DataCenterList(response)", "parameters = dict()\nparameters['input'] = GraphQLParam(create_input, 'CreateDataCenterInput', True)\nresponse = self._mutation(name='createDataCenter', params=parameters, fields=DataCenter.fields())\nreturn DataCenter(response)", "parameters = dict()\nparameters['uuid'] = GraphQLParam(uuid, 'UUID', True)\nparameters['input'] = GraphQLParam(delete_input, 'DeleteDataCenterInput', False)\nresponse = self._mutation(name='deleteDataCenter', params=parameters, fields=None)\nreturn response", "parameters = dict()\nparameters['uuid'] = GraphQLParam(uuid, 'UUID', True)\nparameters['input'] = GraphQLParam(update_input, 'UpsertDataCenterInput', False)\nresponse = self._mutation(name='updateDataCenter', params=parameters, fields=DataCenter.fields())\nreturn DataCenter(response)"], "bodies_text": "<|body_start_0|>\n parameters = dict()\n parameters['page'] = GraphQLParam(page, 'PageInput', False)\n parameters['filter'] = GraphQLParam(dc_filter, 'DataCenterFilter', False)\n parameters['sort'] = GraphQLParam(sort, 'DataCenterSort', False)\n response = self._query(name='getDataCenters', params=parameters, fields=DataCenterList.fields())\n return DataCenterList(response)\n<|end_body_0|>\n\n<|body_start_1|>\n parameters = dict()\n parameters['input'] = GraphQLParam(create_input, 'CreateDataCenterInput', True)\n response = self._mutation(name='createDataCenter', params=parameters, fields=DataCenter.fields())\n return DataCenter(response)\n<|end_body_1|>\n\n<|body_start_2|>\n parameters = dict()\n parameters['uuid'] = GraphQLParam(uuid, 'UUID', True)\n parameters['input'] = GraphQLParam(delete_input, 'DeleteDataCenterInput', False)\n response = self._mutation(name='deleteDataCenter', params=parameters, fields=None)\n return response\n<|end_body_2|>\n\n<|body_start_3|>\n parameters = dict()\n parameters['uuid'] = GraphQLParam(uuid, 'UUID', True)\n parameters['input'] = GraphQLParam(update_input, 'UpsertDataCenterInput', False)\n response = self._mutation(name='updateDataCenter', params=parameters, fields=DataCenter.fields())\n return DataCenter(response)\n<|end_body_3|>\n", "class_docstring": "Mixin to add datacenter related methods to the GraphQL client", "class_name": "DatacentersMixin", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DatacentersMixin:\n \"\"\"Mixin to add datacenter related methods to the GraphQL client\"\"\"\n\n def get_datacenters(self, page: PageInput=None, dc_filter: DataCenterFilter=None, sort: DataCenterSort=None) -> DataCenterList:\n \"\"\"Retrieves a list of datacenter objects :param page: The requested page from the server. 
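[Editor's aside on the `hasGroupsSizeX` record above: it hand-rolls Euclid's algorithm, but the same check (the deck splits into equal groups of size X >= 2 exactly when the gcd of all card counts is at least 2) reads more compactly with the standard library. A sketch of that alternative, not the record's code:]

from collections import Counter
from functools import reduce
from math import gcd
from typing import List

def has_groups_size_x(deck: List[int]) -> bool:
    # gcd(0, x) == x, so the initial value 0 also makes an empty deck
    # fall out as False.
    return reduce(gcd, Counter(deck).values(), 0) >= 2

assert has_groups_size_x([1, 1, 2, 2, 2, 2]) is True    # [1,1],[2,2],[2,2]
assert has_groups_size_x([1, 1, 1, 2, 2]) is False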
This is an optional argument and if omitted the server will default to returning the first page with a maximum of ``100`` items. :type page: PageInput, optional :param dc_filter: A filter object to filter the datacenters on the server. If omitted, the server will return all objects as a paginated response. :type dc_filter: DataCenterFilter, optional :param sort: A sort definition object to sort the datacenter objects on supported properties. If omitted objects are returned in the order as they were created in. :type sort: DataCenterSort, optional :returns DataCenterList: A paginated list of datacenters. :\"\"\"\n <|body_0|>\n\n def create_datacenter(self, create_input: CreateDataCenterInput=None) -> DataCenter:\n \"\"\"Allows creation of a new datacenter object A datacenter record allows customers to logically organize their infrastructure by physical location and associate address and contact information with the physical location. This is useful for effective support case handling and reporting purposes. :param create_input: A property definition for the new datacenter :type create_input: CreateDataCenterInput :returns DataCenter: The new datacenter. :raises GraphQLError: An error with the GraphQL endpoint.\"\"\"\n <|body_1|>\n\n def delete_datacenter(self, uuid: str, delete_input: DeleteDataCenterInput=None) -> bool:\n \"\"\"Allows deletion of an existing datacenter object The deletion of a datacenter is only possible if the datacenter has no hosts (servers) associated with any child items. :param uuid: The unique identifier of the datacenter to delete :type uuid: str :param delete_input: Optional parameters for the delete operation :type delete_input: DeleteDataCenterInput, optional :returns bool: If the query was successful :raises GraphQLError: An error with the GraphQL endpoint.\"\"\"\n <|body_2|>\n\n def update_datacenter(self, uuid: str, update_input: UpdateDataCenterInput) -> DataCenter:\n \"\"\"Allows updating properties of an existing datacenter object :param uuid: The unique identifier of the datacenter to update :type uuid: str :param update_input: A property definition for the datacenter updates :type update_input: UpdateDataCenterInput :returns DataCenter: The updated datacenter object. 
:raises GraphQLError: An error with the GraphQL endpoint.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n parameters = dict()\n parameters['page'] = GraphQLParam(page, 'PageInput', False)\n parameters['filter'] = GraphQLParam(dc_filter, 'DataCenterFilter', False)\n parameters['sort'] = GraphQLParam(sort, 'DataCenterSort', False)\n response = self._query(name='getDataCenters', params=parameters, fields=DataCenterList.fields())\n return DataCenterList(response)\n<|end_body_0|>\n\n<|body_start_1|>\n parameters = dict()\n parameters['input'] = GraphQLParam(create_input, 'CreateDataCenterInput', True)\n response = self._mutation(name='createDataCenter', params=parameters, fields=DataCenter.fields())\n return DataCenter(response)\n<|end_body_1|>\n\n<|body_start_2|>\n parameters = dict()\n parameters['uuid'] = GraphQLParam(uuid, 'UUID', True)\n parameters['input'] = GraphQLParam(delete_input, 'DeleteDataCenterInput', False)\n response = self._mutation(name='deleteDataCenter', params=parameters, fields=None)\n return response\n<|end_body_2|>\n\n<|body_start_3|>\n parameters = dict()\n parameters['uuid'] = GraphQLParam(uuid, 'UUID', True)\n parameters['input'] = GraphQLParam(update_input, 'UpsertDataCenterInput', False)\n response = self._mutation(name='updateDataCenter', params=parameters, fields=DataCenter.fields())\n return DataCenter(response)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000428", "length_bytes": 30254, "license_type": "permissive", "methods": [{"docstring": "Retrieves a list of datacenter objects :param page: The requested page from the server. This is an optional argument and if omitted the server will default to returning the first page with a maximum of ``100`` items. :type page: PageInput, optional :param dc_filter: A filter object to filter the datacenters on the server. If omitted, the server will return all objects as a paginated response. :type dc_filter: DataCenterFilter, optional :param sort: A sort definition object to sort the datacenter objects on supported properties. If omitted objects are returned in the order as they were created in. :type sort: DataCenterSort, optional :returns DataCenterList: A paginated list of datacenters. :", "name": "get_datacenters", "signature": "def get_datacenters(self, page: PageInput=None, dc_filter: DataCenterFilter=None, sort: DataCenterSort=None) -> DataCenterList"}, {"docstring": "Allows creation of a new datacenter object A datacenter record allows customers to logically organize their infrastructure by physical location and associate address and contact information with the physical location. This is useful for effective support case handling and reporting purposes. :param create_input: A property definition for the new datacenter :type create_input: CreateDataCenterInput :returns DataCenter: The new datacenter. :raises GraphQLError: An error with the GraphQL endpoint.", "name": "create_datacenter", "signature": "def create_datacenter(self, create_input: CreateDataCenterInput=None) -> DataCenter"}, {"docstring": "Allows deletion of an existing datacenter object The deletion of a datacenter is only possible if the datacenter has no hosts (servers) associated with any child items. 
:param uuid: The unique identifier of the datacenter to delete :type uuid: str :param delete_input: Optional parameters for the delete operation :type delete_input: DeleteDataCenterInput, optional :returns bool: If the query was successful :raises GraphQLError: An error with the GraphQL endpoint.", "name": "delete_datacenter", "signature": "def delete_datacenter(self, uuid: str, delete_input: DeleteDataCenterInput=None) -> bool"}, {"docstring": "Allows updating properties of an existing datacenter object :param uuid: The unique identifier of the datacenter to update :type uuid: str :param update_input: A property definition for the datacenter updates :type update_input: UpdateDataCenterInput :returns DataCenter: The updated datacenter object. :raises GraphQLError: An error with the GraphQL endpoint.", "name": "update_datacenter", "signature": "def update_datacenter(self, uuid: str, update_input: UpdateDataCenterInput) -> DataCenter"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_003189", "prompt": "Implement the Python class `DatacentersMixin` described below.\n\nClass description:\nMixin to add datacenter related methods to the GraphQL client\n\nMethod signatures and docstrings:\n- def get_datacenters(self, page: PageInput=None, dc_filter: DataCenterFilter=None, sort: DataCenterSort=None) -> DataCenterList: Retrieves a list of datacenter objects :param page: The requested page from the server. This is an optional argument and if omitted the server will default to returning the first page with a maximum of ``100`` items. :type page: PageInput, optional :param dc_filter: A filter object to filter the datacenters on the server. If omitted, the server will return all objects as a paginated response. :type dc_filter: DataCenterFilter, optional :param sort: A sort definition object to sort the datacenter objects on supported properties. If omitted objects are returned in the order as they were created in. :type sort: DataCenterSort, optional :returns DataCenterList: A paginated list of datacenters. :\n- def create_datacenter(self, create_input: CreateDataCenterInput=None) -> DataCenter: Allows creation of a new datacenter object A datacenter record allows customers to logically organize their infrastructure by physical location and associate address and contact information with the physical location. This is useful for effective support case handling and reporting purposes. :param create_input: A property definition for the new datacenter :type create_input: CreateDataCenterInput :returns DataCenter: The new datacenter. :raises GraphQLError: An error with the GraphQL endpoint.\n- def delete_datacenter(self, uuid: str, delete_input: DeleteDataCenterInput=None) -> bool: Allows deletion of an existing datacenter object The deletion of a datacenter is only possible if the datacenter has no hosts (servers) associated with any child items. 
:param uuid: The unique identifier of the datacenter to delete :type uuid: str :param delete_input: Optional parameters for the delete operation :type delete_input: DeleteDataCenterInput, optional :returns bool: If the query was successful :raises GraphQLError: An error with the GraphQL endpoint.\n- def update_datacenter(self, uuid: str, update_input: UpdateDataCenterInput) -> DataCenter: Allows updating properties of an existing datacenter object :param uuid: The unique identifier of the datacenter to update :type uuid: str :param update_input: A property definition for the datacenter updates :type update_input: UpdateDataCenterInput :returns DataCenter: The updated datacenter object. :raises GraphQLError: An error with the GraphQL endpoint.", "prompted_full_text": "Implement the Python class `DatacentersMixin` described below.\n\nClass description:\nMixin to add datacenter related methods to the GraphQL client\n\nMethod signatures and docstrings:\n- def get_datacenters(self, page: PageInput=None, dc_filter: DataCenterFilter=None, sort: DataCenterSort=None) -> DataCenterList: Retrieves a list of datacenter objects :param page: The requested page from the server. This is an optional argument and if omitted the server will default to returning the first page with a maximum of ``100`` items. :type page: PageInput, optional :param dc_filter: A filter object to filter the datacenters on the server. If omitted, the server will return all objects as a paginated response. :type dc_filter: DataCenterFilter, optional :param sort: A sort definition object to sort the datacenter objects on supported properties. If omitted objects are returned in the order as they were created in. :type sort: DataCenterSort, optional :returns DataCenterList: A paginated list of datacenters. :\n- def create_datacenter(self, create_input: CreateDataCenterInput=None) -> DataCenter: Allows creation of a new datacenter object A datacenter record allows customers to logically organize their infrastructure by physical location and associate address and contact information with the physical location. This is useful for effective support case handling and reporting purposes. :param create_input: A property definition for the new datacenter :type create_input: CreateDataCenterInput :returns DataCenter: The new datacenter. :raises GraphQLError: An error with the GraphQL endpoint.\n- def delete_datacenter(self, uuid: str, delete_input: DeleteDataCenterInput=None) -> bool: Allows deletion of an existing datacenter object The deletion of a datacenter is only possible if the datacenter has no hosts (servers) associated with any child items. :param uuid: The unique identifier of the datacenter to delete :type uuid: str :param delete_input: Optional parameters for the delete operation :type delete_input: DeleteDataCenterInput, optional :returns bool: If the query was successful :raises GraphQLError: An error with the GraphQL endpoint.\n- def update_datacenter(self, uuid: str, update_input: UpdateDataCenterInput) -> DataCenter: Allows updating properties of an existing datacenter object :param uuid: The unique identifier of the datacenter to update :type uuid: str :param update_input: A property definition for the datacenter updates :type update_input: UpdateDataCenterInput :returns DataCenter: The updated datacenter object. 
:raises GraphQLError: An error with the GraphQL endpoint.\n\n<|skeleton|>\nclass DatacentersMixin:\n \"\"\"Mixin to add datacenter related methods to the GraphQL client\"\"\"\n\n def get_datacenters(self, page: PageInput=None, dc_filter: DataCenterFilter=None, sort: DataCenterSort=None) -> DataCenterList:\n \"\"\"Retrieves a list of datacenter objects :param page: The requested page from the server. This is an optional argument and if omitted the server will default to returning the first page with a maximum of ``100`` items. :type page: PageInput, optional :param dc_filter: A filter object to filter the datacenters on the server. If omitted, the server will return all objects as a paginated response. :type dc_filter: DataCenterFilter, optional :param sort: A sort definition object to sort the datacenter objects on supported properties. If omitted objects are returned in the order as they were created in. :type sort: DataCenterSort, optional :returns DataCenterList: A paginated list of datacenters. :\"\"\"\n <|body_0|>\n\n def create_datacenter(self, create_input: CreateDataCenterInput=None) -> DataCenter:\n \"\"\"Allows creation of a new datacenter object A datacenter record allows customers to logically organize their infrastructure by physical location and associate address and contact information with the physical location. This is useful for effective support case handling and reporting purposes. :param create_input: A property definition for the new datacenter :type create_input: CreateDataCenterInput :returns DataCenter: The new datacenter. :raises GraphQLError: An error with the GraphQL endpoint.\"\"\"\n <|body_1|>\n\n def delete_datacenter(self, uuid: str, delete_input: DeleteDataCenterInput=None) -> bool:\n \"\"\"Allows deletion of an existing datacenter object The deletion of a datacenter is only possible if the datacenter has no hosts (servers) associated with any child items. :param uuid: The unique identifier of the datacenter to delete :type uuid: str :param delete_input: Optional parameters for the delete operation :type delete_input: DeleteDataCenterInput, optional :returns bool: If the query was successful :raises GraphQLError: An error with the GraphQL endpoint.\"\"\"\n <|body_2|>\n\n def update_datacenter(self, uuid: str, update_input: UpdateDataCenterInput) -> DataCenter:\n \"\"\"Allows updating properties of an existing datacenter object :param uuid: The unique identifier of the datacenter to update :type uuid: str :param update_input: A property definition for the datacenter updates :type update_input: UpdateDataCenterInput :returns DataCenter: The updated datacenter object. 
:raises GraphQLError: An error with the GraphQL endpoint.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n parameters = dict()\n parameters['page'] = GraphQLParam(page, 'PageInput', False)\n parameters['filter'] = GraphQLParam(dc_filter, 'DataCenterFilter', False)\n parameters['sort'] = GraphQLParam(sort, 'DataCenterSort', False)\n response = self._query(name='getDataCenters', params=parameters, fields=DataCenterList.fields())\n return DataCenterList(response)\n<|end_body_0|>\n\n<|body_start_1|>\n parameters = dict()\n parameters['input'] = GraphQLParam(create_input, 'CreateDataCenterInput', True)\n response = self._mutation(name='createDataCenter', params=parameters, fields=DataCenter.fields())\n return DataCenter(response)\n<|end_body_1|>\n\n<|body_start_2|>\n parameters = dict()\n parameters['uuid'] = GraphQLParam(uuid, 'UUID', True)\n parameters['input'] = GraphQLParam(delete_input, 'DeleteDataCenterInput', False)\n response = self._mutation(name='deleteDataCenter', params=parameters, fields=None)\n return response\n<|end_body_2|>\n\n<|body_start_3|>\n parameters = dict()\n parameters['uuid'] = GraphQLParam(uuid, 'UUID', True)\n parameters['input'] = GraphQLParam(update_input, 'UpsertDataCenterInput', False)\n response = self._mutation(name='updateDataCenter', params=parameters, fields=DataCenter.fields())\n return DataCenter(response)\n<|end_body_3|>\n", "revision_id": "8ea044096bd18aaccbfb81eca4e26ec29895a18c", "skeleton": "<|skeleton|>\nclass DatacentersMixin:\n \"\"\"Mixin to add datacenter related methods to the GraphQL client\"\"\"\n\n def get_datacenters(self, page: PageInput=None, dc_filter: DataCenterFilter=None, sort: DataCenterSort=None) -> DataCenterList:\n \"\"\"Retrieves a list of datacenter objects :param page: The requested page from the server. This is an optional argument and if omitted the server will default to returning the first page with a maximum of ``100`` items. :type page: PageInput, optional :param dc_filter: A filter object to filter the datacenters on the server. If omitted, the server will return all objects as a paginated response. :type dc_filter: DataCenterFilter, optional :param sort: A sort definition object to sort the datacenter objects on supported properties. If omitted objects are returned in the order as they were created in. :type sort: DataCenterSort, optional :returns DataCenterList: A paginated list of datacenters. :\"\"\"\n <|body_0|>\n\n def create_datacenter(self, create_input: CreateDataCenterInput=None) -> DataCenter:\n \"\"\"Allows creation of a new datacenter object A datacenter record allows customers to logically organize their infrastructure by physical location and associate address and contact information with the physical location. This is useful for effective support case handling and reporting purposes. :param create_input: A property definition for the new datacenter :type create_input: CreateDataCenterInput :returns DataCenter: The new datacenter. :raises GraphQLError: An error with the GraphQL endpoint.\"\"\"\n <|body_1|>\n\n def delete_datacenter(self, uuid: str, delete_input: DeleteDataCenterInput=None) -> bool:\n \"\"\"Allows deletion of an existing datacenter object The deletion of a datacenter is only possible if the datacenter has no hosts (servers) associated with any child items. 
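[Editor's aside on the `DatacentersMixin` record above: every body follows one pattern, collect `GraphQLParam`-wrapped arguments into a dict, then delegate to shared `_query`/`_mutation` helpers. The record never shows those helpers; the following is a minimal sketch of what such a query builder might look like, with all names and the rendering format assumed rather than taken from the library:]

import json
from dataclasses import dataclass
from typing import Any, Dict, Optional

@dataclass
class GraphQLParam:
    value: Any
    type_name: str
    required: bool

def render_query(name: str, params: Dict[str, GraphQLParam],
                 fields: Optional[str]) -> str:
    # Optional params left as None are simply omitted, mirroring how the
    # record marks them with required=False.
    args = ', '.join(f'{k}: {json.dumps(p.value)}'
                     for k, p in params.items() if p.value is not None)
    body = f' {{ {fields} }}' if fields else ''
    return f'query {{ {name}({args}){body} }}'

params = {'uuid': GraphQLParam('abc-123', 'UUID', True),
          'filter': GraphQLParam(None, 'DataCenterFilter', False)}
print(render_query('getDataCenters', params, 'uuid name'))
# query { getDataCenters(uuid: "abc-123") { uuid name } }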
:param uuid: The unique identifier of the datacenter to delete :type uuid: str :param delete_input: Optional parameters for the delete operation :type delete_input: DeleteDataCenterInput, optional :returns bool: If the query was successful :raises GraphQLError: An error with the GraphQL endpoint.\"\"\"\n <|body_2|>\n\n def update_datacenter(self, uuid: str, update_input: UpdateDataCenterInput) -> DataCenter:\n \"\"\"Allows updating properties of an existing datacenter object :param uuid: The unique identifier of the datacenter to update :type uuid: str :param update_input: A property definition for the datacenter updates :type update_input: UpdateDataCenterInput :returns DataCenter: The updated datacenter object. :raises GraphQLError: An error with the GraphQL endpoint.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DatacentersMixin:\n \"\"\"Mixin to add datacenter related methods to the GraphQL client\"\"\"\n\n def get_datacenters(self, page: PageInput=None, dc_filter: DataCenterFilter=None, sort: DataCenterSort=None) -> DataCenterList:\n \"\"\"Retrieves a list of datacenter objects :param page: The requested page from the server. This is an optional argument and if omitted the server will default to returning the first page with a maximum of ``100`` items. :type page: PageInput, optional :param dc_filter: A filter object to filter the datacenters on the server. If omitted, the server will return all objects as a paginated response. :type dc_filter: DataCenterFilter, optional :param sort: A sort definition object to sort the datacenter objects on supported properties. If omitted objects are returned in the order as they were created in. :type sort: DataCenterSort, optional :returns DataCenterList: A paginated list of datacenters. :\"\"\"\n parameters = dict()\n parameters['page'] = GraphQLParam(page, 'PageInput', False)\n parameters['filter'] = GraphQLParam(dc_filter, 'DataCenterFilter', False)\n parameters['sort'] = GraphQLParam(sort, 'DataCenterSort', False)\n response = self._query(name='getDataCenters', params=parameters, fields=DataCenterList.fields())\n return DataCenterList(response)\n\n def create_datacenter(self, create_input: CreateDataCenterInput=None) -> DataCenter:\n \"\"\"Allows creation of a new datacenter object A datacenter record allows customers to logically organize their infrastructure by physical location and associate address and contact information with the physical location. This is useful for effective support case handling and reporting purposes. :param create_input: A property definition for the new datacenter :type create_input: CreateDataCenterInput :returns DataCenter: The new datacenter. :raises GraphQLError: An error with the GraphQL endpoint.\"\"\"\n parameters = dict()\n parameters['input'] = GraphQLParam(create_input, 'CreateDataCenterInput', True)\n response = self._mutation(name='createDataCenter', params=parameters, fields=DataCenter.fields())\n return DataCenter(response)\n\n def delete_datacenter(self, uuid: str, delete_input: DeleteDataCenterInput=None) -> bool:\n \"\"\"Allows deletion of an existing datacenter object The deletion of a datacenter is only possible if the datacenter has no hosts (servers) associated with any child items. 
:param uuid: The unique identifier of the datacenter to delete :type uuid: str :param delete_input: Optional parameters for the delete operation :type delete_input: DeleteDataCenterInput, optional :returns bool: If the query was successful :raises GraphQLError: An error with the GraphQL endpoint.\"\"\"\n parameters = dict()\n parameters['uuid'] = GraphQLParam(uuid, 'UUID', True)\n parameters['input'] = GraphQLParam(delete_input, 'DeleteDataCenterInput', False)\n response = self._mutation(name='deleteDataCenter', params=parameters, fields=None)\n return response\n\n def update_datacenter(self, uuid: str, update_input: UpdateDataCenterInput) -> DataCenter:\n \"\"\"Allows updating properties of an existing datacenter object :param uuid: The unique identifier of the datacenter to update :type uuid: str :param update_input: A property definition for the datacenter updates :type update_input: UpdateDataCenterInput :returns DataCenter: The updated datacenter object. :raises GraphQLError: An error with the GraphQL endpoint.\"\"\"\n parameters = dict()\n parameters['uuid'] = GraphQLParam(uuid, 'UUID', True)\n parameters['input'] = GraphQLParam(update_input, 'UpsertDataCenterInput', False)\n response = self._mutation(name='updateDataCenter', params=parameters, fields=DataCenter.fields())\n return DataCenter(response)\n", "source": "the_stack_v2_python_sparse", "source_path": "nebpyclient/api/datacenters.py", "source_repo": "firefly707/nebpyclient", "split": "val", "star_events_count": 0} {"blob_id": "8336d0bcc1a9ef04698dacc1f18ab425adca17dc", "bodies": ["node = head\ncount = 0\nwhile node:\n count += 1\n node = node.next\nnode = head\nfor _ in range(count // 2):\n node = node.next\nreturn node", "fast = slow = head\nwhile fast and fast.next:\n slow = slow.next\n fast = fast.next.next\nreturn slow"], "bodies_text": "<|body_start_0|>\n node = head\n count = 0\n while node:\n count += 1\n node = node.next\n node = head\n for _ in range(count // 2):\n node = node.next\n return node\n<|end_body_0|>\n\n<|body_start_1|>\n fast = slow = head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n return slow\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def middleNode(self, head):\n \"\"\"解題思路:暴力法。 從頭開始遍歷並計算linked list的長度,找完後再回頭遍歷middle node :type head: ListNode :rtype: ListNode\"\"\"\n <|body_0|>\n\n def middleNode2(self, head):\n \"\"\"解題思路: 例用兩個指標fast和slow從頭遍歷,當fast走完linked list時,slow會剛好停在middle node 時間複雜度: O(N) 空間複雜度: O(1) :param head: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n node = head\n count = 0\n while node:\n count += 1\n node = node.next\n node = head\n for _ in range(count // 2):\n node = node.next\n return node\n<|end_body_0|>\n\n<|body_start_1|>\n fast = slow = head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n return slow\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000429", "length_bytes": 2066, "license_type": "no_license", "methods": [{"docstring": "解題思路:暴力法。 從頭開始遍歷並計算linked list的長度,找完後再回頭遍歷middle node :type head: ListNode :rtype: ListNode", "name": "middleNode", "signature": "def middleNode(self, head)"}, {"docstring": "解題思路: 例用兩個指標fast和slow從頭遍歷,當fast走完linked list時,slow會剛好停在middle node 時間複雜度: O(N) 空間複雜度: O(1) :param head: :return:", "name": "middleNode2", "signature": "def middleNode2(self, head)"}], "n_methods": 2, "prompt": "Implement the Python class 
`Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def middleNode(self, head): 解題思路:暴力法。 從頭開始遍歷並計算linked list的長度,找完後再回頭遍歷middle node :type head: ListNode :rtype: ListNode\n- def middleNode2(self, head): 解題思路: 例用兩個指標fast和slow從頭遍歷,當fast走完linked list時,slow會剛好停在middle node 時間複雜度: O(N) 空間複雜度: O(1) :param head: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def middleNode(self, head): 解題思路:暴力法。 從頭開始遍歷並計算linked list的長度,找完後再回頭遍歷middle node :type head: ListNode :rtype: ListNode\n- def middleNode2(self, head): 解題思路: 例用兩個指標fast和slow從頭遍歷,當fast走完linked list時,slow會剛好停在middle node 時間複雜度: O(N) 空間複雜度: O(1) :param head: :return:\n\n<|skeleton|>\nclass Solution:\n\n def middleNode(self, head):\n \"\"\"解題思路:暴力法。 從頭開始遍歷並計算linked list的長度,找完後再回頭遍歷middle node :type head: ListNode :rtype: ListNode\"\"\"\n <|body_0|>\n\n def middleNode2(self, head):\n \"\"\"解題思路: 例用兩個指標fast和slow從頭遍歷,當fast走完linked list時,slow會剛好停在middle node 時間複雜度: O(N) 空間複雜度: O(1) :param head: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n node = head\n count = 0\n while node:\n count += 1\n node = node.next\n node = head\n for _ in range(count // 2):\n node = node.next\n return node\n<|end_body_0|>\n\n<|body_start_1|>\n fast = slow = head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n return slow\n<|end_body_1|>\n", "revision_id": "8595b04cf5a024c2cd8a97f750d890a818568401", "skeleton": "<|skeleton|>\nclass Solution:\n\n def middleNode(self, head):\n \"\"\"解題思路:暴力法。 從頭開始遍歷並計算linked list的長度,找完後再回頭遍歷middle node :type head: ListNode :rtype: ListNode\"\"\"\n <|body_0|>\n\n def middleNode2(self, head):\n \"\"\"解題思路: 例用兩個指標fast和slow從頭遍歷,當fast走完linked list時,slow會剛好停在middle node 時間複雜度: O(N) 空間複雜度: O(1) :param head: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def middleNode(self, head):\n \"\"\"解題思路:暴力法。 從頭開始遍歷並計算linked list的長度,找完後再回頭遍歷middle node :type head: ListNode :rtype: ListNode\"\"\"\n node = head\n count = 0\n while node:\n count += 1\n node = node.next\n node = head\n for _ in range(count // 2):\n node = node.next\n return node\n\n def middleNode2(self, head):\n \"\"\"解題思路: 例用兩個指標fast和slow從頭遍歷,當fast走完linked list時,slow會剛好停在middle node 時間複雜度: O(N) 空間複雜度: O(1) :param head: :return:\"\"\"\n fast = slow = head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n return slow\n", "source": "the_stack_v2_python_sparse", "source_path": "python/876.middle-of-the-linked-list.py", "source_repo": "tainenko/Leetcode2019", "split": "val", "star_events_count": 5} {"blob_id": "8a0df03cb743d8d5818818c61fc699fa86c1505e", "bodies": ["self.d_model = d_model\nself.d_k = d_k\nself.d_v = d_v\nself.sequence_length = sequence_length\nself.h = h\nself.num_layer = num_layer\nself.batch_size = batch_size\nself.decoder_sent_length = decoder_sent_length", "with tf.variable_scope('sub_layer_postion_wise_feed_forward' + str(layer_index)):\n postion_wise_feed_forward = PositionWiseFeedFoward(x, layer_index, d_model=self.d_model, d_ff=self.d_model * 4)\n postion_wise_feed_forward_output = postion_wise_feed_forward.position_wise_feed_forward_fn()\nreturn postion_wise_feed_forward_output", "with tf.variable_scope('base_mode_sub_layer_multi_head_attention_' + 
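[Editor's aside on the middle-of-the-linked-list record above: `middleNode2` is the classic fast/slow two-pointer trick, advancing one pointer two steps for every single step of the other so the slow pointer lands on the middle in one pass, O(N) time and O(1) space. A runnable check mirroring that second body, assuming a minimal `ListNode` that the record does not include:]

class ListNode:
    def __init__(self, val=0, next=None):
        self.val, self.next = val, next

def middle_node(head):
    fast = slow = head
    while fast and fast.next:
        slow = slow.next           # one step
        fast = fast.next.next      # two steps
    return slow                    # second middle for even-length lists

head = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
assert middle_node(head).val == 3
head.next.next.next.next.next = ListNode(6)    # now length 6
assert middle_node(head).val == 4              # even length -> second middle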
str(layer_index)):\n multi_head_attention_class = MultiHeadAttention(Q, K_s, V_s, self.d_model, self.d_k, self.d_v, self.sequence_length, self.h, is_training=is_training, mask=mask, dropout_rate=1.0 - dropout_keep_prob)\n sub_layer_multi_head_attention_output = multi_head_attention_class.multi_head_attention_fn()\nreturn sub_layer_multi_head_attention_output", "variable_scope = 'sub_layer_layer_norm_residual_connection_' + str(layer_index) + '_' + sub_layer_name\nwith tf.variable_scope(variable_scope):\n layer_norm_residual_conn = LayerNormResidualConnection(layer_input, layer_output, layer_index, residual_dropout=1 - dropout_keep_prob, use_residual_conn=use_residual_conn)\n output = layer_norm_residual_conn.layer_norm_residual_connection()\nreturn output"], "bodies_text": "<|body_start_0|>\n self.d_model = d_model\n self.d_k = d_k\n self.d_v = d_v\n self.sequence_length = sequence_length\n self.h = h\n self.num_layer = num_layer\n self.batch_size = batch_size\n self.decoder_sent_length = decoder_sent_length\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.variable_scope('sub_layer_postion_wise_feed_forward' + str(layer_index)):\n postion_wise_feed_forward = PositionWiseFeedFoward(x, layer_index, d_model=self.d_model, d_ff=self.d_model * 4)\n postion_wise_feed_forward_output = postion_wise_feed_forward.position_wise_feed_forward_fn()\n return postion_wise_feed_forward_output\n<|end_body_1|>\n\n<|body_start_2|>\n with tf.variable_scope('base_mode_sub_layer_multi_head_attention_' + str(layer_index)):\n multi_head_attention_class = MultiHeadAttention(Q, K_s, V_s, self.d_model, self.d_k, self.d_v, self.sequence_length, self.h, is_training=is_training, mask=mask, dropout_rate=1.0 - dropout_keep_prob)\n sub_layer_multi_head_attention_output = multi_head_attention_class.multi_head_attention_fn()\n return sub_layer_multi_head_attention_output\n<|end_body_2|>\n\n<|body_start_3|>\n variable_scope = 'sub_layer_layer_norm_residual_connection_' + str(layer_index) + '_' + sub_layer_name\n with tf.variable_scope(variable_scope):\n layer_norm_residual_conn = LayerNormResidualConnection(layer_input, layer_output, layer_index, residual_dropout=1 - dropout_keep_prob, use_residual_conn=use_residual_conn)\n output = layer_norm_residual_conn.layer_norm_residual_connection()\n return output\n<|end_body_3|>\n", "class_docstring": "base class has some common fields and functions.", "class_name": "BaseClass", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BaseClass:\n \"\"\"base class has some common fields and functions.\"\"\"\n\n def __init__(self, d_model, d_k, d_v, sequence_length, h, batch_size, num_layer=6, decoder_sent_length=None):\n \"\"\":param d_model: :param d_k: :param d_v: :param sequence_length: :param h: :param batch_size: :param embedded_words: shape:[batch_size,sequence_length,embed_size]\"\"\"\n <|body_0|>\n\n def sub_layer_postion_wise_feed_forward(self, x, layer_index):\n \"\"\"position-wise feed forward. you can implement it as feed forward network, or two layers of CNN. 
:param x: shape should be:[batch_size,sequence_length,d_model] :param layer_index: index of layer number :return: [batch_size,sequence_length,d_model]\"\"\"\n <|body_1|>\n\n def sub_layer_multi_head_attention(self, layer_index, Q, K_s, V_s, mask=None, is_training=None, dropout_keep_prob=0.9):\n \"\"\"multi head attention as sub layer :param layer_index: index of layer number :param Q: shape should be: [batch_size,sequence_length,embed_size] :param k_s: shape should be: [batch_size,sequence_length,embed_size] :param mask: when use mask,illegal connection will be mask as huge big negative value.so it's possiblitity will become zero. :return: output of multi head attention.shape:[batch_size,sequence_length,d_model]\"\"\"\n <|body_2|>\n\n def sub_layer_layer_norm_residual_connection(self, layer_input, layer_output, layer_index, dropout_keep_prob=0.9, use_residual_conn=True, sub_layer_name='layer1'):\n \"\"\"layer norm & residual connection :param input: [batch_size,equence_length,d_model] :param output:[batch_size,sequence_length,d_model] :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.d_model = d_model\n self.d_k = d_k\n self.d_v = d_v\n self.sequence_length = sequence_length\n self.h = h\n self.num_layer = num_layer\n self.batch_size = batch_size\n self.decoder_sent_length = decoder_sent_length\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.variable_scope('sub_layer_postion_wise_feed_forward' + str(layer_index)):\n postion_wise_feed_forward = PositionWiseFeedFoward(x, layer_index, d_model=self.d_model, d_ff=self.d_model * 4)\n postion_wise_feed_forward_output = postion_wise_feed_forward.position_wise_feed_forward_fn()\n return postion_wise_feed_forward_output\n<|end_body_1|>\n\n<|body_start_2|>\n with tf.variable_scope('base_mode_sub_layer_multi_head_attention_' + str(layer_index)):\n multi_head_attention_class = MultiHeadAttention(Q, K_s, V_s, self.d_model, self.d_k, self.d_v, self.sequence_length, self.h, is_training=is_training, mask=mask, dropout_rate=1.0 - dropout_keep_prob)\n sub_layer_multi_head_attention_output = multi_head_attention_class.multi_head_attention_fn()\n return sub_layer_multi_head_attention_output\n<|end_body_2|>\n\n<|body_start_3|>\n variable_scope = 'sub_layer_layer_norm_residual_connection_' + str(layer_index) + '_' + sub_layer_name\n with tf.variable_scope(variable_scope):\n layer_norm_residual_conn = LayerNormResidualConnection(layer_input, layer_output, layer_index, residual_dropout=1 - dropout_keep_prob, use_residual_conn=use_residual_conn)\n output = layer_norm_residual_conn.layer_norm_residual_connection()\n return output\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000430", "length_bytes": 4426, "license_type": "permissive", "methods": [{"docstring": ":param d_model: :param d_k: :param d_v: :param sequence_length: :param h: :param batch_size: :param embedded_words: shape:[batch_size,sequence_length,embed_size]", "name": "__init__", "signature": "def __init__(self, d_model, d_k, d_v, sequence_length, h, batch_size, num_layer=6, decoder_sent_length=None)"}, {"docstring": "position-wise feed forward. you can implement it as feed forward network, or two layers of CNN. 
:param x: shape should be:[batch_size,sequence_length,d_model] :param layer_index: index of layer number :return: [batch_size,sequence_length,d_model]", "name": "sub_layer_postion_wise_feed_forward", "signature": "def sub_layer_postion_wise_feed_forward(self, x, layer_index)"}, {"docstring": "multi head attention as sub layer :param layer_index: index of layer number :param Q: shape should be: [batch_size,sequence_length,embed_size] :param k_s: shape should be: [batch_size,sequence_length,embed_size] :param mask: when use mask,illegal connection will be mask as huge big negative value.so it's possiblitity will become zero. :return: output of multi head attention.shape:[batch_size,sequence_length,d_model]", "name": "sub_layer_multi_head_attention", "signature": "def sub_layer_multi_head_attention(self, layer_index, Q, K_s, V_s, mask=None, is_training=None, dropout_keep_prob=0.9)"}, {"docstring": "layer norm & residual connection :param input: [batch_size,equence_length,d_model] :param output:[batch_size,sequence_length,d_model] :return:", "name": "sub_layer_layer_norm_residual_connection", "signature": "def sub_layer_layer_norm_residual_connection(self, layer_input, layer_output, layer_index, dropout_keep_prob=0.9, use_residual_conn=True, sub_layer_name='layer1')"}], "n_methods": 4, "prompt": "Implement the Python class `BaseClass` described below.\n\nClass description:\nbase class has some common fields and functions.\n\nMethod signatures and docstrings:\n- def __init__(self, d_model, d_k, d_v, sequence_length, h, batch_size, num_layer=6, decoder_sent_length=None): :param d_model: :param d_k: :param d_v: :param sequence_length: :param h: :param batch_size: :param embedded_words: shape:[batch_size,sequence_length,embed_size]\n- def sub_layer_postion_wise_feed_forward(self, x, layer_index): position-wise feed forward. you can implement it as feed forward network, or two layers of CNN. :param x: shape should be:[batch_size,sequence_length,d_model] :param layer_index: index of layer number :return: [batch_size,sequence_length,d_model]\n- def sub_layer_multi_head_attention(self, layer_index, Q, K_s, V_s, mask=None, is_training=None, dropout_keep_prob=0.9): multi head attention as sub layer :param layer_index: index of layer number :param Q: shape should be: [batch_size,sequence_length,embed_size] :param k_s: shape should be: [batch_size,sequence_length,embed_size] :param mask: when use mask,illegal connection will be mask as huge big negative value.so it's possiblitity will become zero. :return: output of multi head attention.shape:[batch_size,sequence_length,d_model]\n- def sub_layer_layer_norm_residual_connection(self, layer_input, layer_output, layer_index, dropout_keep_prob=0.9, use_residual_conn=True, sub_layer_name='layer1'): layer norm & residual connection :param input: [batch_size,equence_length,d_model] :param output:[batch_size,sequence_length,d_model] :return:", "prompted_full_text": "Implement the Python class `BaseClass` described below.\n\nClass description:\nbase class has some common fields and functions.\n\nMethod signatures and docstrings:\n- def __init__(self, d_model, d_k, d_v, sequence_length, h, batch_size, num_layer=6, decoder_sent_length=None): :param d_model: :param d_k: :param d_v: :param sequence_length: :param h: :param batch_size: :param embedded_words: shape:[batch_size,sequence_length,embed_size]\n- def sub_layer_postion_wise_feed_forward(self, x, layer_index): position-wise feed forward. you can implement it as feed forward network, or two layers of CNN. 
:param x: shape should be:[batch_size,sequence_length,d_model] :param layer_index: index of layer number :return: [batch_size,sequence_length,d_model]\n- def sub_layer_multi_head_attention(self, layer_index, Q, K_s, V_s, mask=None, is_training=None, dropout_keep_prob=0.9): multi head attention as sub layer :param layer_index: index of layer number :param Q: shape should be: [batch_size,sequence_length,embed_size] :param k_s: shape should be: [batch_size,sequence_length,embed_size] :param mask: when use mask,illegal connection will be mask as huge big negative value.so it's possiblitity will become zero. :return: output of multi head attention.shape:[batch_size,sequence_length,d_model]\n- def sub_layer_layer_norm_residual_connection(self, layer_input, layer_output, layer_index, dropout_keep_prob=0.9, use_residual_conn=True, sub_layer_name='layer1'): layer norm & residual connection :param input: [batch_size,equence_length,d_model] :param output:[batch_size,sequence_length,d_model] :return:\n\n<|skeleton|>\nclass BaseClass:\n \"\"\"base class has some common fields and functions.\"\"\"\n\n def __init__(self, d_model, d_k, d_v, sequence_length, h, batch_size, num_layer=6, decoder_sent_length=None):\n \"\"\":param d_model: :param d_k: :param d_v: :param sequence_length: :param h: :param batch_size: :param embedded_words: shape:[batch_size,sequence_length,embed_size]\"\"\"\n <|body_0|>\n\n def sub_layer_postion_wise_feed_forward(self, x, layer_index):\n \"\"\"position-wise feed forward. you can implement it as feed forward network, or two layers of CNN. :param x: shape should be:[batch_size,sequence_length,d_model] :param layer_index: index of layer number :return: [batch_size,sequence_length,d_model]\"\"\"\n <|body_1|>\n\n def sub_layer_multi_head_attention(self, layer_index, Q, K_s, V_s, mask=None, is_training=None, dropout_keep_prob=0.9):\n \"\"\"multi head attention as sub layer :param layer_index: index of layer number :param Q: shape should be: [batch_size,sequence_length,embed_size] :param k_s: shape should be: [batch_size,sequence_length,embed_size] :param mask: when use mask,illegal connection will be mask as huge big negative value.so it's possiblitity will become zero. 
:return: output of multi head attention.shape:[batch_size,sequence_length,d_model]\"\"\"\n <|body_2|>\n\n def sub_layer_layer_norm_residual_connection(self, layer_input, layer_output, layer_index, dropout_keep_prob=0.9, use_residual_conn=True, sub_layer_name='layer1'):\n \"\"\"layer norm & residual connection :param input: [batch_size,equence_length,d_model] :param output:[batch_size,sequence_length,d_model] :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.d_model = d_model\n self.d_k = d_k\n self.d_v = d_v\n self.sequence_length = sequence_length\n self.h = h\n self.num_layer = num_layer\n self.batch_size = batch_size\n self.decoder_sent_length = decoder_sent_length\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.variable_scope('sub_layer_postion_wise_feed_forward' + str(layer_index)):\n postion_wise_feed_forward = PositionWiseFeedFoward(x, layer_index, d_model=self.d_model, d_ff=self.d_model * 4)\n postion_wise_feed_forward_output = postion_wise_feed_forward.position_wise_feed_forward_fn()\n return postion_wise_feed_forward_output\n<|end_body_1|>\n\n<|body_start_2|>\n with tf.variable_scope('base_mode_sub_layer_multi_head_attention_' + str(layer_index)):\n multi_head_attention_class = MultiHeadAttention(Q, K_s, V_s, self.d_model, self.d_k, self.d_v, self.sequence_length, self.h, is_training=is_training, mask=mask, dropout_rate=1.0 - dropout_keep_prob)\n sub_layer_multi_head_attention_output = multi_head_attention_class.multi_head_attention_fn()\n return sub_layer_multi_head_attention_output\n<|end_body_2|>\n\n<|body_start_3|>\n variable_scope = 'sub_layer_layer_norm_residual_connection_' + str(layer_index) + '_' + sub_layer_name\n with tf.variable_scope(variable_scope):\n layer_norm_residual_conn = LayerNormResidualConnection(layer_input, layer_output, layer_index, residual_dropout=1 - dropout_keep_prob, use_residual_conn=use_residual_conn)\n output = layer_norm_residual_conn.layer_norm_residual_connection()\n return output\n<|end_body_3|>\n", "revision_id": "480c909e0835a455606e829310ff949c9dd23549", "skeleton": "<|skeleton|>\nclass BaseClass:\n \"\"\"base class has some common fields and functions.\"\"\"\n\n def __init__(self, d_model, d_k, d_v, sequence_length, h, batch_size, num_layer=6, decoder_sent_length=None):\n \"\"\":param d_model: :param d_k: :param d_v: :param sequence_length: :param h: :param batch_size: :param embedded_words: shape:[batch_size,sequence_length,embed_size]\"\"\"\n <|body_0|>\n\n def sub_layer_postion_wise_feed_forward(self, x, layer_index):\n \"\"\"position-wise feed forward. you can implement it as feed forward network, or two layers of CNN. :param x: shape should be:[batch_size,sequence_length,d_model] :param layer_index: index of layer number :return: [batch_size,sequence_length,d_model]\"\"\"\n <|body_1|>\n\n def sub_layer_multi_head_attention(self, layer_index, Q, K_s, V_s, mask=None, is_training=None, dropout_keep_prob=0.9):\n \"\"\"multi head attention as sub layer :param layer_index: index of layer number :param Q: shape should be: [batch_size,sequence_length,embed_size] :param k_s: shape should be: [batch_size,sequence_length,embed_size] :param mask: when use mask,illegal connection will be mask as huge big negative value.so it's possiblitity will become zero. 
:return: output of multi head attention.shape:[batch_size,sequence_length,d_model]\"\"\"\n <|body_2|>\n\n def sub_layer_layer_norm_residual_connection(self, layer_input, layer_output, layer_index, dropout_keep_prob=0.9, use_residual_conn=True, sub_layer_name='layer1'):\n \"\"\"layer norm & residual connection :param input: [batch_size,equence_length,d_model] :param output:[batch_size,sequence_length,d_model] :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BaseClass:\n \"\"\"base class has some common fields and functions.\"\"\"\n\n def __init__(self, d_model, d_k, d_v, sequence_length, h, batch_size, num_layer=6, decoder_sent_length=None):\n \"\"\":param d_model: :param d_k: :param d_v: :param sequence_length: :param h: :param batch_size: :param embedded_words: shape:[batch_size,sequence_length,embed_size]\"\"\"\n self.d_model = d_model\n self.d_k = d_k\n self.d_v = d_v\n self.sequence_length = sequence_length\n self.h = h\n self.num_layer = num_layer\n self.batch_size = batch_size\n self.decoder_sent_length = decoder_sent_length\n\n def sub_layer_postion_wise_feed_forward(self, x, layer_index):\n \"\"\"position-wise feed forward. you can implement it as feed forward network, or two layers of CNN. :param x: shape should be:[batch_size,sequence_length,d_model] :param layer_index: index of layer number :return: [batch_size,sequence_length,d_model]\"\"\"\n with tf.variable_scope('sub_layer_postion_wise_feed_forward' + str(layer_index)):\n postion_wise_feed_forward = PositionWiseFeedFoward(x, layer_index, d_model=self.d_model, d_ff=self.d_model * 4)\n postion_wise_feed_forward_output = postion_wise_feed_forward.position_wise_feed_forward_fn()\n return postion_wise_feed_forward_output\n\n def sub_layer_multi_head_attention(self, layer_index, Q, K_s, V_s, mask=None, is_training=None, dropout_keep_prob=0.9):\n \"\"\"multi head attention as sub layer :param layer_index: index of layer number :param Q: shape should be: [batch_size,sequence_length,embed_size] :param k_s: shape should be: [batch_size,sequence_length,embed_size] :param mask: when use mask,illegal connection will be mask as huge big negative value.so it's possiblitity will become zero. 
:return: output of multi head attention.shape:[batch_size,sequence_length,d_model]\"\"\"\n with tf.variable_scope('base_mode_sub_layer_multi_head_attention_' + str(layer_index)):\n multi_head_attention_class = MultiHeadAttention(Q, K_s, V_s, self.d_model, self.d_k, self.d_v, self.sequence_length, self.h, is_training=is_training, mask=mask, dropout_rate=1.0 - dropout_keep_prob)\n sub_layer_multi_head_attention_output = multi_head_attention_class.multi_head_attention_fn()\n return sub_layer_multi_head_attention_output\n\n def sub_layer_layer_norm_residual_connection(self, layer_input, layer_output, layer_index, dropout_keep_prob=0.9, use_residual_conn=True, sub_layer_name='layer1'):\n \"\"\"layer norm & residual connection :param input: [batch_size,equence_length,d_model] :param output:[batch_size,sequence_length,d_model] :return:\"\"\"\n variable_scope = 'sub_layer_layer_norm_residual_connection_' + str(layer_index) + '_' + sub_layer_name\n with tf.variable_scope(variable_scope):\n layer_norm_residual_conn = LayerNormResidualConnection(layer_input, layer_output, layer_index, residual_dropout=1 - dropout_keep_prob, use_residual_conn=use_residual_conn)\n output = layer_norm_residual_conn.layer_norm_residual_connection()\n return output\n", "source": "the_stack_v2_python_sparse", "source_path": "bert_language_understanding-master/bert_language_understanding-master/model/base_model.py", "source_repo": "yyht/BERT", "split": "val", "star_events_count": 37} {"blob_id": "4a401be21cfc60bd1ec789745bab21848f25b039", "bodies": ["global DataBase\njson_data = lottery_ns.payload\nif ticket_num not in DataBase:\n ticket_fields = json_data['fields']\n ticket_price = json_data['price']\n if len(ticket_fields) != 3:\n return ('Количество полей в билете должно быть равно 3', 400)\n new_ticket = Ticket(ticket_num, ticket_fields, ticket_price)\n DataBase[ticket_num] = new_ticket\n return new_ticket.jsonify()\nelse:\n return ('Билет с этим номером уже есть в базе', 400)", "global DataBase\nif ticket_num in DataBase:\n return DataBase[ticket_num].jsonify()\nelse:\n return ('Лотерейный билет не найден', 404)", "global DataBase\nif ticket_num in DataBase:\n DataBase[ticket_num].sell()\n return DataBase[ticket_num].jsonify()\nelse:\n lottery_ns.abort(404)"], "bodies_text": "<|body_start_0|>\n global DataBase\n json_data = lottery_ns.payload\n if ticket_num not in DataBase:\n ticket_fields = json_data['fields']\n ticket_price = json_data['price']\n if len(ticket_fields) != 3:\n return ('Количество полей в билете должно быть равно 3', 400)\n new_ticket = Ticket(ticket_num, ticket_fields, ticket_price)\n DataBase[ticket_num] = new_ticket\n return new_ticket.jsonify()\n else:\n return ('Билет с этим номером уже есть в базе', 400)\n<|end_body_0|>\n\n<|body_start_1|>\n global DataBase\n if ticket_num in DataBase:\n return DataBase[ticket_num].jsonify()\n else:\n return ('Лотерейный билет не найден', 404)\n<|end_body_1|>\n\n<|body_start_2|>\n global DataBase\n if ticket_num in DataBase:\n DataBase[ticket_num].sell()\n return DataBase[ticket_num].jsonify()\n else:\n lottery_ns.abort(404)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "TicketResource", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TicketResource:\n\n def post(self, ticket_num):\n \"\"\"Метод для внесения лотирейного билета в базу\"\"\"\n <|body_0|>\n\n def get(self, ticket_num):\n \"\"\"Метод для получения информации о лотирейном билете\"\"\"\n <|body_1|>\n\n def patch(self, 
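The `BaseClass` record above wires each Transformer sub-layer through a layer-norm/residual wrapper and a position-wise feed-forward with d_ff = 4 * d_model. The NumPy sketch below illustrates that post-norm residual pattern in isolation; it is not the record's TF1 implementation, and the random weights and helper names are assumptions.

# Illustrative sketch (not the record's TensorFlow code) of the
# residual-connection + layer-norm pattern that
# sub_layer_layer_norm_residual_connection wraps around each sub-layer.
import numpy as np

def layer_norm(x, eps=1e-6):
    # Normalize over the last axis (d_model), as in the Transformer.
    mean = x.mean(axis=-1, keepdims=True)
    std = x.std(axis=-1, keepdims=True)
    return (x - mean) / (std + eps)

def residual_block(layer_input, sublayer_fn):
    # Post-norm variant: LayerNorm(x + SubLayer(x)).
    return layer_norm(layer_input + sublayer_fn(layer_input))

# Position-wise feed-forward with the 4x expansion the record uses
# (d_ff = 4 * d_model); the weights here are random placeholders.
d_model, d_ff = 8, 32
w1 = np.random.randn(d_model, d_ff)
w2 = np.random.randn(d_ff, d_model)
ffn = lambda x: np.maximum(x @ w1, 0.0) @ w2  # ReLU(x W1) W2

x = np.random.randn(2, 5, d_model)            # [batch, seq_len, d_model]
y = residual_block(x, ffn)
assert y.shape == x.shape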
ticket_num):\n \"\"\"Метод для изменения статуса билета (вызвается во время покупки билета в ларьке)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n global DataBase\n json_data = lottery_ns.payload\n if ticket_num not in DataBase:\n ticket_fields = json_data['fields']\n ticket_price = json_data['price']\n if len(ticket_fields) != 3:\n return ('Количество полей в билете должно быть равно 3', 400)\n new_ticket = Ticket(ticket_num, ticket_fields, ticket_price)\n DataBase[ticket_num] = new_ticket\n return new_ticket.jsonify()\n else:\n return ('Билет с этим номером уже есть в базе', 400)\n<|end_body_0|>\n\n<|body_start_1|>\n global DataBase\n if ticket_num in DataBase:\n return DataBase[ticket_num].jsonify()\n else:\n return ('Лотерейный билет не найден', 404)\n<|end_body_1|>\n\n<|body_start_2|>\n global DataBase\n if ticket_num in DataBase:\n DataBase[ticket_num].sell()\n return DataBase[ticket_num].jsonify()\n else:\n lottery_ns.abort(404)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000431", "length_bytes": 3037, "license_type": "no_license", "methods": [{"docstring": "Метод для внесения лотирейного билета в базу", "name": "post", "signature": "def post(self, ticket_num)"}, {"docstring": "Метод для получения информации о лотирейном билете", "name": "get", "signature": "def get(self, ticket_num)"}, {"docstring": "Метод для изменения статуса билета (вызвается во время покупки билета в ларьке)", "name": "patch", "signature": "def patch(self, ticket_num)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000838", "prompt": "Implement the Python class `TicketResource` described below.\n\nClass description:\nImplement the TicketResource class.\n\nMethod signatures and docstrings:\n- def post(self, ticket_num): Метод для внесения лотирейного билета в базу\n- def get(self, ticket_num): Метод для получения информации о лотирейном билете\n- def patch(self, ticket_num): Метод для изменения статуса билета (вызвается во время покупки билета в ларьке)", "prompted_full_text": "Implement the Python class `TicketResource` described below.\n\nClass description:\nImplement the TicketResource class.\n\nMethod signatures and docstrings:\n- def post(self, ticket_num): Метод для внесения лотирейного билета в базу\n- def get(self, ticket_num): Метод для получения информации о лотирейном билете\n- def patch(self, ticket_num): Метод для изменения статуса билета (вызвается во время покупки билета в ларьке)\n\n<|skeleton|>\nclass TicketResource:\n\n def post(self, ticket_num):\n \"\"\"Метод для внесения лотирейного билета в базу\"\"\"\n <|body_0|>\n\n def get(self, ticket_num):\n \"\"\"Метод для получения информации о лотирейном билете\"\"\"\n <|body_1|>\n\n def patch(self, ticket_num):\n \"\"\"Метод для изменения статуса билета (вызвается во время покупки билета в ларьке)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n global DataBase\n json_data = lottery_ns.payload\n if ticket_num not in DataBase:\n ticket_fields = json_data['fields']\n ticket_price = json_data['price']\n if len(ticket_fields) != 3:\n return ('Количество полей в билете должно быть равно 3', 400)\n new_ticket = Ticket(ticket_num, ticket_fields, ticket_price)\n DataBase[ticket_num] = new_ticket\n return new_ticket.jsonify()\n else:\n return ('Билет с этим номером уже есть в базе', 400)\n<|end_body_0|>\n\n<|body_start_1|>\n global DataBase\n if ticket_num in DataBase:\n return DataBase[ticket_num].jsonify()\n else:\n return ('Лотерейный билет не найден', 
404)\n<|end_body_1|>\n\n<|body_start_2|>\n global DataBase\n if ticket_num in DataBase:\n DataBase[ticket_num].sell()\n return DataBase[ticket_num].jsonify()\n else:\n lottery_ns.abort(404)\n<|end_body_2|>\n", "revision_id": "c61442cf1c0fd383a959ed607c0bf6b39323fd06", "skeleton": "<|skeleton|>\nclass TicketResource:\n\n def post(self, ticket_num):\n \"\"\"Метод для внесения лотирейного билета в базу\"\"\"\n <|body_0|>\n\n def get(self, ticket_num):\n \"\"\"Метод для получения информации о лотирейном билете\"\"\"\n <|body_1|>\n\n def patch(self, ticket_num):\n \"\"\"Метод для изменения статуса билета (вызвается во время покупки билета в ларьке)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TicketResource:\n def post(self, ticket_num):\n \"\"\"Метод для внесения лотирейного билета в базу\"\"\"\n global DataBase\n json_data = lottery_ns.payload\n if ticket_num not in DataBase:\n ticket_fields = json_data['fields']\n ticket_price = json_data['price']\n if len(ticket_fields) != 3:\n return ('Количество полей в билете должно быть равно 3', 400)\n new_ticket = Ticket(ticket_num, ticket_fields, ticket_price)\n DataBase[ticket_num] = new_ticket\n return new_ticket.jsonify()\n else:\n return ('Билет с этим номером уже есть в базе', 400)\n\n def get(self, ticket_num):\n \"\"\"Метод для получения информации о лотирейном билете\"\"\"\n global DataBase\n if ticket_num in DataBase:\n return DataBase[ticket_num].jsonify()\n else:\n return ('Лотерейный билет не найден', 404)\n\n def patch(self, ticket_num):\n \"\"\"Метод для изменения статуса билета (вызвается во время покупки билета в ларьке)\"\"\"\n global DataBase\n if ticket_num in DataBase:\n DataBase[ticket_num].sell()\n return DataBase[ticket_num].jsonify()\n else:\n lottery_ns.abort(404)\n", "source": "the_stack_v2_python_sparse", "source_path": "flaskapp/lottery/lottery.py", "source_repo": "hisa-prog/lab4", "split": "val", "star_events_count": 0} {"blob_id": "f202d324831654600cd27d22b6b610efccfe9cf5", "bodies": ["super(_RFCN_header, self).__init__(input_dim, n_classes, class_ag)\nself.position_sensitive_score_map = nn.Conv2d(input_dim, k ** 2 * n_classes, kernel_size=1)\nif class_ag:\n self.position_sensitive_bbox_map = nn.Conv2d(input_dim, k ** 2 * 4, kernel_size=1)\nelse:\n self.position_sensitive_bbox_map = nn.Conv2d(input_dim, k ** 2 * 4 * n_classes, kernel_size=1)\nself.k = k", "feat = x[0]\nrois = x[1]\nscore_map = self.position_sensitive_score_map(feat)\nbbox_map = self.position_sensitive_bbox_map(feat)"], "bodies_text": "<|body_start_0|>\n super(_RFCN_header, self).__init__(input_dim, n_classes, class_ag)\n self.position_sensitive_score_map = nn.Conv2d(input_dim, k ** 2 * n_classes, kernel_size=1)\n if class_ag:\n self.position_sensitive_bbox_map = nn.Conv2d(input_dim, k ** 2 * 4, kernel_size=1)\n else:\n self.position_sensitive_bbox_map = nn.Conv2d(input_dim, k ** 2 * 4 * n_classes, kernel_size=1)\n self.k = k\n<|end_body_0|>\n\n<|body_start_1|>\n feat = x[0]\n rois = x[1]\n score_map = self.position_sensitive_score_map(feat)\n bbox_map = self.position_sensitive_bbox_map(feat)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "_RFCN_header", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass _RFCN_header:\n\n def __init__(self, input_dim, n_classes, class_ag, k=3):\n \"\"\":param input_dim: feature map channel number :param n_classes: :param 
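The `TicketResource` record above follows a common Flask-RESTX pattern: a module-level dict as the in-memory store, handlers that return (body, status) tuples, and the namespace's abort helper for hard failures. A hedged sketch of that pattern, with hypothetical names (tickets_ns, TICKETS) and English messages, assuming flask-restx is installed:

# Sketch of the in-memory-store Resource pattern from the record.
from flask import Flask
from flask_restx import Api, Namespace, Resource

app = Flask(__name__)
api = Api(app)
tickets_ns = Namespace('tickets')
api.add_namespace(tickets_ns)

TICKETS = {}  # ticket_num -> ticket dict; stands in for the record's DataBase

@tickets_ns.route('/<int:ticket_num>')
class TicketResource(Resource):
    def post(self, ticket_num):
        # Reject duplicates, mirroring the record's 400 on existing keys.
        if ticket_num in TICKETS:
            return 'Ticket already exists', 400
        TICKETS[ticket_num] = dict(tickets_ns.payload, sold=False)
        return TICKETS[ticket_num]

    def get(self, ticket_num):
        if ticket_num not in TICKETS:
            return 'Ticket not found', 404
        return TICKETS[ticket_num]

    def patch(self, ticket_num):
        # Mark the ticket as sold, or 404 via the namespace helper.
        if ticket_num not in TICKETS:
            tickets_ns.abort(404)
        TICKETS[ticket_num]['sold'] = True
        return TICKETS[ticket_num]

One design note: a plain module-level dict that is only read and mutated needs no `global` declarations; the record's `global DataBase` statements would matter only if the name were reassigned inside the handlers.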
class_ag: :param k: grid size\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\":param feat: [batch_size, channel, H, W] :param rois: [batch_size, num, 5] :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(_RFCN_header, self).__init__(input_dim, n_classes, class_ag)\n self.position_sensitive_score_map = nn.Conv2d(input_dim, k ** 2 * n_classes, kernel_size=1)\n if class_ag:\n self.position_sensitive_bbox_map = nn.Conv2d(input_dim, k ** 2 * 4, kernel_size=1)\n else:\n self.position_sensitive_bbox_map = nn.Conv2d(input_dim, k ** 2 * 4 * n_classes, kernel_size=1)\n self.k = k\n<|end_body_0|>\n\n<|body_start_1|>\n feat = x[0]\n rois = x[1]\n score_map = self.position_sensitive_score_map(feat)\n bbox_map = self.position_sensitive_bbox_map(feat)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000432", "length_bytes": 1501, "license_type": "permissive", "methods": [{"docstring": ":param input_dim: feature map channel number :param n_classes: :param class_ag: :param k: grid size", "name": "__init__", "signature": "def __init__(self, input_dim, n_classes, class_ag, k=3)"}, {"docstring": ":param feat: [batch_size, channel, H, W] :param rois: [batch_size, num, 5] :return:", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006563", "prompt": "Implement the Python class `_RFCN_header` described below.\n\nClass description:\nImplement the _RFCN_header class.\n\nMethod signatures and docstrings:\n- def __init__(self, input_dim, n_classes, class_ag, k=3): :param input_dim: feature map channel number :param n_classes: :param class_ag: :param k: grid size\n- def forward(self, x): :param feat: [batch_size, channel, H, W] :param rois: [batch_size, num, 5] :return:", "prompted_full_text": "Implement the Python class `_RFCN_header` described below.\n\nClass description:\nImplement the _RFCN_header class.\n\nMethod signatures and docstrings:\n- def __init__(self, input_dim, n_classes, class_ag, k=3): :param input_dim: feature map channel number :param n_classes: :param class_ag: :param k: grid size\n- def forward(self, x): :param feat: [batch_size, channel, H, W] :param rois: [batch_size, num, 5] :return:\n\n<|skeleton|>\nclass _RFCN_header:\n\n def __init__(self, input_dim, n_classes, class_ag, k=3):\n \"\"\":param input_dim: feature map channel number :param n_classes: :param class_ag: :param k: grid size\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\":param feat: [batch_size, channel, H, W] :param rois: [batch_size, num, 5] :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(_RFCN_header, self).__init__(input_dim, n_classes, class_ag)\n self.position_sensitive_score_map = nn.Conv2d(input_dim, k ** 2 * n_classes, kernel_size=1)\n if class_ag:\n self.position_sensitive_bbox_map = nn.Conv2d(input_dim, k ** 2 * 4, kernel_size=1)\n else:\n self.position_sensitive_bbox_map = nn.Conv2d(input_dim, k ** 2 * 4 * n_classes, kernel_size=1)\n self.k = k\n<|end_body_0|>\n\n<|body_start_1|>\n feat = x[0]\n rois = x[1]\n score_map = self.position_sensitive_score_map(feat)\n bbox_map = self.position_sensitive_bbox_map(feat)\n<|end_body_1|>\n", "revision_id": "f66c38c00405b22cb746cc3f5c38d2b49f77d854", "skeleton": "<|skeleton|>\nclass _RFCN_header:\n\n def __init__(self, input_dim, n_classes, class_ag, k=3):\n \"\"\":param input_dim: feature map channel number :param n_classes: :param class_ag: :param k: grid size\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\":param feat: 
[batch_size, channel, H, W] :param rois: [batch_size, num, 5] :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class _RFCN_header:\n def __init__(self, input_dim, n_classes, class_ag, k=3):\n \"\"\":param input_dim: feature map channel number :param n_classes: :param class_ag: :param k: grid size\"\"\"\n super(_RFCN_header, self).__init__(input_dim, n_classes, class_ag)\n self.position_sensitive_score_map = nn.Conv2d(input_dim, k ** 2 * n_classes, kernel_size=1)\n if class_ag:\n self.position_sensitive_bbox_map = nn.Conv2d(input_dim, k ** 2 * 4, kernel_size=1)\n else:\n self.position_sensitive_bbox_map = nn.Conv2d(input_dim, k ** 2 * 4 * n_classes, kernel_size=1)\n self.k = k\n\n def forward(self, x):\n \"\"\":param feat: [batch_size, channel, H, W] :param rois: [batch_size, num, 5] :return:\"\"\"\n feat = x[0]\n rois = x[1]\n score_map = self.position_sensitive_score_map(feat)\n bbox_map = self.position_sensitive_bbox_map(feat)\n", "source": "the_stack_v2_python_sparse", "source_path": "build/lib.linux-x86_64-3.5/model/header/RFCN.py", "source_repo": "moli1026/regrad", "split": "val", "star_events_count": 1} {"blob_id": "a626a9a56a02f87d97b7dc242bbcc957c99f69ad", "bodies": ["DenseVectorPrf.__init__(self)\nself.alpha = alpha\nself.beta = beta\nself.gamma = gamma\nself.topk = topk\nself.bottomk = bottomk", "all_candidate_embs = [item.vectors for item in prf_candidates]\nweighted_query_embs = self.alpha * emb_qs\nweighted_mean_pos_doc_embs = self.beta * np.mean(all_candidate_embs[:self.topk], axis=0)\nnew_emb_q = weighted_query_embs + weighted_mean_pos_doc_embs\nif self.bottomk > 0:\n weighted_mean_neg_doc_embs = self.gamma * np.mean(all_candidate_embs[-self.bottomk:], axis=0)\n new_emb_q -= weighted_mean_neg_doc_embs\nreturn new_emb_q", "qids = list()\nnew_emb_qs = list()\nfor index, topic_id in enumerate(topic_ids):\n qids.append(topic_id)\n new_emb_qs.append(self.get_prf_q_emb(emb_qs[index], prf_candidates[topic_id]))\nnew_emb_qs = np.array(new_emb_qs).astype('float32')\nreturn new_emb_qs"], "bodies_text": "<|body_start_0|>\n DenseVectorPrf.__init__(self)\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n self.topk = topk\n self.bottomk = bottomk\n<|end_body_0|>\n\n<|body_start_1|>\n all_candidate_embs = [item.vectors for item in prf_candidates]\n weighted_query_embs = self.alpha * emb_qs\n weighted_mean_pos_doc_embs = self.beta * np.mean(all_candidate_embs[:self.topk], axis=0)\n new_emb_q = weighted_query_embs + weighted_mean_pos_doc_embs\n if self.bottomk > 0:\n weighted_mean_neg_doc_embs = self.gamma * np.mean(all_candidate_embs[-self.bottomk:], axis=0)\n new_emb_q -= weighted_mean_neg_doc_embs\n return new_emb_q\n<|end_body_1|>\n\n<|body_start_2|>\n qids = list()\n new_emb_qs = list()\n for index, topic_id in enumerate(topic_ids):\n qids.append(topic_id)\n new_emb_qs.append(self.get_prf_q_emb(emb_qs[index], prf_candidates[topic_id]))\n new_emb_qs = np.array(new_emb_qs).astype('float32')\n return new_emb_qs\n<|end_body_2|>\n", "class_docstring": "", "class_name": "DenseVectorRocchioPrf", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DenseVectorRocchioPrf:\n\n def __init__(self, alpha: float, beta: float, gamma: float, topk: int, bottomk: int):\n \"\"\"Parameters ---------- alpha : float Rocchio parameter, controls the weight assigned to the original query embedding. 
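The `_RFCN_header` record above allocates its 1x1 convolutions so that every class (and every box coordinate) gets k*k position-sensitive maps. The PyTorch sketch below checks only that channel bookkeeping; the tensor sizes are assumptions, and the PS-RoI pooling step that would consume these maps is omitted, as it is in the record's forward.

# Channel bookkeeping for R-FCN position-sensitive maps: 1x1 convs emit
# k*k maps per class (scores) and per box coordinate (regression).
import torch
import torch.nn as nn

k, n_classes, input_dim = 3, 21, 512
score_map_conv = nn.Conv2d(input_dim, k * k * n_classes, kernel_size=1)
# Class-agnostic regression: k*k maps for each of the 4 box offsets.
bbox_map_conv = nn.Conv2d(input_dim, k * k * 4, kernel_size=1)

feat = torch.randn(2, input_dim, 38, 50)      # [batch, C, H, W]
score_map = score_map_conv(feat)              # [2, k*k*n_classes, H, W]
bbox_map = bbox_map_conv(feat)                # [2, k*k*4, H, W]
assert score_map.shape[1] == k * k * n_classes
assert bbox_map.shape[1] == k * k * 4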
beta : float Rocchio parameter, controls the weight assigned to the positive document embeddings. gamma : float Rocchio parameter, controls the weight assigned to the negative document embeddings. topk : int Rocchio parameter, set topk documents as positive document feedbacks. bottomk : int Rocchio parameter, set bottomk documents as negative document feedbacks.\"\"\"\n <|body_0|>\n\n def get_prf_q_emb(self, emb_qs: np.ndarray=None, prf_candidates: List[PRFDenseSearchResult]=None):\n \"\"\"Perform Rocchio PRF with Dense Vectors Parameters ---------- emb_qs : np.ndarray query embedding prf_candidates : List[PRFDenseSearchResult] List of PRFDenseSearchResult, contains document embeddings. Returns ------- np.ndarray return new query embeddings\"\"\"\n <|body_1|>\n\n def get_batch_prf_q_emb(self, topic_ids: List[str]=None, emb_qs: np.ndarray=None, prf_candidates: Dict[str, List[PRFDenseSearchResult]]=None):\n \"\"\"Perform Rocchio PRF with Dense Vectors Parameters ---------- topic_ids : List[str] List of topic ids. emb_qs : np.ndarray Query embeddings prf_candidates : List[PRFDenseSearchResult] List of PRFDenseSearchResult, contains document embeddings. Returns ------- np.ndarray return new query embeddings\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n DenseVectorPrf.__init__(self)\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n self.topk = topk\n self.bottomk = bottomk\n<|end_body_0|>\n\n<|body_start_1|>\n all_candidate_embs = [item.vectors for item in prf_candidates]\n weighted_query_embs = self.alpha * emb_qs\n weighted_mean_pos_doc_embs = self.beta * np.mean(all_candidate_embs[:self.topk], axis=0)\n new_emb_q = weighted_query_embs + weighted_mean_pos_doc_embs\n if self.bottomk > 0:\n weighted_mean_neg_doc_embs = self.gamma * np.mean(all_candidate_embs[-self.bottomk:], axis=0)\n new_emb_q -= weighted_mean_neg_doc_embs\n return new_emb_q\n<|end_body_1|>\n\n<|body_start_2|>\n qids = list()\n new_emb_qs = list()\n for index, topic_id in enumerate(topic_ids):\n qids.append(topic_id)\n new_emb_qs.append(self.get_prf_q_emb(emb_qs[index], prf_candidates[topic_id]))\n new_emb_qs = np.array(new_emb_qs).astype('float32')\n return new_emb_qs\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000433", "length_bytes": 7539, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- alpha : float Rocchio parameter, controls the weight assigned to the original query embedding. beta : float Rocchio parameter, controls the weight assigned to the positive document embeddings. gamma : float Rocchio parameter, controls the weight assigned to the negative document embeddings. topk : int Rocchio parameter, set topk documents as positive document feedbacks. bottomk : int Rocchio parameter, set bottomk documents as negative document feedbacks.", "name": "__init__", "signature": "def __init__(self, alpha: float, beta: float, gamma: float, topk: int, bottomk: int)"}, {"docstring": "Perform Rocchio PRF with Dense Vectors Parameters ---------- emb_qs : np.ndarray query embedding prf_candidates : List[PRFDenseSearchResult] List of PRFDenseSearchResult, contains document embeddings. Returns ------- np.ndarray return new query embeddings", "name": "get_prf_q_emb", "signature": "def get_prf_q_emb(self, emb_qs: np.ndarray=None, prf_candidates: List[PRFDenseSearchResult]=None)"}, {"docstring": "Perform Rocchio PRF with Dense Vectors Parameters ---------- topic_ids : List[str] List of topic ids. 
emb_qs : np.ndarray Query embeddings prf_candidates : List[PRFDenseSearchResult] List of PRFDenseSearchResult, contains document embeddings. Returns ------- np.ndarray return new query embeddings", "name": "get_batch_prf_q_emb", "signature": "def get_batch_prf_q_emb(self, topic_ids: List[str]=None, emb_qs: np.ndarray=None, prf_candidates: Dict[str, List[PRFDenseSearchResult]]=None)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000741", "prompt": "Implement the Python class `DenseVectorRocchioPrf` described below.\n\nClass description:\nImplement the DenseVectorRocchioPrf class.\n\nMethod signatures and docstrings:\n- def __init__(self, alpha: float, beta: float, gamma: float, topk: int, bottomk: int): Parameters ---------- alpha : float Rocchio parameter, controls the weight assigned to the original query embedding. beta : float Rocchio parameter, controls the weight assigned to the positive document embeddings. gamma : float Rocchio parameter, controls the weight assigned to the negative document embeddings. topk : int Rocchio parameter, set topk documents as positive document feedbacks. bottomk : int Rocchio parameter, set bottomk documents as negative document feedbacks.\n- def get_prf_q_emb(self, emb_qs: np.ndarray=None, prf_candidates: List[PRFDenseSearchResult]=None): Perform Rocchio PRF with Dense Vectors Parameters ---------- emb_qs : np.ndarray query embedding prf_candidates : List[PRFDenseSearchResult] List of PRFDenseSearchResult, contains document embeddings. Returns ------- np.ndarray return new query embeddings\n- def get_batch_prf_q_emb(self, topic_ids: List[str]=None, emb_qs: np.ndarray=None, prf_candidates: Dict[str, List[PRFDenseSearchResult]]=None): Perform Rocchio PRF with Dense Vectors Parameters ---------- topic_ids : List[str] List of topic ids. emb_qs : np.ndarray Query embeddings prf_candidates : List[PRFDenseSearchResult] List of PRFDenseSearchResult, contains document embeddings. Returns ------- np.ndarray return new query embeddings", "prompted_full_text": "Implement the Python class `DenseVectorRocchioPrf` described below.\n\nClass description:\nImplement the DenseVectorRocchioPrf class.\n\nMethod signatures and docstrings:\n- def __init__(self, alpha: float, beta: float, gamma: float, topk: int, bottomk: int): Parameters ---------- alpha : float Rocchio parameter, controls the weight assigned to the original query embedding. beta : float Rocchio parameter, controls the weight assigned to the positive document embeddings. gamma : float Rocchio parameter, controls the weight assigned to the negative document embeddings. topk : int Rocchio parameter, set topk documents as positive document feedbacks. bottomk : int Rocchio parameter, set bottomk documents as negative document feedbacks.\n- def get_prf_q_emb(self, emb_qs: np.ndarray=None, prf_candidates: List[PRFDenseSearchResult]=None): Perform Rocchio PRF with Dense Vectors Parameters ---------- emb_qs : np.ndarray query embedding prf_candidates : List[PRFDenseSearchResult] List of PRFDenseSearchResult, contains document embeddings. Returns ------- np.ndarray return new query embeddings\n- def get_batch_prf_q_emb(self, topic_ids: List[str]=None, emb_qs: np.ndarray=None, prf_candidates: Dict[str, List[PRFDenseSearchResult]]=None): Perform Rocchio PRF with Dense Vectors Parameters ---------- topic_ids : List[str] List of topic ids. emb_qs : np.ndarray Query embeddings prf_candidates : List[PRFDenseSearchResult] List of PRFDenseSearchResult, contains document embeddings. 
Returns ------- np.ndarray return new query embeddings\n\n<|skeleton|>\nclass DenseVectorRocchioPrf:\n\n def __init__(self, alpha: float, beta: float, gamma: float, topk: int, bottomk: int):\n \"\"\"Parameters ---------- alpha : float Rocchio parameter, controls the weight assigned to the original query embedding. beta : float Rocchio parameter, controls the weight assigned to the positive document embeddings. gamma : float Rocchio parameter, controls the weight assigned to the negative document embeddings. topk : int Rocchio parameter, set topk documents as positive document feedbacks. bottomk : int Rocchio parameter, set bottomk documents as negative document feedbacks.\"\"\"\n <|body_0|>\n\n def get_prf_q_emb(self, emb_qs: np.ndarray=None, prf_candidates: List[PRFDenseSearchResult]=None):\n \"\"\"Perform Rocchio PRF with Dense Vectors Parameters ---------- emb_qs : np.ndarray query embedding prf_candidates : List[PRFDenseSearchResult] List of PRFDenseSearchResult, contains document embeddings. Returns ------- np.ndarray return new query embeddings\"\"\"\n <|body_1|>\n\n def get_batch_prf_q_emb(self, topic_ids: List[str]=None, emb_qs: np.ndarray=None, prf_candidates: Dict[str, List[PRFDenseSearchResult]]=None):\n \"\"\"Perform Rocchio PRF with Dense Vectors Parameters ---------- topic_ids : List[str] List of topic ids. emb_qs : np.ndarray Query embeddings prf_candidates : List[PRFDenseSearchResult] List of PRFDenseSearchResult, contains document embeddings. Returns ------- np.ndarray return new query embeddings\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n DenseVectorPrf.__init__(self)\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n self.topk = topk\n self.bottomk = bottomk\n<|end_body_0|>\n\n<|body_start_1|>\n all_candidate_embs = [item.vectors for item in prf_candidates]\n weighted_query_embs = self.alpha * emb_qs\n weighted_mean_pos_doc_embs = self.beta * np.mean(all_candidate_embs[:self.topk], axis=0)\n new_emb_q = weighted_query_embs + weighted_mean_pos_doc_embs\n if self.bottomk > 0:\n weighted_mean_neg_doc_embs = self.gamma * np.mean(all_candidate_embs[-self.bottomk:], axis=0)\n new_emb_q -= weighted_mean_neg_doc_embs\n return new_emb_q\n<|end_body_1|>\n\n<|body_start_2|>\n qids = list()\n new_emb_qs = list()\n for index, topic_id in enumerate(topic_ids):\n qids.append(topic_id)\n new_emb_qs.append(self.get_prf_q_emb(emb_qs[index], prf_candidates[topic_id]))\n new_emb_qs = np.array(new_emb_qs).astype('float32')\n return new_emb_qs\n<|end_body_2|>\n", "revision_id": "42b354914b230880c91b2e4e70605b472441a9a1", "skeleton": "<|skeleton|>\nclass DenseVectorRocchioPrf:\n\n def __init__(self, alpha: float, beta: float, gamma: float, topk: int, bottomk: int):\n \"\"\"Parameters ---------- alpha : float Rocchio parameter, controls the weight assigned to the original query embedding. beta : float Rocchio parameter, controls the weight assigned to the positive document embeddings. gamma : float Rocchio parameter, controls the weight assigned to the negative document embeddings. topk : int Rocchio parameter, set topk documents as positive document feedbacks. 
bottomk : int Rocchio parameter, set bottomk documents as negative document feedbacks.\"\"\"\n <|body_0|>\n\n def get_prf_q_emb(self, emb_qs: np.ndarray=None, prf_candidates: List[PRFDenseSearchResult]=None):\n \"\"\"Perform Rocchio PRF with Dense Vectors Parameters ---------- emb_qs : np.ndarray query embedding prf_candidates : List[PRFDenseSearchResult] List of PRFDenseSearchResult, contains document embeddings. Returns ------- np.ndarray return new query embeddings\"\"\"\n <|body_1|>\n\n def get_batch_prf_q_emb(self, topic_ids: List[str]=None, emb_qs: np.ndarray=None, prf_candidates: Dict[str, List[PRFDenseSearchResult]]=None):\n \"\"\"Perform Rocchio PRF with Dense Vectors Parameters ---------- topic_ids : List[str] List of topic ids. emb_qs : np.ndarray Query embeddings prf_candidates : List[PRFDenseSearchResult] List of PRFDenseSearchResult, contains document embeddings. Returns ------- np.ndarray return new query embeddings\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DenseVectorRocchioPrf:\n def __init__(self, alpha: float, beta: float, gamma: float, topk: int, bottomk: int):\n \"\"\"Parameters ---------- alpha : float Rocchio parameter, controls the weight assigned to the original query embedding. beta : float Rocchio parameter, controls the weight assigned to the positive document embeddings. gamma : float Rocchio parameter, controls the weight assigned to the negative document embeddings. topk : int Rocchio parameter, set topk documents as positive document feedbacks. bottomk : int Rocchio parameter, set bottomk documents as negative document feedbacks.\"\"\"\n DenseVectorPrf.__init__(self)\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n self.topk = topk\n self.bottomk = bottomk\n\n def get_prf_q_emb(self, emb_qs: np.ndarray=None, prf_candidates: List[PRFDenseSearchResult]=None):\n \"\"\"Perform Rocchio PRF with Dense Vectors Parameters ---------- emb_qs : np.ndarray query embedding prf_candidates : List[PRFDenseSearchResult] List of PRFDenseSearchResult, contains document embeddings. Returns ------- np.ndarray return new query embeddings\"\"\"\n all_candidate_embs = [item.vectors for item in prf_candidates]\n weighted_query_embs = self.alpha * emb_qs\n weighted_mean_pos_doc_embs = self.beta * np.mean(all_candidate_embs[:self.topk], axis=0)\n new_emb_q = weighted_query_embs + weighted_mean_pos_doc_embs\n if self.bottomk > 0:\n weighted_mean_neg_doc_embs = self.gamma * np.mean(all_candidate_embs[-self.bottomk:], axis=0)\n new_emb_q -= weighted_mean_neg_doc_embs\n return new_emb_q\n\n def get_batch_prf_q_emb(self, topic_ids: List[str]=None, emb_qs: np.ndarray=None, prf_candidates: Dict[str, List[PRFDenseSearchResult]]=None):\n \"\"\"Perform Rocchio PRF with Dense Vectors Parameters ---------- topic_ids : List[str] List of topic ids. emb_qs : np.ndarray Query embeddings prf_candidates : List[PRFDenseSearchResult] List of PRFDenseSearchResult, contains document embeddings. 
Returns ------- np.ndarray return new query embeddings\"\"\"\n qids = list()\n new_emb_qs = list()\n for index, topic_id in enumerate(topic_ids):\n qids.append(topic_id)\n new_emb_qs.append(self.get_prf_q_emb(emb_qs[index], prf_candidates[topic_id]))\n new_emb_qs = np.array(new_emb_qs).astype('float32')\n return new_emb_qs\n", "source": "the_stack_v2_python_sparse", "source_path": "pyserini/search/faiss/_prf.py", "source_repo": "castorini/pyserini", "split": "val", "star_events_count": 1070} {"blob_id": "5d2f370d683940fbc8d0ebda5f3087c824c3d769", "bodies": ["BISECT_LOG = 'git bisect start\\n# status: waiting for both good and bad commits\\n# good: [c1] Fake good commit\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c2] Fake bad commit\\ngit bisect bad c2\\n# first bad commit: [c2] Fake bad commit\\n'\ngit = MockableGitController()\ngit._git = MagicMock(return_value=(0, '', ''))\ngit.bisect_view = MagicMock(return_value=[(CommitHash('c1'), 'Commit description'), (CommitHash('c2'), 'Commit description')])\ngit.bisect_log = MagicMock(return_value=BisectLog(BISECT_LOG))\nsession = BisectSession(git, cache_keys=set(['c1', 'c2']))\nresult: BisectLog = session.run('c1', 'c2', ['fast_script.sh'])\nself.assertEqual(result.candidates, ['c2'])\ngit._git.assert_has_calls([call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c2'), call('bisect', 'run', 'fast_script.sh'), call('bisect', 'reset')])\ngit.bisect_log.assert_called_once_with()\ngit.bisect_view.assert_called_once_with()", "BISECT_LOG = 'git bisect start\\n# status: waiting for both good and bad commits\\n# skip: [c2] Skipped commit\\ngit bisect skip c2\\n# good: [c1] Fake good commit\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c3] Fake bad commit\\ngit bisect bad c3\\n# only skipped commits left to test\\n# possible first bad commit: [c2] Skipped commit\\n# possible first bad commit: [c3] Fake bad commit\\n'\ngit = MockableGitController()\ngit._git = MagicMock(return_value=(0, '', ''))\ngit.bisect_view = MagicMock(return_value=[(CommitHash('c1'), 'Fake good commit'), (CommitHash('c2'), 'Skipped commit'), (CommitHash('c3'), 'Fake bad commit')])\ngit.bisect_log = MagicMock(return_value=BisectLog(BISECT_LOG))\nsession = BisectSession(git, cache_keys=set(['c1', 'c3']))\nbisect_log = session.run('c1', 'c3', ['fast_script.sh'])\nself.assertEqual(bisect_log.candidates, ['c2', 'c3'])\ngit._git.assert_has_calls([call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c3'), call('bisect', 'skip', 'c2'), call('bisect', 'run', 'fast_script.sh')])\ngit.bisect_log.assert_called_once_with()\ngit.bisect_view.assert_called_once_with()", "BISECT_LOG_1 = 'git bisect start\\n# status: waiting for both good and bad commits\\n# good: [c1] Commit 1\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c5] Commit 5\\ngit bisect bad c5\\n# skip: [c3] Commit 3\\ngit bisect skip c3\\n# skip: [c4] Commit 4\\ngit bisect skip c4\\n# good: [c2] Commit 2\\ngit bisect good c2\\n# only skipped commits left to test\\n# possible first bad commit: [c3] Commit 3\\n# possible first bad commit: [c4] Commit 4\\n'\nBISECT_LOG_2 = 'git bisect start\\n# status: waiting for both good and bad commits\\n# good: [c1] Commit 1\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c5] Commit 5\\ngit bisect bad c5\\n# good: [c2] Commit 2\\ngit bisect good c2\\n# good: [c3] Commit 3\\ngit bisect good 
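The `DenseVectorRocchioPrf` record above implements the Rocchio update q' = alpha*q + beta*mean(top-k doc embeddings) - gamma*mean(bottom-k doc embeddings). A standalone NumPy sketch of that formula, with random vectors as placeholder data:

# Rocchio pseudo-relevance feedback over dense embeddings.
import numpy as np

def rocchio_prf(q, doc_embs, alpha, beta, gamma, topk, bottomk):
    # doc_embs is assumed sorted by retrieval score, best first, matching
    # the record's use of leading/trailing slices.
    new_q = alpha * q + beta * doc_embs[:topk].mean(axis=0)
    if bottomk > 0:
        new_q -= gamma * doc_embs[-bottomk:].mean(axis=0)
    return new_q.astype('float32')

rng = np.random.default_rng(0)
q = rng.standard_normal(768)
docs = rng.standard_normal((10, 768))
new_q = rocchio_prf(q, docs, alpha=1.0, beta=0.75, gamma=0.25,
                    topk=3, bottomk=2)
assert new_q.shape == q.shape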
c3\\n# bad: [c4] Commit 4\\ngit bisect bad c4\\n# first bad commit: [c4] Commit 4\\n'\nparsed_bisect_logs = [BisectLog(BISECT_LOG_1), BisectLog(BISECT_LOG_2)]\ngit = MockableGitController()\ngit._git = MagicMock(return_value=(0, '', ''))\ngit.bisect_log = MagicMock(side_effect=parsed_bisect_logs)\ngit.bisect_view = MagicMock(return_value=[(CommitHash('c1'), 'Commit 1'), (CommitHash('c2'), 'Commit 2'), (CommitHash('c3'), 'Commit 3'), (CommitHash('c4'), 'Commit 4'), (CommitHash('c5'), 'Commit 5')])\nsession = BisectSession(git, cache_keys=set(['c1', 'c2']))\nbisect_log = session.run('c1', 'c5', ['fast_script.sh'], ['slow_script.sh'])\nself.assertEqual(bisect_log.candidates, ['c4'])\ngit._git.assert_has_calls([call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c5'), call('bisect', 'skip', 'c3', 'c4', 'c5'), call('bisect', 'run', 'fast_script.sh'), call('bisect', 'reset'), call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c5'), call('bisect', 'good', 'c2'), call('bisect', 'run', 'slow_script.sh')])\ngit.bisect_log.assert_has_calls([call(), call()])\ngit.bisect_view.assert_called_once_with()"], "bodies_text": "<|body_start_0|>\n BISECT_LOG = 'git bisect start\\n# status: waiting for both good and bad commits\\n# good: [c1] Fake good commit\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c2] Fake bad commit\\ngit bisect bad c2\\n# first bad commit: [c2] Fake bad commit\\n'\n git = MockableGitController()\n git._git = MagicMock(return_value=(0, '', ''))\n git.bisect_view = MagicMock(return_value=[(CommitHash('c1'), 'Commit description'), (CommitHash('c2'), 'Commit description')])\n git.bisect_log = MagicMock(return_value=BisectLog(BISECT_LOG))\n session = BisectSession(git, cache_keys=set(['c1', 'c2']))\n result: BisectLog = session.run('c1', 'c2', ['fast_script.sh'])\n self.assertEqual(result.candidates, ['c2'])\n git._git.assert_has_calls([call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c2'), call('bisect', 'run', 'fast_script.sh'), call('bisect', 'reset')])\n git.bisect_log.assert_called_once_with()\n git.bisect_view.assert_called_once_with()\n<|end_body_0|>\n\n<|body_start_1|>\n BISECT_LOG = 'git bisect start\\n# status: waiting for both good and bad commits\\n# skip: [c2] Skipped commit\\ngit bisect skip c2\\n# good: [c1] Fake good commit\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c3] Fake bad commit\\ngit bisect bad c3\\n# only skipped commits left to test\\n# possible first bad commit: [c2] Skipped commit\\n# possible first bad commit: [c3] Fake bad commit\\n'\n git = MockableGitController()\n git._git = MagicMock(return_value=(0, '', ''))\n git.bisect_view = MagicMock(return_value=[(CommitHash('c1'), 'Fake good commit'), (CommitHash('c2'), 'Skipped commit'), (CommitHash('c3'), 'Fake bad commit')])\n git.bisect_log = MagicMock(return_value=BisectLog(BISECT_LOG))\n session = BisectSession(git, cache_keys=set(['c1', 'c3']))\n bisect_log = session.run('c1', 'c3', ['fast_script.sh'])\n self.assertEqual(bisect_log.candidates, ['c2', 'c3'])\n git._git.assert_has_calls([call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c3'), call('bisect', 'skip', 'c2'), call('bisect', 'run', 'fast_script.sh')])\n git.bisect_log.assert_called_once_with()\n git.bisect_view.assert_called_once_with()\n<|end_body_1|>\n\n<|body_start_2|>\n BISECT_LOG_1 = 'git bisect start\\n# status: waiting for both good and bad commits\\n# 
good: [c1] Commit 1\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c5] Commit 5\\ngit bisect bad c5\\n# skip: [c3] Commit 3\\ngit bisect skip c3\\n# skip: [c4] Commit 4\\ngit bisect skip c4\\n# good: [c2] Commit 2\\ngit bisect good c2\\n# only skipped commits left to test\\n# possible first bad commit: [c3] Commit 3\\n# possible first bad commit: [c4] Commit 4\\n'\n BISECT_LOG_2 = 'git bisect start\\n# status: waiting for both good and bad commits\\n# good: [c1] Commit 1\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c5] Commit 5\\ngit bisect bad c5\\n# good: [c2] Commit 2\\ngit bisect good c2\\n# good: [c3] Commit 3\\ngit bisect good c3\\n# bad: [c4] Commit 4\\ngit bisect bad c4\\n# first bad commit: [c4] Commit 4\\n'\n parsed_bisect_logs = [BisectLog(BISECT_LOG_1), BisectLog(BISECT_LOG_2)]\n git = MockableGitController()\n git._git = MagicMock(return_value=(0, '', ''))\n git.bisect_log = MagicMock(side_effect=parsed_bisect_logs)\n git.bisect_view = MagicMock(return_value=[(CommitHash('c1'), 'Commit 1'), (CommitHash('c2'), 'Commit 2'), (CommitHash('c3'), 'Commit 3'), (CommitHash('c4'), 'Commit 4'), (CommitHash('c5'), 'Commit 5')])\n session = BisectSession(git, cache_keys=set(['c1', 'c2']))\n bisect_log = session.run('c1', 'c5', ['fast_script.sh'], ['slow_script.sh'])\n self.assertEqual(bisect_log.candidates, ['c4'])\n git._git.assert_has_calls([call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c5'), call('bisect', 'skip', 'c3', 'c4', 'c5'), call('bisect', 'run', 'fast_script.sh'), call('bisect', 'reset'), call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c5'), call('bisect', 'good', 'c2'), call('bisect', 'run', 'slow_script.sh')])\n git.bisect_log.assert_has_calls([call(), call()])\n git.bisect_view.assert_called_once_with()\n<|end_body_2|>\n", "class_docstring": "", "class_name": "TestBisectSession", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestBisectSession:\n\n def test_two_cached_only_fast(self):\n \"\"\"Simple case with two cached commits and only the fast command.\"\"\"\n <|body_0|>\n\n def test_only_fast(self):\n \"\"\"Mix of cached and uncached with only the fast command.\"\"\"\n <|body_1|>\n\n def test_fast_and_slow(self):\n \"\"\"Mix of cached and uncached with fast and slow command.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n BISECT_LOG = 'git bisect start\\n# status: waiting for both good and bad commits\\n# good: [c1] Fake good commit\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c2] Fake bad commit\\ngit bisect bad c2\\n# first bad commit: [c2] Fake bad commit\\n'\n git = MockableGitController()\n git._git = MagicMock(return_value=(0, '', ''))\n git.bisect_view = MagicMock(return_value=[(CommitHash('c1'), 'Commit description'), (CommitHash('c2'), 'Commit description')])\n git.bisect_log = MagicMock(return_value=BisectLog(BISECT_LOG))\n session = BisectSession(git, cache_keys=set(['c1', 'c2']))\n result: BisectLog = session.run('c1', 'c2', ['fast_script.sh'])\n self.assertEqual(result.candidates, ['c2'])\n git._git.assert_has_calls([call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c2'), call('bisect', 'run', 'fast_script.sh'), call('bisect', 'reset')])\n git.bisect_log.assert_called_once_with()\n 
git.bisect_view.assert_called_once_with()\n<|end_body_0|>\n\n<|body_start_1|>\n BISECT_LOG = 'git bisect start\\n# status: waiting for both good and bad commits\\n# skip: [c2] Skipped commit\\ngit bisect skip c2\\n# good: [c1] Fake good commit\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c3] Fake bad commit\\ngit bisect bad c3\\n# only skipped commits left to test\\n# possible first bad commit: [c2] Skipped commit\\n# possible first bad commit: [c3] Fake bad commit\\n'\n git = MockableGitController()\n git._git = MagicMock(return_value=(0, '', ''))\n git.bisect_view = MagicMock(return_value=[(CommitHash('c1'), 'Fake good commit'), (CommitHash('c2'), 'Skipped commit'), (CommitHash('c3'), 'Fake bad commit')])\n git.bisect_log = MagicMock(return_value=BisectLog(BISECT_LOG))\n session = BisectSession(git, cache_keys=set(['c1', 'c3']))\n bisect_log = session.run('c1', 'c3', ['fast_script.sh'])\n self.assertEqual(bisect_log.candidates, ['c2', 'c3'])\n git._git.assert_has_calls([call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c3'), call('bisect', 'skip', 'c2'), call('bisect', 'run', 'fast_script.sh')])\n git.bisect_log.assert_called_once_with()\n git.bisect_view.assert_called_once_with()\n<|end_body_1|>\n\n<|body_start_2|>\n BISECT_LOG_1 = 'git bisect start\\n# status: waiting for both good and bad commits\\n# good: [c1] Commit 1\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c5] Commit 5\\ngit bisect bad c5\\n# skip: [c3] Commit 3\\ngit bisect skip c3\\n# skip: [c4] Commit 4\\ngit bisect skip c4\\n# good: [c2] Commit 2\\ngit bisect good c2\\n# only skipped commits left to test\\n# possible first bad commit: [c3] Commit 3\\n# possible first bad commit: [c4] Commit 4\\n'\n BISECT_LOG_2 = 'git bisect start\\n# status: waiting for both good and bad commits\\n# good: [c1] Commit 1\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c5] Commit 5\\ngit bisect bad c5\\n# good: [c2] Commit 2\\ngit bisect good c2\\n# good: [c3] Commit 3\\ngit bisect good c3\\n# bad: [c4] Commit 4\\ngit bisect bad c4\\n# first bad commit: [c4] Commit 4\\n'\n parsed_bisect_logs = [BisectLog(BISECT_LOG_1), BisectLog(BISECT_LOG_2)]\n git = MockableGitController()\n git._git = MagicMock(return_value=(0, '', ''))\n git.bisect_log = MagicMock(side_effect=parsed_bisect_logs)\n git.bisect_view = MagicMock(return_value=[(CommitHash('c1'), 'Commit 1'), (CommitHash('c2'), 'Commit 2'), (CommitHash('c3'), 'Commit 3'), (CommitHash('c4'), 'Commit 4'), (CommitHash('c5'), 'Commit 5')])\n session = BisectSession(git, cache_keys=set(['c1', 'c2']))\n bisect_log = session.run('c1', 'c5', ['fast_script.sh'], ['slow_script.sh'])\n self.assertEqual(bisect_log.candidates, ['c4'])\n git._git.assert_has_calls([call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c5'), call('bisect', 'skip', 'c3', 'c4', 'c5'), call('bisect', 'run', 'fast_script.sh'), call('bisect', 'reset'), call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c5'), call('bisect', 'good', 'c2'), call('bisect', 'run', 'slow_script.sh')])\n git.bisect_log.assert_has_calls([call(), call()])\n git.bisect_view.assert_called_once_with()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000434", "length_bytes": 13456, "license_type": "permissive", "methods": [{"docstring": "Simple case with two cached commits and only the fast command.", "name": "test_two_cached_only_fast", "signature": 
"def test_two_cached_only_fast(self)"}, {"docstring": "Mix of cached and uncached with only the fast command.", "name": "test_only_fast", "signature": "def test_only_fast(self)"}, {"docstring": "Mix of cached and uncached with fast and slow command.", "name": "test_fast_and_slow", "signature": "def test_fast_and_slow(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005995", "prompt": "Implement the Python class `TestBisectSession` described below.\n\nClass description:\nImplement the TestBisectSession class.\n\nMethod signatures and docstrings:\n- def test_two_cached_only_fast(self): Simple case with two cached commits and only the fast command.\n- def test_only_fast(self): Mix of cached and uncached with only the fast command.\n- def test_fast_and_slow(self): Mix of cached and uncached with fast and slow command.", "prompted_full_text": "Implement the Python class `TestBisectSession` described below.\n\nClass description:\nImplement the TestBisectSession class.\n\nMethod signatures and docstrings:\n- def test_two_cached_only_fast(self): Simple case with two cached commits and only the fast command.\n- def test_only_fast(self): Mix of cached and uncached with only the fast command.\n- def test_fast_and_slow(self): Mix of cached and uncached with fast and slow command.\n\n<|skeleton|>\nclass TestBisectSession:\n\n def test_two_cached_only_fast(self):\n \"\"\"Simple case with two cached commits and only the fast command.\"\"\"\n <|body_0|>\n\n def test_only_fast(self):\n \"\"\"Mix of cached and uncached with only the fast command.\"\"\"\n <|body_1|>\n\n def test_fast_and_slow(self):\n \"\"\"Mix of cached and uncached with fast and slow command.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n BISECT_LOG = 'git bisect start\\n# status: waiting for both good and bad commits\\n# good: [c1] Fake good commit\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c2] Fake bad commit\\ngit bisect bad c2\\n# first bad commit: [c2] Fake bad commit\\n'\n git = MockableGitController()\n git._git = MagicMock(return_value=(0, '', ''))\n git.bisect_view = MagicMock(return_value=[(CommitHash('c1'), 'Commit description'), (CommitHash('c2'), 'Commit description')])\n git.bisect_log = MagicMock(return_value=BisectLog(BISECT_LOG))\n session = BisectSession(git, cache_keys=set(['c1', 'c2']))\n result: BisectLog = session.run('c1', 'c2', ['fast_script.sh'])\n self.assertEqual(result.candidates, ['c2'])\n git._git.assert_has_calls([call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c2'), call('bisect', 'run', 'fast_script.sh'), call('bisect', 'reset')])\n git.bisect_log.assert_called_once_with()\n git.bisect_view.assert_called_once_with()\n<|end_body_0|>\n\n<|body_start_1|>\n BISECT_LOG = 'git bisect start\\n# status: waiting for both good and bad commits\\n# skip: [c2] Skipped commit\\ngit bisect skip c2\\n# good: [c1] Fake good commit\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c3] Fake bad commit\\ngit bisect bad c3\\n# only skipped commits left to test\\n# possible first bad commit: [c2] Skipped commit\\n# possible first bad commit: [c3] Fake bad commit\\n'\n git = MockableGitController()\n git._git = MagicMock(return_value=(0, '', ''))\n git.bisect_view = MagicMock(return_value=[(CommitHash('c1'), 'Fake good commit'), (CommitHash('c2'), 'Skipped commit'), (CommitHash('c3'), 'Fake bad commit')])\n git.bisect_log = MagicMock(return_value=BisectLog(BISECT_LOG))\n session = 
BisectSession(git, cache_keys=set(['c1', 'c3']))\n bisect_log = session.run('c1', 'c3', ['fast_script.sh'])\n self.assertEqual(bisect_log.candidates, ['c2', 'c3'])\n git._git.assert_has_calls([call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c3'), call('bisect', 'skip', 'c2'), call('bisect', 'run', 'fast_script.sh')])\n git.bisect_log.assert_called_once_with()\n git.bisect_view.assert_called_once_with()\n<|end_body_1|>\n\n<|body_start_2|>\n BISECT_LOG_1 = 'git bisect start\\n# status: waiting for both good and bad commits\\n# good: [c1] Commit 1\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c5] Commit 5\\ngit bisect bad c5\\n# skip: [c3] Commit 3\\ngit bisect skip c3\\n# skip: [c4] Commit 4\\ngit bisect skip c4\\n# good: [c2] Commit 2\\ngit bisect good c2\\n# only skipped commits left to test\\n# possible first bad commit: [c3] Commit 3\\n# possible first bad commit: [c4] Commit 4\\n'\n BISECT_LOG_2 = 'git bisect start\\n# status: waiting for both good and bad commits\\n# good: [c1] Commit 1\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c5] Commit 5\\ngit bisect bad c5\\n# good: [c2] Commit 2\\ngit bisect good c2\\n# good: [c3] Commit 3\\ngit bisect good c3\\n# bad: [c4] Commit 4\\ngit bisect bad c4\\n# first bad commit: [c4] Commit 4\\n'\n parsed_bisect_logs = [BisectLog(BISECT_LOG_1), BisectLog(BISECT_LOG_2)]\n git = MockableGitController()\n git._git = MagicMock(return_value=(0, '', ''))\n git.bisect_log = MagicMock(side_effect=parsed_bisect_logs)\n git.bisect_view = MagicMock(return_value=[(CommitHash('c1'), 'Commit 1'), (CommitHash('c2'), 'Commit 2'), (CommitHash('c3'), 'Commit 3'), (CommitHash('c4'), 'Commit 4'), (CommitHash('c5'), 'Commit 5')])\n session = BisectSession(git, cache_keys=set(['c1', 'c2']))\n bisect_log = session.run('c1', 'c5', ['fast_script.sh'], ['slow_script.sh'])\n self.assertEqual(bisect_log.candidates, ['c4'])\n git._git.assert_has_calls([call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c5'), call('bisect', 'skip', 'c3', 'c4', 'c5'), call('bisect', 'run', 'fast_script.sh'), call('bisect', 'reset'), call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c5'), call('bisect', 'good', 'c2'), call('bisect', 'run', 'slow_script.sh')])\n git.bisect_log.assert_has_calls([call(), call()])\n git.bisect_view.assert_called_once_with()\n<|end_body_2|>\n", "revision_id": "51f6017b8425b14d5a4aa9abace8fe5a25ef08c8", "skeleton": "<|skeleton|>\nclass TestBisectSession:\n\n def test_two_cached_only_fast(self):\n \"\"\"Simple case with two cached commits and only the fast command.\"\"\"\n <|body_0|>\n\n def test_only_fast(self):\n \"\"\"Mix of cached and uncached with only the fast command.\"\"\"\n <|body_1|>\n\n def test_fast_and_slow(self):\n \"\"\"Mix of cached and uncached with fast and slow command.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestBisectSession:\n def test_two_cached_only_fast(self):\n \"\"\"Simple case with two cached commits and only the fast command.\"\"\"\n BISECT_LOG = 'git bisect start\\n# status: waiting for both good and bad commits\\n# good: [c1] Fake good commit\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c2] Fake bad commit\\ngit bisect bad c2\\n# first bad commit: [c2] Fake bad commit\\n'\n git = MockableGitController()\n 
git._git = MagicMock(return_value=(0, '', ''))\n git.bisect_view = MagicMock(return_value=[(CommitHash('c1'), 'Commit description'), (CommitHash('c2'), 'Commit description')])\n git.bisect_log = MagicMock(return_value=BisectLog(BISECT_LOG))\n session = BisectSession(git, cache_keys=set(['c1', 'c2']))\n result: BisectLog = session.run('c1', 'c2', ['fast_script.sh'])\n self.assertEqual(result.candidates, ['c2'])\n git._git.assert_has_calls([call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c2'), call('bisect', 'run', 'fast_script.sh'), call('bisect', 'reset')])\n git.bisect_log.assert_called_once_with()\n git.bisect_view.assert_called_once_with()\n\n def test_only_fast(self):\n \"\"\"Mix of cached and uncached with only the fast command.\"\"\"\n BISECT_LOG = 'git bisect start\\n# status: waiting for both good and bad commits\\n# skip: [c2] Skipped commit\\ngit bisect skip c2\\n# good: [c1] Fake good commit\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c3] Fake bad commit\\ngit bisect bad c3\\n# only skipped commits left to test\\n# possible first bad commit: [c2] Skipped commit\\n# possible first bad commit: [c3] Fake bad commit\\n'\n git = MockableGitController()\n git._git = MagicMock(return_value=(0, '', ''))\n git.bisect_view = MagicMock(return_value=[(CommitHash('c1'), 'Fake good commit'), (CommitHash('c2'), 'Skipped commit'), (CommitHash('c3'), 'Fake bad commit')])\n git.bisect_log = MagicMock(return_value=BisectLog(BISECT_LOG))\n session = BisectSession(git, cache_keys=set(['c1', 'c3']))\n bisect_log = session.run('c1', 'c3', ['fast_script.sh'])\n self.assertEqual(bisect_log.candidates, ['c2', 'c3'])\n git._git.assert_has_calls([call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c3'), call('bisect', 'skip', 'c2'), call('bisect', 'run', 'fast_script.sh')])\n git.bisect_log.assert_called_once_with()\n git.bisect_view.assert_called_once_with()\n\n def test_fast_and_slow(self):\n \"\"\"Mix of cached and uncached with fast and slow command.\"\"\"\n BISECT_LOG_1 = 'git bisect start\\n# status: waiting for both good and bad commits\\n# good: [c1] Commit 1\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c5] Commit 5\\ngit bisect bad c5\\n# skip: [c3] Commit 3\\ngit bisect skip c3\\n# skip: [c4] Commit 4\\ngit bisect skip c4\\n# good: [c2] Commit 2\\ngit bisect good c2\\n# only skipped commits left to test\\n# possible first bad commit: [c3] Commit 3\\n# possible first bad commit: [c4] Commit 4\\n'\n BISECT_LOG_2 = 'git bisect start\\n# status: waiting for both good and bad commits\\n# good: [c1] Commit 1\\ngit bisect good c1\\n# status: waiting for bad commit, 1 good commit known\\n# bad: [c5] Commit 5\\ngit bisect bad c5\\n# good: [c2] Commit 2\\ngit bisect good c2\\n# good: [c3] Commit 3\\ngit bisect good c3\\n# bad: [c4] Commit 4\\ngit bisect bad c4\\n# first bad commit: [c4] Commit 4\\n'\n parsed_bisect_logs = [BisectLog(BISECT_LOG_1), BisectLog(BISECT_LOG_2)]\n git = MockableGitController()\n git._git = MagicMock(return_value=(0, '', ''))\n git.bisect_log = MagicMock(side_effect=parsed_bisect_logs)\n git.bisect_view = MagicMock(return_value=[(CommitHash('c1'), 'Commit 1'), (CommitHash('c2'), 'Commit 2'), (CommitHash('c3'), 'Commit 3'), (CommitHash('c4'), 'Commit 4'), (CommitHash('c5'), 'Commit 5')])\n session = BisectSession(git, cache_keys=set(['c1', 'c2']))\n bisect_log = session.run('c1', 'c5', ['fast_script.sh'], ['slow_script.sh'])\n 
self.assertEqual(bisect_log.candidates, ['c4'])\n git._git.assert_has_calls([call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c5'), call('bisect', 'skip', 'c3', 'c4', 'c5'), call('bisect', 'run', 'fast_script.sh'), call('bisect', 'reset'), call('bisect', 'start'), call('bisect', 'good', 'c1'), call('bisect', 'bad', 'c5'), call('bisect', 'good', 'c2'), call('bisect', 'run', 'slow_script.sh')])\n git.bisect_log.assert_has_calls([call(), call()])\n git.bisect_view.assert_called_once_with()\n", "source": "the_stack_v2_python_sparse", "source_path": "util/fpga/bitstream_bisect_test.py", "source_repo": "lowRISC/opentitan", "split": "val", "star_events_count": 2077} {"blob_id": "8d5074a7628de6b7e78e90809a9c21125afb7af5", "bodies": ["startTime = datetime.datetime.now()\nclient = dml.pymongo.MongoClient()\nrepo = client.repo\nrepo.authenticate('ajr10_williami', 'ajr10_williami')\nrepo.dropCollection('ajr10_williami.cleaned_trees_cambridge')\nrepo.createCollection('ajr10_williami.cleaned_trees_cambridge')\nrepo.dropCollection('ajr10_williami.cleaned_trees_boston')\nrepo.createCollection('ajr10_williami.cleaned_trees_boston')\ntrees_cambridge = repo['ajr10_williami.trees_cambridge'].find()\ntrees_boston = repo['ajr10_williami.trees_boston'].find().limit(50)\nfor cambridge_tree in trees_cambridge:\n coords = cambridge_tree['the_geom']['coordinates']\n new_tree = {}\n new_tree['longitude'] = coords[0]\n new_tree['latitude'] = coords[1]\n repo['ajr10_williami.cleaned_trees_cambridge'].insert(new_tree)\nfor boston_tree in trees_boston:\n coords = boston_tree['geometry']['coordinates']\n new_tree = {}\n new_tree['longitude'] = coords[0]\n new_tree['latitude'] = coords[1]\n repo['ajr10_williami.cleaned_trees_boston'].insert(new_tree)\nrepo.logout()\nendTime = datetime.datetime.now()\nreturn {'start': startTime, 'end': endTime}", "client = dml.pymongo.MongoClient()\nrepo = client.repo\nrepo.authenticate('ajr10_williami', 'ajr10_williami')\ndoc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\ndoc.add_namespace('dat', 'http://datamechanics.io/data/')\ndoc.add_namespace('ont', 'http://datamechanics.io/ontology#')\ndoc.add_namespace('log', 'http://datamechanics.io/log/')\ndoc.add_namespace('awc', 'ajr10_williami')\nthis_script = doc.agent('alg:ajr10_williami#clean_trees', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\ntrees_cambridge_resource = doc.entity('awc:trees_cambridge', {'prov:label': 'cleaned_trees_cambridge, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\ntrees_boston_resource = doc.entity('awc:trees_boston', {'prov:label': 'cleaned_trees_boston, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\nget_trees_cambridge = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\nget_trees_boston = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\ndoc.wasAssociatedWith(get_trees_cambridge, this_script)\ndoc.wasAssociatedWith(get_trees_boston, this_script)\ndoc.usage(get_trees_cambridge, trees_cambridge_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Clean+Trees+Cambridge'})\ndoc.usage(get_trees_boston, trees_boston_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Clean+Trees+Boston'})\nclean_trees_cambridge = doc.entity('dat:ajr10_williami#cleaned_trees_cambridge', {prov.model.PROV_LABEL: 'Cleaned Trees Cambridge', prov.model.PROV_TYPE: 
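The fixture strings in the TestBisectSession record above follow the plain-text format of `git bisect log`: a finished run ends in a single `# first bad commit:` line, while a run with skipped commits ends in one `# possible first bad commit:` line per surviving candidate. A minimal sketch of pulling those candidate hashes out of such a log; `bisect_candidates` is an illustrative helper, not the record's actual BisectLog parser:

    import re

    def bisect_candidates(log_text):
        # Match both terminal forms seen in the fixtures:
        #   "# first bad commit: [c4] Commit 4"
        #   "# possible first bad commit: [c3] Commit 3"
        pattern = re.compile(r'^# (?:possible )?first bad commit: \[(\w+)\]',
                             re.MULTILINE)
        return pattern.findall(log_text)

Against BISECT_LOG_1 this yields ['c3', 'c4'], and against BISECT_LOG_2 it yields ['c4'], matching the candidate lists the tests assert on.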
'ont:DataSet'})\ndoc.wasAttributedTo(clean_trees_cambridge, this_script)\ndoc.wasGeneratedBy(clean_trees_cambridge, get_trees_cambridge, endTime)\ndoc.wasDerivedFrom(clean_trees_cambridge, trees_cambridge_resource, get_trees_cambridge, get_trees_cambridge, get_trees_cambridge)\nclean_trees_boston = doc.entity('dat:ajr10_williami#cleaned_trees_boston', {prov.model.PROV_LABEL: 'Cleaned Trees Boston', prov.model.PROV_TYPE: 'ont:DataSet'})\ndoc.wasAttributedTo(clean_trees_boston, this_script)\ndoc.wasGeneratedBy(clean_trees_boston, get_trees_boston, endTime)\ndoc.wasDerivedFrom(clean_trees_boston, trees_boston_resource, get_trees_boston, get_trees_boston, get_trees_boston)\nrepo.logout()\nreturn doc"], "bodies_text": "<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_williami', 'ajr10_williami')\n repo.dropCollection('ajr10_williami.cleaned_trees_cambridge')\n repo.createCollection('ajr10_williami.cleaned_trees_cambridge')\n repo.dropCollection('ajr10_williami.cleaned_trees_boston')\n repo.createCollection('ajr10_williami.cleaned_trees_boston')\n trees_cambridge = repo['ajr10_williami.trees_cambridge'].find()\n trees_boston = repo['ajr10_williami.trees_boston'].find().limit(50)\n for cambridge_tree in trees_cambridge:\n coords = cambridge_tree['the_geom']['coordinates']\n new_tree = {}\n new_tree['longitude'] = coords[0]\n new_tree['latitude'] = coords[1]\n repo['ajr10_williami.cleaned_trees_cambridge'].insert(new_tree)\n for boston_tree in trees_boston:\n coords = boston_tree['geometry']['coordinates']\n new_tree = {}\n new_tree['longitude'] = coords[0]\n new_tree['latitude'] = coords[1]\n repo['ajr10_williami.cleaned_trees_boston'].insert(new_tree)\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_williami', 'ajr10_williami')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('awc', 'ajr10_williami')\n this_script = doc.agent('alg:ajr10_williami#clean_trees', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n trees_cambridge_resource = doc.entity('awc:trees_cambridge', {'prov:label': 'cleaned_trees_cambridge, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n trees_boston_resource = doc.entity('awc:trees_boston', {'prov:label': 'cleaned_trees_boston, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n get_trees_cambridge = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n get_trees_boston = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_trees_cambridge, this_script)\n doc.wasAssociatedWith(get_trees_boston, this_script)\n doc.usage(get_trees_cambridge, trees_cambridge_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Clean+Trees+Cambridge'})\n doc.usage(get_trees_boston, trees_boston_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Clean+Trees+Boston'})\n clean_trees_cambridge = doc.entity('dat:ajr10_williami#cleaned_trees_cambridge', {prov.model.PROV_LABEL: 'Cleaned Trees Cambridge', 
prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(clean_trees_cambridge, this_script)\n doc.wasGeneratedBy(clean_trees_cambridge, get_trees_cambridge, endTime)\n doc.wasDerivedFrom(clean_trees_cambridge, trees_cambridge_resource, get_trees_cambridge, get_trees_cambridge, get_trees_cambridge)\n clean_trees_boston = doc.entity('dat:ajr10_williami#cleaned_trees_boston', {prov.model.PROV_LABEL: 'Cleaned Trees Boston', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(clean_trees_boston, this_script)\n doc.wasGeneratedBy(clean_trees_boston, get_trees_boston, endTime)\n doc.wasDerivedFrom(clean_trees_boston, trees_boston_resource, get_trees_boston, get_trees_boston, get_trees_boston)\n repo.logout()\n return doc\n<|end_body_1|>\n", "class_docstring": "", "class_name": "clean_trees", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass clean_trees:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets and store in mongodb collections.\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_williami', 'ajr10_williami')\n repo.dropCollection('ajr10_williami.cleaned_trees_cambridge')\n repo.createCollection('ajr10_williami.cleaned_trees_cambridge')\n repo.dropCollection('ajr10_williami.cleaned_trees_boston')\n repo.createCollection('ajr10_williami.cleaned_trees_boston')\n trees_cambridge = repo['ajr10_williami.trees_cambridge'].find()\n trees_boston = repo['ajr10_williami.trees_boston'].find().limit(50)\n for cambridge_tree in trees_cambridge:\n coords = cambridge_tree['the_geom']['coordinates']\n new_tree = {}\n new_tree['longitude'] = coords[0]\n new_tree['latitude'] = coords[1]\n repo['ajr10_williami.cleaned_trees_cambridge'].insert(new_tree)\n for boston_tree in trees_boston:\n coords = boston_tree['geometry']['coordinates']\n new_tree = {}\n new_tree['longitude'] = coords[0]\n new_tree['latitude'] = coords[1]\n repo['ajr10_williami.cleaned_trees_boston'].insert(new_tree)\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_williami', 'ajr10_williami')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('awc', 'ajr10_williami')\n this_script = doc.agent('alg:ajr10_williami#clean_trees', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n trees_cambridge_resource = doc.entity('awc:trees_cambridge', {'prov:label': 'cleaned_trees_cambridge, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n trees_boston_resource = doc.entity('awc:trees_boston', {'prov:label': 'cleaned_trees_boston, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n get_trees_cambridge = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n get_trees_boston = 
doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_trees_cambridge, this_script)\n doc.wasAssociatedWith(get_trees_boston, this_script)\n doc.usage(get_trees_cambridge, trees_cambridge_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Clean+Trees+Cambridge'})\n doc.usage(get_trees_boston, trees_boston_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Clean+Trees+Boston'})\n clean_trees_cambridge = doc.entity('dat:ajr10_williami#cleaned_trees_cambridge', {prov.model.PROV_LABEL: 'Cleaned Trees Cambridge', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(clean_trees_cambridge, this_script)\n doc.wasGeneratedBy(clean_trees_cambridge, get_trees_cambridge, endTime)\n doc.wasDerivedFrom(clean_trees_cambridge, trees_cambridge_resource, get_trees_cambridge, get_trees_cambridge, get_trees_cambridge)\n clean_trees_boston = doc.entity('dat:ajr10_williami#cleaned_trees_boston', {prov.model.PROV_LABEL: 'Cleaned Trees Boston', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(clean_trees_boston, this_script)\n doc.wasGeneratedBy(clean_trees_boston, get_trees_boston, endTime)\n doc.wasDerivedFrom(clean_trees_boston, trees_boston_resource, get_trees_boston, get_trees_boston, get_trees_boston)\n repo.logout()\n return doc\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000435", "length_bytes": 5428, "license_type": "no_license", "methods": [{"docstring": "Retrieve some data sets and store in mongodb collections.", "name": "execute", "signature": "def execute(trial=False)"}, {"docstring": "Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.", "name": "provenance", "signature": "def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006606", "prompt": "Implement the Python class `clean_trees` described below.\n\nClass description:\nImplement the clean_trees class.\n\nMethod signatures and docstrings:\n- def execute(trial=False): Retrieve some data sets and store in mongodb collections.\n- def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None): Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.", "prompted_full_text": "Implement the Python class `clean_trees` described below.\n\nClass description:\nImplement the clean_trees class.\n\nMethod signatures and docstrings:\n- def execute(trial=False): Retrieve some data sets and store in mongodb collections.\n- def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None): Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\n\n<|skeleton|>\nclass clean_trees:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets and store in mongodb collections.\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. 
Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_williami', 'ajr10_williami')\n repo.dropCollection('ajr10_williami.cleaned_trees_cambridge')\n repo.createCollection('ajr10_williami.cleaned_trees_cambridge')\n repo.dropCollection('ajr10_williami.cleaned_trees_boston')\n repo.createCollection('ajr10_williami.cleaned_trees_boston')\n trees_cambridge = repo['ajr10_williami.trees_cambridge'].find()\n trees_boston = repo['ajr10_williami.trees_boston'].find().limit(50)\n for cambridge_tree in trees_cambridge:\n coords = cambridge_tree['the_geom']['coordinates']\n new_tree = {}\n new_tree['longitude'] = coords[0]\n new_tree['latitude'] = coords[1]\n repo['ajr10_williami.cleaned_trees_cambridge'].insert(new_tree)\n for boston_tree in trees_boston:\n coords = boston_tree['geometry']['coordinates']\n new_tree = {}\n new_tree['longitude'] = coords[0]\n new_tree['latitude'] = coords[1]\n repo['ajr10_williami.cleaned_trees_boston'].insert(new_tree)\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_williami', 'ajr10_williami')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('awc', 'ajr10_williami')\n this_script = doc.agent('alg:ajr10_williami#clean_trees', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n trees_cambridge_resource = doc.entity('awc:trees_cambridge', {'prov:label': 'cleaned_trees_cambridge, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n trees_boston_resource = doc.entity('awc:trees_boston', {'prov:label': 'cleaned_trees_boston, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n get_trees_cambridge = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n get_trees_boston = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_trees_cambridge, this_script)\n doc.wasAssociatedWith(get_trees_boston, this_script)\n doc.usage(get_trees_cambridge, trees_cambridge_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Clean+Trees+Cambridge'})\n doc.usage(get_trees_boston, trees_boston_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Clean+Trees+Boston'})\n clean_trees_cambridge = doc.entity('dat:ajr10_williami#cleaned_trees_cambridge', {prov.model.PROV_LABEL: 'Cleaned Trees Cambridge', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(clean_trees_cambridge, this_script)\n doc.wasGeneratedBy(clean_trees_cambridge, get_trees_cambridge, endTime)\n doc.wasDerivedFrom(clean_trees_cambridge, trees_cambridge_resource, get_trees_cambridge, get_trees_cambridge, get_trees_cambridge)\n clean_trees_boston = doc.entity('dat:ajr10_williami#cleaned_trees_boston', {prov.model.PROV_LABEL: 'Cleaned Trees Boston', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(clean_trees_boston, this_script)\n doc.wasGeneratedBy(clean_trees_boston, get_trees_boston, endTime)\n 
doc.wasDerivedFrom(clean_trees_boston, trees_boston_resource, get_trees_boston, get_trees_boston, get_trees_boston)\n repo.logout()\n return doc\n<|end_body_1|>\n", "revision_id": "0df485d0469c5451ebdcd684bed2a0960ba3ab84", "skeleton": "<|skeleton|>\nclass clean_trees:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets and store in mongodb collections.\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class clean_trees:\n def execute(trial=False):\n \"\"\"Retrieve some data sets and store in mongodb collections.\"\"\"\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_williami', 'ajr10_williami')\n repo.dropCollection('ajr10_williami.cleaned_trees_cambridge')\n repo.createCollection('ajr10_williami.cleaned_trees_cambridge')\n repo.dropCollection('ajr10_williami.cleaned_trees_boston')\n repo.createCollection('ajr10_williami.cleaned_trees_boston')\n trees_cambridge = repo['ajr10_williami.trees_cambridge'].find()\n trees_boston = repo['ajr10_williami.trees_boston'].find().limit(50)\n for cambridge_tree in trees_cambridge:\n coords = cambridge_tree['the_geom']['coordinates']\n new_tree = {}\n new_tree['longitude'] = coords[0]\n new_tree['latitude'] = coords[1]\n repo['ajr10_williami.cleaned_trees_cambridge'].insert(new_tree)\n for boston_tree in trees_boston:\n coords = boston_tree['geometry']['coordinates']\n new_tree = {}\n new_tree['longitude'] = coords[0]\n new_tree['latitude'] = coords[1]\n repo['ajr10_williami.cleaned_trees_boston'].insert(new_tree)\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. 
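The provenance method in the clean_trees record follows the usual W3C PROV pattern with the `prov` package: declare namespaces, register the script as a software agent, create one activity per retrieval and one entity per derived dataset, then link them with association, generation, and attribution relations. A reduced sketch of the same call pattern for a single dataset, assuming only the documented `prov.model` API (the function name is illustrative):

    import datetime
    import uuid

    import prov.model

    def minimal_provenance():
        doc = prov.model.ProvDocument()
        doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')
        doc.add_namespace('dat', 'http://datamechanics.io/data/')
        doc.add_namespace('log', 'http://datamechanics.io/log/')
        start = end = datetime.datetime.now()
        # Agent (the script), activity (the retrieval), entity (the dataset).
        script = doc.agent('alg:ajr10_williami#clean_trees')
        retrieval = doc.activity('log:uuid' + str(uuid.uuid4()), start, end)
        dataset = doc.entity('dat:ajr10_williami#cleaned_trees_cambridge')
        doc.wasAssociatedWith(retrieval, script)
        doc.wasGeneratedBy(dataset, retrieval, end)
        doc.wasAttributedTo(dataset, script)
        return doc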
Each run of the script will generate a new document describing that invocation event.\"\"\"\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_williami', 'ajr10_williami')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('awc', 'ajr10_williami')\n this_script = doc.agent('alg:ajr10_williami#clean_trees', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n trees_cambridge_resource = doc.entity('awc:trees_cambridge', {'prov:label': 'cleaned_trees_cambridge, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n trees_boston_resource = doc.entity('awc:trees_boston', {'prov:label': 'cleaned_trees_boston, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n get_trees_cambridge = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n get_trees_boston = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_trees_cambridge, this_script)\n doc.wasAssociatedWith(get_trees_boston, this_script)\n doc.usage(get_trees_cambridge, trees_cambridge_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Clean+Trees+Cambridge'})\n doc.usage(get_trees_boston, trees_boston_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Clean+Trees+Boston'})\n clean_trees_cambridge = doc.entity('dat:ajr10_williami#cleaned_trees_cambridge', {prov.model.PROV_LABEL: 'Cleaned Trees Cambridge', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(clean_trees_cambridge, this_script)\n doc.wasGeneratedBy(clean_trees_cambridge, get_trees_cambridge, endTime)\n doc.wasDerivedFrom(clean_trees_cambridge, trees_cambridge_resource, get_trees_cambridge, get_trees_cambridge, get_trees_cambridge)\n clean_trees_boston = doc.entity('dat:ajr10_williami#cleaned_trees_boston', {prov.model.PROV_LABEL: 'Cleaned Trees Boston', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(clean_trees_boston, this_script)\n doc.wasGeneratedBy(clean_trees_boston, get_trees_boston, endTime)\n doc.wasDerivedFrom(clean_trees_boston, trees_boston_resource, get_trees_boston, get_trees_boston, get_trees_boston)\n repo.logout()\n return doc\n", "source": "the_stack_v2_python_sparse", "source_path": "ajr10_williami/clean_trees.py", "source_repo": "lingyigu/course-2017-spr-proj", "split": "val", "star_events_count": 0} {"blob_id": "353b928a0c095125f0c40e13d38bb35faa419df1", "bodies": ["left, right = (0, 19)\nwhile left <= right:\n mid = left + (right - left) / 2\n if 3 ** mid == n:\n return True\n elif 3 ** mid > n:\n right = mid - 1\n else:\n left = mid + 1\nreturn False", "while n > 1 and n % 3 == 0:\n n /= 3\nreturn n == 1"], "bodies_text": "<|body_start_0|>\n left, right = (0, 19)\n while left <= right:\n mid = left + (right - left) / 2\n if 3 ** mid == n:\n return True\n elif 3 ** mid > n:\n right = mid - 1\n else:\n left = mid + 1\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n while n > 1 and n % 3 == 0:\n n /= 3\n return n == 1\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def isPowerOfThree(self, n):\n \"\"\":type n: int 
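Both loops in clean_trees.execute perform the same projection: take the `coordinates` pair out of a GeoJSON-style geometry (`the_geom` for Cambridge, `geometry` for Boston) and store a flat `{longitude, latitude}` document. They also call `Collection.insert`, which PyMongo deprecated in 3.0 and removed in 4.0; a hedged sketch of the projection with the modern call (`flatten_tree` is an illustrative name):

    def flatten_tree(doc, geom_key):
        # GeoJSON points store [longitude, latitude].
        lon, lat = doc[geom_key]['coordinates'][:2]
        return {'longitude': lon, 'latitude': lat}

    # With a current PyMongo client:
    # for tree in repo['ajr10_williami.trees_cambridge'].find():
    #     repo['ajr10_williami.cleaned_trees_cambridge'].insert_one(
    #         flatten_tree(tree, 'the_geom'))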
:rtype: bool\"\"\"\n <|body_0|>\n\n def isPowerOfThree2(self, n):\n \"\"\":type n: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n left, right = (0, 19)\n while left <= right:\n mid = left + (right - left) / 2\n if 3 ** mid == n:\n return True\n elif 3 ** mid > n:\n right = mid - 1\n else:\n left = mid + 1\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n while n > 1 and n % 3 == 0:\n n /= 3\n return n == 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000436", "length_bytes": 597, "license_type": "permissive", "methods": [{"docstring": ":type n: int :rtype: bool", "name": "isPowerOfThree", "signature": "def isPowerOfThree(self, n)"}, {"docstring": ":type n: int :rtype: bool", "name": "isPowerOfThree2", "signature": "def isPowerOfThree2(self, n)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isPowerOfThree(self, n): :type n: int :rtype: bool\n- def isPowerOfThree2(self, n): :type n: int :rtype: bool", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isPowerOfThree(self, n): :type n: int :rtype: bool\n- def isPowerOfThree2(self, n): :type n: int :rtype: bool\n\n<|skeleton|>\nclass Solution:\n\n def isPowerOfThree(self, n):\n \"\"\":type n: int :rtype: bool\"\"\"\n <|body_0|>\n\n def isPowerOfThree2(self, n):\n \"\"\":type n: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n left, right = (0, 19)\n while left <= right:\n mid = left + (right - left) / 2\n if 3 ** mid == n:\n return True\n elif 3 ** mid > n:\n right = mid - 1\n else:\n left = mid + 1\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n while n > 1 and n % 3 == 0:\n n /= 3\n return n == 1\n<|end_body_1|>\n", "revision_id": "c8bf33af30569177c5276ffcd72a8d93ba4c402a", "skeleton": "<|skeleton|>\nclass Solution:\n\n def isPowerOfThree(self, n):\n \"\"\":type n: int :rtype: bool\"\"\"\n <|body_0|>\n\n def isPowerOfThree2(self, n):\n \"\"\":type n: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def isPowerOfThree(self, n):\n \"\"\":type n: int :rtype: bool\"\"\"\n left, right = (0, 19)\n while left <= right:\n mid = left + (right - left) / 2\n if 3 ** mid == n:\n return True\n elif 3 ** mid > n:\n right = mid - 1\n else:\n left = mid + 1\n return False\n\n def isPowerOfThree2(self, n):\n \"\"\":type n: int :rtype: bool\"\"\"\n while n > 1 and n % 3 == 0:\n n /= 3\n return n == 1\n", "source": "the_stack_v2_python_sparse", "source_path": "301-400/321-330/326-powerOfThree/powerOfThree.py", "source_repo": "xuychen/Leetcode", "split": "val", "star_events_count": 0} {"blob_id": "895d9301480807ae5130d3d45a0a9f04c98c63bc", "bodies": ["for letter in letters:\n if letter > target:\n return letter\nreturn letters[0]", "l, r = (0, len(letters))\nwhile l < r:\n m = l + (r - l) // 2\n if letters[m] > target:\n r = m\n else:\n l = m + 1\nif l == len(letters):\n return letters[0]\nreturn letters[l]"], "bodies_text": "<|body_start_0|>\n for letter in letters:\n if letter > target:\n return letter\n return letters[0]\n<|end_body_0|>\n\n<|body_start_1|>\n l, r = (0, len(letters))\n while l < r:\n m = l + (r - l) // 2\n if letters[m] > target:\n r = m\n else:\n l = m + 1\n if 
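The isPowerOfThree record above is Python 2 code: both `left + (right - left) / 2` and `n /= 3` rely on `/` being integer division, and under Python 3 they silently produce floats. A Python 3 rendering of the same exponent binary search; the bound 19 comes from 3**19 being the largest power of three below 2**31:

    def is_power_of_three(n: int) -> bool:
        # Binary-search the exponent instead of dividing n repeatedly.
        left, right = 0, 19
        while left <= right:
            mid = (left + right) // 2  # // keeps mid an int on Python 3
            power = 3 ** mid
            if power == n:
                return True
            if power > n:
                right = mid - 1
            else:
                left = mid + 1
        return False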
l == len(letters):\n return letters[0]\n return letters[l]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def nextGreatestLetter(self, letters: List[str], target: str) -> str:\n \"\"\"Sequential Search: O(n), Space: O(1)\"\"\"\n <|body_0|>\n\n def nextGreatestLetter(self, letters: List[str], target: str) -> str:\n \"\"\"Binary Search: O(logn), Space: O(1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for letter in letters:\n if letter > target:\n return letter\n return letters[0]\n<|end_body_0|>\n\n<|body_start_1|>\n l, r = (0, len(letters))\n while l < r:\n m = l + (r - l) // 2\n if letters[m] > target:\n r = m\n else:\n l = m + 1\n if l == len(letters):\n return letters[0]\n return letters[l]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000437", "length_bytes": 1312, "license_type": "no_license", "methods": [{"docstring": "Sequential Search: O(n), Space: O(1)", "name": "nextGreatestLetter", "signature": "def nextGreatestLetter(self, letters: List[str], target: str) -> str"}, {"docstring": "Binary Search: O(logn), Space: O(1)", "name": "nextGreatestLetter", "signature": "def nextGreatestLetter(self, letters: List[str], target: str) -> str"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def nextGreatestLetter(self, letters: List[str], target: str) -> str: Sequential Search: O(n), Space: O(1)\n- def nextGreatestLetter(self, letters: List[str], target: str) -> str: Binary Search: O(logn), Space: O(1)", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def nextGreatestLetter(self, letters: List[str], target: str) -> str: Sequential Search: O(n), Space: O(1)\n- def nextGreatestLetter(self, letters: List[str], target: str) -> str: Binary Search: O(logn), Space: O(1)\n\n<|skeleton|>\nclass Solution:\n\n def nextGreatestLetter(self, letters: List[str], target: str) -> str:\n \"\"\"Sequential Search: O(n), Space: O(1)\"\"\"\n <|body_0|>\n\n def nextGreatestLetter(self, letters: List[str], target: str) -> str:\n \"\"\"Binary Search: O(logn), Space: O(1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for letter in letters:\n if letter > target:\n return letter\n return letters[0]\n<|end_body_0|>\n\n<|body_start_1|>\n l, r = (0, len(letters))\n while l < r:\n m = l + (r - l) // 2\n if letters[m] > target:\n r = m\n else:\n l = m + 1\n if l == len(letters):\n return letters[0]\n return letters[l]\n<|end_body_1|>\n", "revision_id": "72136e3487d239f5b37e2d6393e034262a6bf599", "skeleton": "<|skeleton|>\nclass Solution:\n\n def nextGreatestLetter(self, letters: List[str], target: str) -> str:\n \"\"\"Sequential Search: O(n), Space: O(1)\"\"\"\n <|body_0|>\n\n def nextGreatestLetter(self, letters: List[str], target: str) -> str:\n \"\"\"Binary Search: O(logn), Space: O(1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def nextGreatestLetter(self, letters: List[str], target: str) -> str:\n \"\"\"Sequential Search: O(n), Space: O(1)\"\"\"\n for letter in letters:\n if letter > target:\n return letter\n return letters[0]\n\n def 
nextGreatestLetter(self, letters: List[str], target: str) -> str:\n \"\"\"Binary Search: O(logn), Space: O(1)\"\"\"\n l, r = (0, len(letters))\n while l < r:\n m = l + (r - l) // 2\n if letters[m] > target:\n r = m\n else:\n l = m + 1\n if l == len(letters):\n return letters[0]\n return letters[l]\n", "source": "the_stack_v2_python_sparse", "source_path": "python/744-Find Smallest Letter Greater Than Target.py", "source_repo": "cwza/leetcode", "split": "val", "star_events_count": 0} {"blob_id": "42480c4d93106030127c393894ff33ac1be20346", "bodies": ["super().__init__()\nself.embedding = tf.keras.layers.Embedding(vocab, embedding)\nself.gru = tf.keras.layers.GRU(units, recurrent_initializer='glorot_uniform', return_sequences=True, return_state=True)\nself.F = tf.keras.layers.Dense(vocab)", "attention = SelfAttention(s_prev.shape[1])\ncontext, weights = attention(s_prev, hidden_states)\nx = self.embedding(x)\nx = tf.concat([tf.expand_dims(context, 1), x], -1)\ndecode_out, state = self.gru(x)\ndecode_out = tf.reshape(decode_out, (-1, decode_out.shape[2]))\ny = self.F(decode_out)\nreturn (y, state)"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.embedding = tf.keras.layers.Embedding(vocab, embedding)\n self.gru = tf.keras.layers.GRU(units, recurrent_initializer='glorot_uniform', return_sequences=True, return_state=True)\n self.F = tf.keras.layers.Dense(vocab)\n<|end_body_0|>\n\n<|body_start_1|>\n attention = SelfAttention(s_prev.shape[1])\n context, weights = attention(s_prev, hidden_states)\n x = self.embedding(x)\n x = tf.concat([tf.expand_dims(context, 1), x], -1)\n decode_out, state = self.gru(x)\n decode_out = tf.reshape(decode_out, (-1, decode_out.shape[2]))\n y = self.F(decode_out)\n return (y, state)\n<|end_body_1|>\n", "class_docstring": "Decode for machine translation", "class_name": "RNNDecoder", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RNNDecoder:\n \"\"\"Decode for machine translation\"\"\"\n\n def __init__(self, vocab, embedding, units, batch):\n \"\"\"vocab is an integer representing the size of the decode_out vocabulary embedding is an integer representing the dimensionality of the embedding vector units is an integer representing the number of hidden units in the RNN cell batch is an integer representing the batch size\"\"\"\n <|body_0|>\n\n def call(self, x, s_prev, hidden_states):\n \"\"\"x is a tensor of shape (batch, 1) containing the previous word in the target sequence as an index of the target vocabulary. s_prev is a tensor of shape (batch, units) containing the previous decoder hidden state. 
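The binary-search variant of `nextGreatestLetter` is the textbook "leftmost element strictly greater than target" loop, and since `letters` is sorted the standard library expresses it directly. A sketch with `bisect_right`, where the modulo implements the wrap-around to `letters[0]`:

    from bisect import bisect_right
    from typing import List

    def next_greatest_letter(letters: List[str], target: str) -> str:
        # bisect_right returns the first index whose letter is > target.
        i = bisect_right(letters, target)
        return letters[i % len(letters)]

(As an aside, the record defines two methods with the same name in one class; in real Python the second definition simply shadows the first.)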
hidden_states is a tensor of shape (batch, input_seq_len, units) containing the outputs of the encoder.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.embedding = tf.keras.layers.Embedding(vocab, embedding)\n self.gru = tf.keras.layers.GRU(units, recurrent_initializer='glorot_uniform', return_sequences=True, return_state=True)\n self.F = tf.keras.layers.Dense(vocab)\n<|end_body_0|>\n\n<|body_start_1|>\n attention = SelfAttention(s_prev.shape[1])\n context, weights = attention(s_prev, hidden_states)\n x = self.embedding(x)\n x = tf.concat([tf.expand_dims(context, 1), x], -1)\n decode_out, state = self.gru(x)\n decode_out = tf.reshape(decode_out, (-1, decode_out.shape[2]))\n y = self.F(decode_out)\n return (y, state)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000438", "length_bytes": 1824, "license_type": "no_license", "methods": [{"docstring": "vocab is an integer representing the size of the decode_out vocabulary embedding is an integer representing the dimensionality of the embedding vector units is an integer representing the number of hidden units in the RNN cell batch is an integer representing the batch size", "name": "__init__", "signature": "def __init__(self, vocab, embedding, units, batch)"}, {"docstring": "x is a tensor of shape (batch, 1) containing the previous word in the target sequence as an index of the target vocabulary. s_prev is a tensor of shape (batch, units) containing the previous decoder hidden state. hidden_states is a tensor of shape (batch, input_seq_len, units) containing the outputs of the encoder.", "name": "call", "signature": "def call(self, x, s_prev, hidden_states)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002194", "prompt": "Implement the Python class `RNNDecoder` described below.\n\nClass description:\nDecode for machine translation\n\nMethod signatures and docstrings:\n- def __init__(self, vocab, embedding, units, batch): vocab is an integer representing the size of the decode_out vocabulary embedding is an integer representing the dimensionality of the embedding vector units is an integer representing the number of hidden units in the RNN cell batch is an integer representing the batch size\n- def call(self, x, s_prev, hidden_states): x is a tensor of shape (batch, 1) containing the previous word in the target sequence as an index of the target vocabulary. s_prev is a tensor of shape (batch, units) containing the previous decoder hidden state. hidden_states is a tensor of shape (batch, input_seq_len, units) containing the outputs of the encoder.", "prompted_full_text": "Implement the Python class `RNNDecoder` described below.\n\nClass description:\nDecode for machine translation\n\nMethod signatures and docstrings:\n- def __init__(self, vocab, embedding, units, batch): vocab is an integer representing the size of the decode_out vocabulary embedding is an integer representing the dimensionality of the embedding vector units is an integer representing the number of hidden units in the RNN cell batch is an integer representing the batch size\n- def call(self, x, s_prev, hidden_states): x is a tensor of shape (batch, 1) containing the previous word in the target sequence as an index of the target vocabulary. s_prev is a tensor of shape (batch, units) containing the previous decoder hidden state. 
hidden_states is a tensor of shape (batch, input_seq_len, units) containing the outputs of the encoder.\n\n<|skeleton|>\nclass RNNDecoder:\n \"\"\"Decode for machine translation\"\"\"\n\n def __init__(self, vocab, embedding, units, batch):\n \"\"\"vocab is an integer representing the size of the decode_out vocabulary embedding is an integer representing the dimensionality of the embedding vector units is an integer representing the number of hidden units in the RNN cell batch is an integer representing the batch size\"\"\"\n <|body_0|>\n\n def call(self, x, s_prev, hidden_states):\n \"\"\"x is a tensor of shape (batch, 1) containing the previous word in the target sequence as an index of the target vocabulary. s_prev is a tensor of shape (batch, units) containing the previous decoder hidden state. hidden_states is a tensor of shape (batch, input_seq_len, units) containing the outputs of the encoder.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.embedding = tf.keras.layers.Embedding(vocab, embedding)\n self.gru = tf.keras.layers.GRU(units, recurrent_initializer='glorot_uniform', return_sequences=True, return_state=True)\n self.F = tf.keras.layers.Dense(vocab)\n<|end_body_0|>\n\n<|body_start_1|>\n attention = SelfAttention(s_prev.shape[1])\n context, weights = attention(s_prev, hidden_states)\n x = self.embedding(x)\n x = tf.concat([tf.expand_dims(context, 1), x], -1)\n decode_out, state = self.gru(x)\n decode_out = tf.reshape(decode_out, (-1, decode_out.shape[2]))\n y = self.F(decode_out)\n return (y, state)\n<|end_body_1|>\n", "revision_id": "b0c18df889d8bd0c24d4bdbbd69be06bc5c0a918", "skeleton": "<|skeleton|>\nclass RNNDecoder:\n \"\"\"Decode for machine translation\"\"\"\n\n def __init__(self, vocab, embedding, units, batch):\n \"\"\"vocab is an integer representing the size of the decode_out vocabulary embedding is an integer representing the dimensionality of the embedding vector units is an integer representing the number of hidden units in the RNN cell batch is an integer representing the batch size\"\"\"\n <|body_0|>\n\n def call(self, x, s_prev, hidden_states):\n \"\"\"x is a tensor of shape (batch, 1) containing the previous word in the target sequence as an index of the target vocabulary. s_prev is a tensor of shape (batch, units) containing the previous decoder hidden state. hidden_states is a tensor of shape (batch, input_seq_len, units) containing the outputs of the encoder.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RNNDecoder:\n \"\"\"Decode for machine translation\"\"\"\n\n def __init__(self, vocab, embedding, units, batch):\n \"\"\"vocab is an integer representing the size of the decode_out vocabulary embedding is an integer representing the dimensionality of the embedding vector units is an integer representing the number of hidden units in the RNN cell batch is an integer representing the batch size\"\"\"\n super().__init__()\n self.embedding = tf.keras.layers.Embedding(vocab, embedding)\n self.gru = tf.keras.layers.GRU(units, recurrent_initializer='glorot_uniform', return_sequences=True, return_state=True)\n self.F = tf.keras.layers.Dense(vocab)\n\n def call(self, x, s_prev, hidden_states):\n \"\"\"x is a tensor of shape (batch, 1) containing the previous word in the target sequence as an index of the target vocabulary. s_prev is a tensor of shape (batch, units) containing the previous decoder hidden state. 
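RNNDecoder.call is one step of an attention-based decoder: score the encoder outputs against the previous state `s_prev`, prepend the resulting context vector to the embedded previous token, run the GRU, and project the output to vocabulary logits. Note the body constructs a fresh SelfAttention layer on every call, so its weights are re-initialized at each step; building the layer once in `__init__` would be the usual fix. A shape-level usage sketch, assuming RNNDecoder extends `tf.keras.layers.Layer` (as its `super().__init__()` call suggests) and using made-up sizes:

    import tensorflow as tf

    batch, units, vocab, embedding, seq_len = 32, 256, 8000, 128, 40  # hypothetical
    decoder = RNNDecoder(vocab, embedding, units, batch)
    x = tf.zeros((batch, 1), dtype=tf.int32)           # previous target token ids
    s_prev = tf.zeros((batch, units))                  # previous decoder state
    hidden_states = tf.zeros((batch, seq_len, units))  # encoder outputs
    y, s = decoder(x, s_prev, hidden_states)
    # y: (batch, vocab) logits for the next word; s: (batch, units) new GRU state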
hidden_states is a tensor of shape (batch, input_seq_len, units) containing the outputs of the encoder.\"\"\"\n attention = SelfAttention(s_prev.shape[1])\n context, weights = attention(s_prev, hidden_states)\n x = self.embedding(x)\n x = tf.concat([tf.expand_dims(context, 1), x], -1)\n decode_out, state = self.gru(x)\n decode_out = tf.reshape(decode_out, (-1, decode_out.shape[2]))\n y = self.F(decode_out)\n return (y, state)\n", "source": "the_stack_v2_python_sparse", "source_path": "supervised_learning/0x11-attention/2-rnn_decoder.py", "source_repo": "Gaspela/holbertonschool-machine_learning", "split": "val", "star_events_count": 0} {"blob_id": "d6a1b10239710e84255053a1949b25a24290514c", "bodies": ["if len(nums) <= 2:\n return target in nums\nelse:\n L = 0\n H = len(nums) - 1\n M = (L + H) // 2\n Lo = nums[L]\n Hi = nums[H]\n Mid = nums[M]\n if Lo <= Mid <= Hi:\n return self.search(nums[L:M], target) or self.search(nums[M:], target)\n elif Lo > Mid and Mid <= Hi:\n return self.search(nums[L:M], target) or self.binarysearch(nums[M:], target)\n elif Lo <= Mid and Mid > Hi:\n return self.binarysearch(nums[L:M + 1], target) or self.search(nums[M + 1:], target)", "L = 0\nH = len(nums) - 1\nwhile L <= H:\n M = (L + H) // 2\n if nums[M] == target:\n return True\n elif nums[M] < target:\n L = M + 1\n else:\n H = M - 1\nreturn False"], "bodies_text": "<|body_start_0|>\n if len(nums) <= 2:\n return target in nums\n else:\n L = 0\n H = len(nums) - 1\n M = (L + H) // 2\n Lo = nums[L]\n Hi = nums[H]\n Mid = nums[M]\n if Lo <= Mid <= Hi:\n return self.search(nums[L:M], target) or self.search(nums[M:], target)\n elif Lo > Mid and Mid <= Hi:\n return self.search(nums[L:M], target) or self.binarysearch(nums[M:], target)\n elif Lo <= Mid and Mid > Hi:\n return self.binarysearch(nums[L:M + 1], target) or self.search(nums[M + 1:], target)\n<|end_body_0|>\n\n<|body_start_1|>\n L = 0\n H = len(nums) - 1\n while L <= H:\n M = (L + H) // 2\n if nums[M] == target:\n return True\n elif nums[M] < target:\n L = M + 1\n else:\n H = M - 1\n return False\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution_A2", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution_A2:\n\n def search(self, nums: List[int], target: int) -> bool:\n \"\"\"Essentially the same idea as VersionA1, just without a separate helper that checks whether the two halves are sorted. That check is folded directly into the recursive branching. The code is more concise, but the drawback is that the coarser check can force search to run on both halves, increasing the recursion depth.\"\"\"\n <|body_0|>\n\n def binarysearch(self, nums: List[int], target: int) -> bool:\n \"\"\"Binary search in sorted array\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(nums) <= 2:\n return target in nums\n else:\n L = 0\n H = len(nums) - 1\n M = (L + H) // 2\n Lo = nums[L]\n Hi = nums[H]\n Mid = nums[M]\n if Lo <= Mid <= Hi:\n return self.search(nums[L:M], target) or self.search(nums[M:], target)\n elif Lo > Mid and Mid <= Hi:\n return self.search(nums[L:M], target) or self.binarysearch(nums[M:], target)\n elif Lo <= Mid and Mid > Hi:\n return self.binarysearch(nums[L:M + 1], target) or self.search(nums[M + 1:], target)\n<|end_body_0|>\n\n<|body_start_1|>\n L = 0\n H = len(nums) - 1\n while L <= H:\n M = (L + H) // 2\n if nums[M] == target:\n return True\n elif nums[M] < target:\n L = M + 1\n else:\n H = M - 1\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000439", "length_bytes": 6122, "license_type": "permissive", "methods": [{"docstring": "Essentially the same idea as VersionA1, just without a separate helper that checks whether the two halves are sorted. That check is folded directly into the recursive branching.
The code is more concise, but the drawback is that the coarser check can force search to run on both halves, increasing the recursion depth.", "name": "search", "signature": "def search(self, nums: List[int], target: int) -> bool"}, {"docstring": "Binary search in sorted array", "name": "binarysearch", "signature": "def binarysearch(self, nums: List[int], target: int) -> bool"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007060", "prompt": "Implement the Python class `Solution_A2` described below.\n\nClass description:\nImplement the Solution_A2 class.\n\nMethod signatures and docstrings:\n- def search(self, nums: List[int], target: int) -> bool: Essentially the same idea as VersionA1, just without a separate helper that checks whether the two halves are sorted. That check is folded directly into the recursive branching. The code is more concise, but the drawback is that the coarser check can force search to run on both halves, increasing the recursion depth.\n- def binarysearch(self, nums: List[int], target: int) -> bool: Binary search in sorted array", "prompted_full_text": "Implement the Python class `Solution_A2` described below.\n\nClass description:\nImplement the Solution_A2 class.\n\nMethod signatures and docstrings:\n- def search(self, nums: List[int], target: int) -> bool: Essentially the same idea as VersionA1, just without a separate helper that checks whether the two halves are sorted. That check is folded directly into the recursive branching. The code is more concise, but the drawback is that the coarser check can force search to run on both halves, increasing the recursion depth.\n- def binarysearch(self, nums: List[int], target: int) -> bool: Binary search in sorted array\n\n<|skeleton|>\nclass Solution_A2:\n\n def search(self, nums: List[int], target: int) -> bool:\n \"\"\"Essentially the same idea as VersionA1, just without a separate helper that checks whether the two halves are sorted. That check is folded directly into the recursive branching. The code is more concise, but the drawback is that the coarser check can force search to run on both halves, increasing the recursion depth.\"\"\"\n <|body_0|>\n\n def binarysearch(self, nums: List[int], target: int) -> bool:\n \"\"\"Binary search in sorted array\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(nums) <= 2:\n return target in nums\n else:\n L = 0\n H = len(nums) - 1\n M = (L + H) // 2\n Lo = nums[L]\n Hi = nums[H]\n Mid = nums[M]\n if Lo <= Mid <= Hi:\n return self.search(nums[L:M], target) or self.search(nums[M:], target)\n elif Lo > Mid and Mid <= Hi:\n return self.search(nums[L:M], target) or self.binarysearch(nums[M:], target)\n elif Lo <= Mid and Mid > Hi:\n return self.binarysearch(nums[L:M + 1], target) or self.search(nums[M + 1:], target)\n<|end_body_0|>\n\n<|body_start_1|>\n L = 0\n H = len(nums) - 1\n while L <= H:\n M = (L + H) // 2\n if nums[M] == target:\n return True\n elif nums[M] < target:\n L = M + 1\n else:\n H = M - 1\n return False\n<|end_body_1|>\n", "revision_id": "143422321cbc3715ca08f6c3af8f960a55887ced", "skeleton": "<|skeleton|>\nclass Solution_A2:\n\n def search(self, nums: List[int], target: int) -> bool:\n \"\"\"Essentially the same idea as VersionA1, just without a separate helper that checks whether the two halves are sorted. That check is folded directly into the recursive branching. The code is more concise, but the drawback is that the coarser check can force search to run on both halves, increasing the recursion depth.\"\"\"\n <|body_0|>\n\n def binarysearch(self, nums: List[int], target: int) -> bool:\n \"\"\"Binary search in sorted array\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k",
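Solution_A2.search recurses on list slices, so every level copies O(n) elements on top of the extra recursion the docstring already concedes. The same problem (search in a rotated sorted array that may contain duplicates) is usually written as a single in-place loop; a sketch, with the standard caveat that duplicates degrade the worst case to O(n):

    from typing import List

    def search_rotated(nums: List[int], target: int) -> bool:
        # Narrow [lo, hi] in place instead of slicing.
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if nums[mid] == target:
                return True
            if nums[lo] == nums[mid] == nums[hi]:
                # Duplicates hide which half is sorted; shrink both ends.
                lo += 1
                hi -= 1
            elif nums[lo] <= nums[mid]:
                # Left half is sorted.
                if nums[lo] <= target < nums[mid]:
                    hi = mid - 1
                else:
                    lo = mid + 1
            else:
                # Right half is sorted.
                if nums[mid] < target <= nums[hi]:
                    lo = mid + 1
                else:
                    hi = mid - 1
        return False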
代码更简洁,但是缺点是,判断不够准确导致可能要连续对两段都使用search,递归层数增多\"\"\"\n if len(nums) <= 2:\n return target in nums\n else:\n L = 0\n H = len(nums) - 1\n M = (L + H) // 2\n Lo = nums[L]\n Hi = nums[H]\n Mid = nums[M]\n if Lo <= Mid <= Hi:\n return self.search(nums[L:M], target) or self.search(nums[M:], target)\n elif Lo > Mid and Mid <= Hi:\n return self.search(nums[L:M], target) or self.binarysearch(nums[M:], target)\n elif Lo <= Mid and Mid > Hi:\n return self.binarysearch(nums[L:M + 1], target) or self.search(nums[M + 1:], target)\n\n def binarysearch(self, nums: List[int], target: int) -> bool:\n \"\"\"Binary search in sorted array\"\"\"\n L = 0\n H = len(nums) - 1\n while L <= H:\n M = (L + H) // 2\n if nums[M] == target:\n return True\n elif nums[M] < target:\n L = M + 1\n else:\n H = M - 1\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode/LC081_search_in_rotated_sorted_array_ii.py", "source_repo": "jxie0755/Learning_Python", "split": "val", "star_events_count": 0} {"blob_id": "b99ae06ab598b2f87aa2dc40d929f9f8e1f31faf", "bodies": ["super(DecoderBlock, self).__init__()\nself.mha1 = MultiHeadAttention(dm, h)\nself.mha2 = MultiHeadAttention(dm, h)\nself.dense_hidden = tf.keras.layers.Dense(hidden, activation='relu')\nself.dense_output = tf.keras.layers.Dense(dm)\nself.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\nself.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\nself.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\nself.dropout1 = tf.keras.layers.Dropout(rate)\nself.dropout2 = tf.keras.layers.Dropout(rate)\nself.dropout3 = tf.keras.layers.Dropout(rate)", "attn1, _ = self.mha1(x, x, x, look_ahead_mask)\nattn1 = self.dropout1(attn1, training=training)\nout1 = self.layernorm1(attn1 + x)\nattn2, _ = self.mha2(out1, enc_output, enc_output, padding_mask)\nattn2 = self.dropout2(attn2, training=training)\nout2 = self.layernorm2(attn2 + out1)\nf_hidden = self.dense_hidden(out2)\nf_output = self.dense_output(f_hidden)\nf_output = self.dropout3(f_output, training=training)\nout3 = self.layernorm3(f_output + out2)\nreturn out3"], "bodies_text": "<|body_start_0|>\n super(DecoderBlock, self).__init__()\n self.mha1 = MultiHeadAttention(dm, h)\n self.mha2 = MultiHeadAttention(dm, h)\n self.dense_hidden = tf.keras.layers.Dense(hidden, activation='relu')\n self.dense_output = tf.keras.layers.Dense(dm)\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n self.dropout3 = tf.keras.layers.Dropout(rate)\n<|end_body_0|>\n\n<|body_start_1|>\n attn1, _ = self.mha1(x, x, x, look_ahead_mask)\n attn1 = self.dropout1(attn1, training=training)\n out1 = self.layernorm1(attn1 + x)\n attn2, _ = self.mha2(out1, enc_output, enc_output, padding_mask)\n attn2 = self.dropout2(attn2, training=training)\n out2 = self.layernorm2(attn2 + out1)\n f_hidden = self.dense_hidden(out2)\n f_output = self.dense_output(f_hidden)\n f_output = self.dropout3(f_output, training=training)\n out3 = self.layernorm3(f_output + out2)\n return out3\n<|end_body_1|>\n", "class_docstring": "class", "class_name": "DecoderBlock", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DecoderBlock:\n \"\"\"class\"\"\"\n\n def __init__(self, dm, h, hidden, rate=0.1):\n 
\"\"\"constructor\"\"\"\n <|body_0|>\n\n def call(self, x, enc_output, training, look_ahead_mask, padding_mask):\n \"\"\"method\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(DecoderBlock, self).__init__()\n self.mha1 = MultiHeadAttention(dm, h)\n self.mha2 = MultiHeadAttention(dm, h)\n self.dense_hidden = tf.keras.layers.Dense(hidden, activation='relu')\n self.dense_output = tf.keras.layers.Dense(dm)\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n self.dropout3 = tf.keras.layers.Dropout(rate)\n<|end_body_0|>\n\n<|body_start_1|>\n attn1, _ = self.mha1(x, x, x, look_ahead_mask)\n attn1 = self.dropout1(attn1, training=training)\n out1 = self.layernorm1(attn1 + x)\n attn2, _ = self.mha2(out1, enc_output, enc_output, padding_mask)\n attn2 = self.dropout2(attn2, training=training)\n out2 = self.layernorm2(attn2 + out1)\n f_hidden = self.dense_hidden(out2)\n f_output = self.dense_output(f_hidden)\n f_output = self.dropout3(f_output, training=training)\n out3 = self.layernorm3(f_output + out2)\n return out3\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000440", "length_bytes": 1573, "license_type": "no_license", "methods": [{"docstring": "constructor", "name": "__init__", "signature": "def __init__(self, dm, h, hidden, rate=0.1)"}, {"docstring": "method", "name": "call", "signature": "def call(self, x, enc_output, training, look_ahead_mask, padding_mask)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001761", "prompt": "Implement the Python class `DecoderBlock` described below.\n\nClass description:\nclass\n\nMethod signatures and docstrings:\n- def __init__(self, dm, h, hidden, rate=0.1): constructor\n- def call(self, x, enc_output, training, look_ahead_mask, padding_mask): method", "prompted_full_text": "Implement the Python class `DecoderBlock` described below.\n\nClass description:\nclass\n\nMethod signatures and docstrings:\n- def __init__(self, dm, h, hidden, rate=0.1): constructor\n- def call(self, x, enc_output, training, look_ahead_mask, padding_mask): method\n\n<|skeleton|>\nclass DecoderBlock:\n \"\"\"class\"\"\"\n\n def __init__(self, dm, h, hidden, rate=0.1):\n \"\"\"constructor\"\"\"\n <|body_0|>\n\n def call(self, x, enc_output, training, look_ahead_mask, padding_mask):\n \"\"\"method\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(DecoderBlock, self).__init__()\n self.mha1 = MultiHeadAttention(dm, h)\n self.mha2 = MultiHeadAttention(dm, h)\n self.dense_hidden = tf.keras.layers.Dense(hidden, activation='relu')\n self.dense_output = tf.keras.layers.Dense(dm)\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n self.dropout3 = tf.keras.layers.Dropout(rate)\n<|end_body_0|>\n\n<|body_start_1|>\n attn1, _ = self.mha1(x, x, x, look_ahead_mask)\n attn1 = self.dropout1(attn1, training=training)\n out1 = self.layernorm1(attn1 + x)\n attn2, _ = self.mha2(out1, enc_output, enc_output, padding_mask)\n attn2 = self.dropout2(attn2, training=training)\n out2 = self.layernorm2(attn2 + out1)\n f_hidden = 
self.dense_hidden(out2)\n f_output = self.dense_output(f_hidden)\n f_output = self.dropout3(f_output, training=training)\n out3 = self.layernorm3(f_output + out2)\n return out3\n<|end_body_1|>\n", "revision_id": "b5e8f1253309567ca7be71b9575a150de1be3820", "skeleton": "<|skeleton|>\nclass DecoderBlock:\n \"\"\"class\"\"\"\n\n def __init__(self, dm, h, hidden, rate=0.1):\n \"\"\"constructor\"\"\"\n <|body_0|>\n\n def call(self, x, enc_output, training, look_ahead_mask, padding_mask):\n \"\"\"method\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DecoderBlock:\n \"\"\"class\"\"\"\n\n def __init__(self, dm, h, hidden, rate=0.1):\n \"\"\"constructor\"\"\"\n super(DecoderBlock, self).__init__()\n self.mha1 = MultiHeadAttention(dm, h)\n self.mha2 = MultiHeadAttention(dm, h)\n self.dense_hidden = tf.keras.layers.Dense(hidden, activation='relu')\n self.dense_output = tf.keras.layers.Dense(dm)\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-06)\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n self.dropout3 = tf.keras.layers.Dropout(rate)\n\n def call(self, x, enc_output, training, look_ahead_mask, padding_mask):\n \"\"\"method\"\"\"\n attn1, _ = self.mha1(x, x, x, look_ahead_mask)\n attn1 = self.dropout1(attn1, training=training)\n out1 = self.layernorm1(attn1 + x)\n attn2, _ = self.mha2(out1, enc_output, enc_output, padding_mask)\n attn2 = self.dropout2(attn2, training=training)\n out2 = self.layernorm2(attn2 + out1)\n f_hidden = self.dense_hidden(out2)\n f_output = self.dense_output(f_hidden)\n f_output = self.dropout3(f_output, training=training)\n out3 = self.layernorm3(f_output + out2)\n return out3\n", "source": "the_stack_v2_python_sparse", "source_path": "supervised_learning/0x11-attention/8-transformer_decoder_block.py", "source_repo": "jadsm98/holbertonschool-machine_learning", "split": "val", "star_events_count": 0} {"blob_id": "c5292bbceda91908eadcb996dce9e5466d6e728e", "bodies": ["products = response.xpath('//a[@class=\"tile\"]')\nfor product in products:\n href = product.xpath('@href').extract_first()\n yield response.follow(href, callback=self.parse_product)\nnext_page = response.xpath('//span[@class=\"pager_next\"]/a')\nif next_page:\n href = next_page.xpath('@href').extract_first()\n yield response.follow(href)", "encoded = response.xpath('//script').re_first('window.scriptCtx = \"([^\"]+)\"')\ndecoded = base64.b64decode(encoded)\ndata = json.loads(decoded)\nproduct_loader = ProductItemLoader(ProductItem(), response)\nproduct_loader.add_xpath('gtin', '//span[@itemprop=\"gtin13\"]')\nproduct_loader.add_xpath('name', '//h1[@itemprop=\"name\"]')\nproduct_loader.add_xpath('brandLogo', '//h3[@itemprop=\"brand\"]/link[@itemprop=\"logo\"]/@href')\nproduct_loader.add_xpath('brand', '//h3[@itemprop=\"brand\"]/a[@itemprop=\"name\"]')\nproduct_loader.add_xpath('category', '//ul[@itemprop=\"breadcrumb\"]/li')\nproduct_loader.add_xpath('rating', '//meta[@itemprop=\"ratingValue\"]/@content')\nproduct_loader.add_xpath('reviewCount', '//meta[@itemprop=\"reviewCount\"]/@content')\nproduct_loader.add_xpath('priceCurrency', '//meta[@itemprop=\"priceCurrency\"]/@content')\nproduct_loader.add_xpath('price', '//div[@itemprop=\"price\"]')\nproduct_loader.add_xpath('image', 
'//img[@itemprop=\"image\"]/@src')\nproduct_loader.add_xpath('ingredients', '//div[@id=\"accord-ingredients\"]')\nproduct_loader.add_xpath('availability', '//link[@itemprop=\"availability\"]/@href')\nproduct_loader.add_xpath('shipping', '//span[@class=\"img\"][span[contains(@class, \"shipIcon_time\")]]/following-sibling::div[@class=\"body\"]')\nproduct_loader.add_xpath('returnPolicy', '//span[@class=\"img\"][span[contains(@class, \"shipIcon_returns\")]]/following-sibling::div[@class=\"body\"]')\nproduct_loader.add_xpath('description', '//div[@id=\"desc-accord-content\"]')\nproduct_loader.add_value('url', response.url)\nproduct = product_loader.load_item()\nif product['reviewCount']:\n cipherid = data['ProductApp']['product']['cipheredId']\n return scrapy.Request(self.build_review_url(cipherid), callback=self.parse_reviews, meta={'product': product, 'cipherid': cipherid})\nreturn product", "product = response.meta['product']\ncipherid = response.meta['cipherid']\nreviews = json.loads(response.body)\nif not reviews:\n return product\nproduct['reviews'] = product.get('reviews') or []\nfor each in reviews:\n review = self.extract_review(each)\n review['reviewer'] = self.extract_reviewer(each)\n product['reviews'].append(review)\nlimit = response.meta.get('limit', 20)\noffset = response.meta.get('offset', 0) + limit\nsort = response.meta.get('sort', 'helpful')\nreturn scrapy.Request(self.build_review_url(cipherid, offset, limit, sort), callback=self.parse_reviews, meta={'product': product, 'cipherid': cipherid, 'offset': offset, 'limit': limit})", "params = {'offset': offset, 'limit': limit, 'sort': sort}\nquery = urllib.urlencode(params)\nreturn 'https://www.beautylish.com/rest/reviews/p-{cipherid}?{query}'.format(cipherid=cipherid, query=query)", "review_loader = ReviewItemLoader(ReviewItem())\nreview_loader.add_value('title', selector['shortText'])\nreview_loader.add_value('description', selector['text'])\nreview_loader.add_value('rating', selector['rating'])\nreview_loader.add_value('helpfulCount', selector['likesCount'])\nreview_loader.add_value('reviewImage', selector['images'][0]['clUrl'] if selector['images'] else None)\nreview_loader.add_value('datePublished', selector['isoDate'])\nreturn review_loader.load_item()", "reviewer_loader = ReviewerItemLoader(ReviewerItem())\nreviewer_loader.add_value('name', selector['userDisplayName'])\nreviewer_loader.add_value('profileUrl', selector['userUrl'])\nreturn reviewer_loader.load_item()"], "bodies_text": "<|body_start_0|>\n products = response.xpath('//a[@class=\"tile\"]')\n for product in products:\n href = product.xpath('@href').extract_first()\n yield response.follow(href, callback=self.parse_product)\n next_page = response.xpath('//span[@class=\"pager_next\"]/a')\n if next_page:\n href = next_page.xpath('@href').extract_first()\n yield response.follow(href)\n<|end_body_0|>\n\n<|body_start_1|>\n encoded = response.xpath('//script').re_first('window.scriptCtx = \"([^\"]+)\"')\n decoded = base64.b64decode(encoded)\n data = json.loads(decoded)\n product_loader = ProductItemLoader(ProductItem(), response)\n product_loader.add_xpath('gtin', '//span[@itemprop=\"gtin13\"]')\n product_loader.add_xpath('name', '//h1[@itemprop=\"name\"]')\n product_loader.add_xpath('brandLogo', '//h3[@itemprop=\"brand\"]/link[@itemprop=\"logo\"]/@href')\n product_loader.add_xpath('brand', '//h3[@itemprop=\"brand\"]/a[@itemprop=\"name\"]')\n product_loader.add_xpath('category', '//ul[@itemprop=\"breadcrumb\"]/li')\n product_loader.add_xpath('rating', 
'//meta[@itemprop=\"ratingValue\"]/@content')\n product_loader.add_xpath('reviewCount', '//meta[@itemprop=\"reviewCount\"]/@content')\n product_loader.add_xpath('priceCurrency', '//meta[@itemprop=\"priceCurrency\"]/@content')\n product_loader.add_xpath('price', '//div[@itemprop=\"price\"]')\n product_loader.add_xpath('image', '//img[@itemprop=\"image\"]/@src')\n product_loader.add_xpath('ingredients', '//div[@id=\"accord-ingredients\"]')\n product_loader.add_xpath('availability', '//link[@itemprop=\"availability\"]/@href')\n product_loader.add_xpath('shipping', '//span[@class=\"img\"][span[contains(@class, \"shipIcon_time\")]]/following-sibling::div[@class=\"body\"]')\n product_loader.add_xpath('returnPolicy', '//span[@class=\"img\"][span[contains(@class, \"shipIcon_returns\")]]/following-sibling::div[@class=\"body\"]')\n product_loader.add_xpath('description', '//div[@id=\"desc-accord-content\"]')\n product_loader.add_value('url', response.url)\n product = product_loader.load_item()\n if product['reviewCount']:\n cipherid = data['ProductApp']['product']['cipheredId']\n return scrapy.Request(self.build_review_url(cipherid), callback=self.parse_reviews, meta={'product': product, 'cipherid': cipherid})\n return product\n<|end_body_1|>\n\n<|body_start_2|>\n product = response.meta['product']\n cipherid = response.meta['cipherid']\n reviews = json.loads(response.body)\n if not reviews:\n return product\n product['reviews'] = product.get('reviews') or []\n for each in reviews:\n review = self.extract_review(each)\n review['reviewer'] = self.extract_reviewer(each)\n product['reviews'].append(review)\n limit = response.meta.get('limit', 20)\n offset = response.meta.get('offset', 0) + limit\n sort = response.meta.get('sort', 'helpful')\n return scrapy.Request(self.build_review_url(cipherid, offset, limit, sort), callback=self.parse_reviews, meta={'product': product, 'cipherid': cipherid, 'offset': offset, 'limit': limit})\n<|end_body_2|>\n\n<|body_start_3|>\n params = {'offset': offset, 'limit': limit, 'sort': sort}\n query = urllib.urlencode(params)\n return 'https://www.beautylish.com/rest/reviews/p-{cipherid}?{query}'.format(cipherid=cipherid, query=query)\n<|end_body_3|>\n\n<|body_start_4|>\n review_loader = ReviewItemLoader(ReviewItem())\n review_loader.add_value('title', selector['shortText'])\n review_loader.add_value('description', selector['text'])\n review_loader.add_value('rating', selector['rating'])\n review_loader.add_value('helpfulCount', selector['likesCount'])\n review_loader.add_value('reviewImage', selector['images'][0]['clUrl'] if selector['images'] else None)\n review_loader.add_value('datePublished', selector['isoDate'])\n return review_loader.load_item()\n<|end_body_4|>\n\n<|body_start_5|>\n reviewer_loader = ReviewerItemLoader(ReviewerItem())\n reviewer_loader.add_value('name', selector['userDisplayName'])\n reviewer_loader.add_value('profileUrl', selector['userUrl'])\n return reviewer_loader.load_item()\n<|end_body_5|>\n", "class_docstring": "Beautylish Products Spider", "class_name": "BeautylishProductsSpider", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BeautylishProductsSpider:\n \"\"\"Beautylish Products Spider\"\"\"\n\n def parse(self, response):\n \"\"\"Extract product links, follow them and go to next page if exists @url https://www.beautylish.com/shop/browse @returns requests 1 @returns items 0 0\"\"\"\n <|body_0|>\n\n def parse_product(self, response):\n \"\"\"Extract product details @url 
https://www.beautylish.com/s/jeffree-star-cosmetics-holographic-makeup-bag-black @returns requests 1 1\"\"\"\n <|body_1|>\n\n def parse_reviews(self, response):\n \"\"\"Extract reviews data\"\"\"\n <|body_2|>\n\n def build_review_url(self, cipherid, offset=0, limit=20, sort='helpful'):\n \"\"\"Build review url from cipherid\"\"\"\n <|body_3|>\n\n def extract_review(self, selector):\n \"\"\"Extract review\"\"\"\n <|body_4|>\n\n def extract_reviewer(self, selector):\n \"\"\"Extract reviewer\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n products = response.xpath('//a[@class=\"tile\"]')\n for product in products:\n href = product.xpath('@href').extract_first()\n yield response.follow(href, callback=self.parse_product)\n next_page = response.xpath('//span[@class=\"pager_next\"]/a')\n if next_page:\n href = next_page.xpath('@href').extract_first()\n yield response.follow(href)\n<|end_body_0|>\n\n<|body_start_1|>\n encoded = response.xpath('//script').re_first('window.scriptCtx = \"([^\"]+)\"')\n decoded = base64.b64decode(encoded)\n data = json.loads(decoded)\n product_loader = ProductItemLoader(ProductItem(), response)\n product_loader.add_xpath('gtin', '//span[@itemprop=\"gtin13\"]')\n product_loader.add_xpath('name', '//h1[@itemprop=\"name\"]')\n product_loader.add_xpath('brandLogo', '//h3[@itemprop=\"brand\"]/link[@itemprop=\"logo\"]/@href')\n product_loader.add_xpath('brand', '//h3[@itemprop=\"brand\"]/a[@itemprop=\"name\"]')\n product_loader.add_xpath('category', '//ul[@itemprop=\"breadcrumb\"]/li')\n product_loader.add_xpath('rating', '//meta[@itemprop=\"ratingValue\"]/@content')\n product_loader.add_xpath('reviewCount', '//meta[@itemprop=\"reviewCount\"]/@content')\n product_loader.add_xpath('priceCurrency', '//meta[@itemprop=\"priceCurrency\"]/@content')\n product_loader.add_xpath('price', '//div[@itemprop=\"price\"]')\n product_loader.add_xpath('image', '//img[@itemprop=\"image\"]/@src')\n product_loader.add_xpath('ingredients', '//div[@id=\"accord-ingredients\"]')\n product_loader.add_xpath('availability', '//link[@itemprop=\"availability\"]/@href')\n product_loader.add_xpath('shipping', '//span[@class=\"img\"][span[contains(@class, \"shipIcon_time\")]]/following-sibling::div[@class=\"body\"]')\n product_loader.add_xpath('returnPolicy', '//span[@class=\"img\"][span[contains(@class, \"shipIcon_returns\")]]/following-sibling::div[@class=\"body\"]')\n product_loader.add_xpath('description', '//div[@id=\"desc-accord-content\"]')\n product_loader.add_value('url', response.url)\n product = product_loader.load_item()\n if product['reviewCount']:\n cipherid = data['ProductApp']['product']['cipheredId']\n return scrapy.Request(self.build_review_url(cipherid), callback=self.parse_reviews, meta={'product': product, 'cipherid': cipherid})\n return product\n<|end_body_1|>\n\n<|body_start_2|>\n product = response.meta['product']\n cipherid = response.meta['cipherid']\n reviews = json.loads(response.body)\n if not reviews:\n return product\n product['reviews'] = product.get('reviews') or []\n for each in reviews:\n review = self.extract_review(each)\n review['reviewer'] = self.extract_reviewer(each)\n product['reviews'].append(review)\n limit = response.meta.get('limit', 20)\n offset = response.meta.get('offset', 0) + limit\n sort = response.meta.get('sort', 'helpful')\n return scrapy.Request(self.build_review_url(cipherid, offset, limit, sort), callback=self.parse_reviews, meta={'product': product, 'cipherid': cipherid, 'offset': offset, 'limit': 
limit})\n<|end_body_2|>\n\n<|body_start_3|>\n params = {'offset': offset, 'limit': limit, 'sort': sort}\n query = urllib.urlencode(params)\n return 'https://www.beautylish.com/rest/reviews/p-{cipherid}?{query}'.format(cipherid=cipherid, query=query)\n<|end_body_3|>\n\n<|body_start_4|>\n review_loader = ReviewItemLoader(ReviewItem())\n review_loader.add_value('title', selector['shortText'])\n review_loader.add_value('description', selector['text'])\n review_loader.add_value('rating', selector['rating'])\n review_loader.add_value('helpfulCount', selector['likesCount'])\n review_loader.add_value('reviewImage', selector['images'][0]['clUrl'] if selector['images'] else None)\n review_loader.add_value('datePublished', selector['isoDate'])\n return review_loader.load_item()\n<|end_body_4|>\n\n<|body_start_5|>\n reviewer_loader = ReviewerItemLoader(ReviewerItem())\n reviewer_loader.add_value('name', selector['userDisplayName'])\n reviewer_loader.add_value('profileUrl', selector['userUrl'])\n return reviewer_loader.load_item()\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_val_000441", "length_bytes": 6662, "license_type": "no_license", "methods": [{"docstring": "Extract product links, follow them and go to next page if exists @url https://www.beautylish.com/shop/browse @returns requests 1 @returns items 0 0", "name": "parse", "signature": "def parse(self, response)"}, {"docstring": "Extract product details @url https://www.beautylish.com/s/jeffree-star-cosmetics-holographic-makeup-bag-black @returns requests 1 1", "name": "parse_product", "signature": "def parse_product(self, response)"}, {"docstring": "Extract reviews data", "name": "parse_reviews", "signature": "def parse_reviews(self, response)"}, {"docstring": "Build review url from cipherid", "name": "build_review_url", "signature": "def build_review_url(self, cipherid, offset=0, limit=20, sort='helpful')"}, {"docstring": "Extract review", "name": "extract_review", "signature": "def extract_review(self, selector)"}, {"docstring": "Extract reviewer", "name": "extract_reviewer", "signature": "def extract_reviewer(self, selector)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_000133", "prompt": "Implement the Python class `BeautylishProductsSpider` described below.\n\nClass description:\nBeautylish Products Spider\n\nMethod signatures and docstrings:\n- def parse(self, response): Extract product links, follow them and go to next page if exists @url https://www.beautylish.com/shop/browse @returns requests 1 @returns items 0 0\n- def parse_product(self, response): Extract product details @url https://www.beautylish.com/s/jeffree-star-cosmetics-holographic-makeup-bag-black @returns requests 1 1\n- def parse_reviews(self, response): Extract reviews data\n- def build_review_url(self, cipherid, offset=0, limit=20, sort='helpful'): Build review url from cipherid\n- def extract_review(self, selector): Extract review\n- def extract_reviewer(self, selector): Extract reviewer", "prompted_full_text": "Implement the Python class `BeautylishProductsSpider` described below.\n\nClass description:\nBeautylish Products Spider\n\nMethod signatures and docstrings:\n- def parse(self, response): Extract product links, follow them and go to next page if exists @url https://www.beautylish.com/shop/browse @returns requests 1 @returns items 0 0\n- def parse_product(self, response): Extract product details @url https://www.beautylish.com/s/jeffree-star-cosmetics-holographic-makeup-bag-black @returns requests 1 1\n- def parse_reviews(self, 
response): Extract reviews data\n- def build_review_url(self, cipherid, offset=0, limit=20, sort='helpful'): Build review url from cipherid\n- def extract_review(self, selector): Extract review\n- def extract_reviewer(self, selector): Extract reviewer\n\n<|skeleton|>\nclass BeautylishProductsSpider:\n \"\"\"Beautylish Products Spider\"\"\"\n\n def parse(self, response):\n \"\"\"Extract product links, follow them and go to next page if exists @url https://www.beautylish.com/shop/browse @returns requests 1 @returns items 0 0\"\"\"\n <|body_0|>\n\n def parse_product(self, response):\n \"\"\"Extract product details @url https://www.beautylish.com/s/jeffree-star-cosmetics-holographic-makeup-bag-black @returns requests 1 1\"\"\"\n <|body_1|>\n\n def parse_reviews(self, response):\n \"\"\"Extract reviews data\"\"\"\n <|body_2|>\n\n def build_review_url(self, cipherid, offset=0, limit=20, sort='helpful'):\n \"\"\"Build review url from cipherid\"\"\"\n <|body_3|>\n\n def extract_review(self, selector):\n \"\"\"Extract review\"\"\"\n <|body_4|>\n\n def extract_reviewer(self, selector):\n \"\"\"Extract reviewer\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n products = response.xpath('//a[@class=\"tile\"]')\n for product in products:\n href = product.xpath('@href').extract_first()\n yield response.follow(href, callback=self.parse_product)\n next_page = response.xpath('//span[@class=\"pager_next\"]/a')\n if next_page:\n href = next_page.xpath('@href').extract_first()\n yield response.follow(href)\n<|end_body_0|>\n\n<|body_start_1|>\n encoded = response.xpath('//script').re_first('window.scriptCtx = \"([^\"]+)\"')\n decoded = base64.b64decode(encoded)\n data = json.loads(decoded)\n product_loader = ProductItemLoader(ProductItem(), response)\n product_loader.add_xpath('gtin', '//span[@itemprop=\"gtin13\"]')\n product_loader.add_xpath('name', '//h1[@itemprop=\"name\"]')\n product_loader.add_xpath('brandLogo', '//h3[@itemprop=\"brand\"]/link[@itemprop=\"logo\"]/@href')\n product_loader.add_xpath('brand', '//h3[@itemprop=\"brand\"]/a[@itemprop=\"name\"]')\n product_loader.add_xpath('category', '//ul[@itemprop=\"breadcrumb\"]/li')\n product_loader.add_xpath('rating', '//meta[@itemprop=\"ratingValue\"]/@content')\n product_loader.add_xpath('reviewCount', '//meta[@itemprop=\"reviewCount\"]/@content')\n product_loader.add_xpath('priceCurrency', '//meta[@itemprop=\"priceCurrency\"]/@content')\n product_loader.add_xpath('price', '//div[@itemprop=\"price\"]')\n product_loader.add_xpath('image', '//img[@itemprop=\"image\"]/@src')\n product_loader.add_xpath('ingredients', '//div[@id=\"accord-ingredients\"]')\n product_loader.add_xpath('availability', '//link[@itemprop=\"availability\"]/@href')\n product_loader.add_xpath('shipping', '//span[@class=\"img\"][span[contains(@class, \"shipIcon_time\")]]/following-sibling::div[@class=\"body\"]')\n product_loader.add_xpath('returnPolicy', '//span[@class=\"img\"][span[contains(@class, \"shipIcon_returns\")]]/following-sibling::div[@class=\"body\"]')\n product_loader.add_xpath('description', '//div[@id=\"desc-accord-content\"]')\n product_loader.add_value('url', response.url)\n product = product_loader.load_item()\n if product['reviewCount']:\n cipherid = data['ProductApp']['product']['cipheredId']\n return scrapy.Request(self.build_review_url(cipherid), callback=self.parse_reviews, meta={'product': product, 'cipherid': cipherid})\n return product\n<|end_body_1|>\n\n<|body_start_2|>\n product = response.meta['product']\n cipherid = response.meta['cipherid']\n 
reviews = json.loads(response.body)\n if not reviews:\n return product\n product['reviews'] = product.get('reviews') or []\n for each in reviews:\n review = self.extract_review(each)\n review['reviewer'] = self.extract_reviewer(each)\n product['reviews'].append(review)\n limit = response.meta.get('limit', 20)\n offset = response.meta.get('offset', 0) + limit\n sort = response.meta.get('sort', 'helpful')\n return scrapy.Request(self.build_review_url(cipherid, offset, limit, sort), callback=self.parse_reviews, meta={'product': product, 'cipherid': cipherid, 'offset': offset, 'limit': limit})\n<|end_body_2|>\n\n<|body_start_3|>\n params = {'offset': offset, 'limit': limit, 'sort': sort}\n query = urllib.urlencode(params)\n return 'https://www.beautylish.com/rest/reviews/p-{cipherid}?{query}'.format(cipherid=cipherid, query=query)\n<|end_body_3|>\n\n<|body_start_4|>\n review_loader = ReviewItemLoader(ReviewItem())\n review_loader.add_value('title', selector['shortText'])\n review_loader.add_value('description', selector['text'])\n review_loader.add_value('rating', selector['rating'])\n review_loader.add_value('helpfulCount', selector['likesCount'])\n review_loader.add_value('reviewImage', selector['images'][0]['clUrl'] if selector['images'] else None)\n review_loader.add_value('datePublished', selector['isoDate'])\n return review_loader.load_item()\n<|end_body_4|>\n\n<|body_start_5|>\n reviewer_loader = ReviewerItemLoader(ReviewerItem())\n reviewer_loader.add_value('name', selector['userDisplayName'])\n reviewer_loader.add_value('profileUrl', selector['userUrl'])\n return reviewer_loader.load_item()\n<|end_body_5|>\n", "revision_id": "67eeb08962725fd3aff8c8cb7e16360ffd651f06", "skeleton": "<|skeleton|>\nclass BeautylishProductsSpider:\n \"\"\"Beautylish Products Spider\"\"\"\n\n def parse(self, response):\n \"\"\"Extract product links, follow them and go to next page if exists @url https://www.beautylish.com/shop/browse @returns requests 1 @returns items 0 0\"\"\"\n <|body_0|>\n\n def parse_product(self, response):\n \"\"\"Extract product details @url https://www.beautylish.com/s/jeffree-star-cosmetics-holographic-makeup-bag-black @returns requests 1 1\"\"\"\n <|body_1|>\n\n def parse_reviews(self, response):\n \"\"\"Extract reviews data\"\"\"\n <|body_2|>\n\n def build_review_url(self, cipherid, offset=0, limit=20, sort='helpful'):\n \"\"\"Build review url from cipherid\"\"\"\n <|body_3|>\n\n def extract_review(self, selector):\n \"\"\"Extract review\"\"\"\n <|body_4|>\n\n def extract_reviewer(self, selector):\n \"\"\"Extract reviewer\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BeautylishProductsSpider:\n \"\"\"Beautylish Products Spider\"\"\"\n\n def parse(self, response):\n \"\"\"Extract product links, follow them and go to next page if exists @url https://www.beautylish.com/shop/browse @returns requests 1 @returns items 0 0\"\"\"\n products = response.xpath('//a[@class=\"tile\"]')\n for product in products:\n href = product.xpath('@href').extract_first()\n yield response.follow(href, callback=self.parse_product)\n next_page = response.xpath('//span[@class=\"pager_next\"]/a')\n if next_page:\n href = next_page.xpath('@href').extract_first()\n yield response.follow(href)\n\n def parse_product(self, response):\n \"\"\"Extract product details @url https://www.beautylish.com/s/jeffree-star-cosmetics-holographic-makeup-bag-black @returns requests 1 1\"\"\"\n encoded = 
response.xpath('//script').re_first('window.scriptCtx = \"([^\"]+)\"')\n decoded = base64.b64decode(encoded)\n data = json.loads(decoded)\n product_loader = ProductItemLoader(ProductItem(), response)\n product_loader.add_xpath('gtin', '//span[@itemprop=\"gtin13\"]')\n product_loader.add_xpath('name', '//h1[@itemprop=\"name\"]')\n product_loader.add_xpath('brandLogo', '//h3[@itemprop=\"brand\"]/link[@itemprop=\"logo\"]/@href')\n product_loader.add_xpath('brand', '//h3[@itemprop=\"brand\"]/a[@itemprop=\"name\"]')\n product_loader.add_xpath('category', '//ul[@itemprop=\"breadcrumb\"]/li')\n product_loader.add_xpath('rating', '//meta[@itemprop=\"ratingValue\"]/@content')\n product_loader.add_xpath('reviewCount', '//meta[@itemprop=\"reviewCount\"]/@content')\n product_loader.add_xpath('priceCurrency', '//meta[@itemprop=\"priceCurrency\"]/@content')\n product_loader.add_xpath('price', '//div[@itemprop=\"price\"]')\n product_loader.add_xpath('image', '//img[@itemprop=\"image\"]/@src')\n product_loader.add_xpath('ingredients', '//div[@id=\"accord-ingredients\"]')\n product_loader.add_xpath('availability', '//link[@itemprop=\"availability\"]/@href')\n product_loader.add_xpath('shipping', '//span[@class=\"img\"][span[contains(@class, \"shipIcon_time\")]]/following-sibling::div[@class=\"body\"]')\n product_loader.add_xpath('returnPolicy', '//span[@class=\"img\"][span[contains(@class, \"shipIcon_returns\")]]/following-sibling::div[@class=\"body\"]')\n product_loader.add_xpath('description', '//div[@id=\"desc-accord-content\"]')\n product_loader.add_value('url', response.url)\n product = product_loader.load_item()\n if product['reviewCount']:\n cipherid = data['ProductApp']['product']['cipheredId']\n return scrapy.Request(self.build_review_url(cipherid), callback=self.parse_reviews, meta={'product': product, 'cipherid': cipherid})\n return product\n\n def parse_reviews(self, response):\n \"\"\"Extract reviews data\"\"\"\n product = response.meta['product']\n cipherid = response.meta['cipherid']\n reviews = json.loads(response.body)\n if not reviews:\n return product\n product['reviews'] = product.get('reviews') or []\n for each in reviews:\n review = self.extract_review(each)\n review['reviewer'] = self.extract_reviewer(each)\n product['reviews'].append(review)\n limit = response.meta.get('limit', 20)\n offset = response.meta.get('offset', 0) + limit\n sort = response.meta.get('sort', 'helpful')\n return scrapy.Request(self.build_review_url(cipherid, offset, limit, sort), callback=self.parse_reviews, meta={'product': product, 'cipherid': cipherid, 'offset': offset, 'limit': limit})\n\n def build_review_url(self, cipherid, offset=0, limit=20, sort='helpful'):\n \"\"\"Build review url from cipherid\"\"\"\n params = {'offset': offset, 'limit': limit, 'sort': sort}\n query = urllib.urlencode(params)\n return 'https://www.beautylish.com/rest/reviews/p-{cipherid}?{query}'.format(cipherid=cipherid, query=query)\n\n def extract_review(self, selector):\n \"\"\"Extract review\"\"\"\n review_loader = ReviewItemLoader(ReviewItem())\n review_loader.add_value('title', selector['shortText'])\n review_loader.add_value('description', selector['text'])\n review_loader.add_value('rating', selector['rating'])\n review_loader.add_value('helpfulCount', selector['likesCount'])\n review_loader.add_value('reviewImage', selector['images'][0]['clUrl'] if selector['images'] else None)\n review_loader.add_value('datePublished', selector['isoDate'])\n return review_loader.load_item()\n\n def extract_reviewer(self, selector):\n 
\"\"\"Extract reviewer\"\"\"\n reviewer_loader = ReviewerItemLoader(ReviewerItem())\n reviewer_loader.add_value('name', selector['userDisplayName'])\n reviewer_loader.add_value('profileUrl', selector['userUrl'])\n return reviewer_loader.load_item()\n", "source": "the_stack_v2_python_sparse", "source_path": "pipeline/pipeline/spiders/beautylish.py", "source_repo": "DataRetrieval/pipeline", "split": "val", "star_events_count": 1} {"blob_id": "3c8904294cda68b80c523771b10891d9064201bd", "bodies": ["path = [six.text_type(edge) for edge in edges]\nif len(field_names) > 1:\n path.append('{{{}}}'.format(','.join(sorted(field_names))))\nelif field_names:\n path.append(field_names[0])\nreturn '.'.join(path)", "type_ = type(message).__name__\nbase_msg = 'Failed to parse value(s) in protobuf [{type_}]:'.format(type_=type_)\nerror_paths = [' {type_}.{path}'.format(type_=type_, path=cls._FormatProtoPath(edges, field_names)) for edges, field_names in errors]\nreturn cls('\\n'.join([base_msg] + error_paths))"], "bodies_text": "<|body_start_0|>\n path = [six.text_type(edge) for edge in edges]\n if len(field_names) > 1:\n path.append('{{{}}}'.format(','.join(sorted(field_names))))\n elif field_names:\n path.append(field_names[0])\n return '.'.join(path)\n<|end_body_0|>\n\n<|body_start_1|>\n type_ = type(message).__name__\n base_msg = 'Failed to parse value(s) in protobuf [{type_}]:'.format(type_=type_)\n error_paths = [' {type_}.{path}'.format(type_=type_, path=cls._FormatProtoPath(edges, field_names)) for edges, field_names in errors]\n return cls('\\n'.join([base_msg] + error_paths))\n<|end_body_1|>\n", "class_docstring": "Indicates an error in decoding a protorpclite message.", "class_name": "DecodeError", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DecodeError:\n \"\"\"Indicates an error in decoding a protorpclite message.\"\"\"\n\n def _FormatProtoPath(cls, edges, field_names):\n \"\"\"Returns a string representation of a path to a proto field. The return value represents one or more fields in a python dictionary representation of a message (json/yaml) that could not be decoded into the message as a string. The format is a dot separated list of python like sub field references (name, name[index], name[name]). The final element of the returned dot separated path may be a comma separated list of names enclosed in curly braces to represent multiple subfields (see examples) Examples: o Reference to a single field that could not be decoded: 'a.b[1].c[x].d' o Reference to two subfields 'a.b[1].c[x].{d,e}' Args: edges: List of objects representing python field references (__str__\"\"\"\n <|body_0|>\n\n def FromErrorPaths(cls, message, errors):\n \"\"\"Returns a DecodeError from a list of locations of errors. Args: message: The protorpc Message in which a parsing error occurred. 
errors: List[(edges, field_names)], A list of locations of errors encountered while decoding the message.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n path = [six.text_type(edge) for edge in edges]\n if len(field_names) > 1:\n path.append('{{{}}}'.format(','.join(sorted(field_names))))\n elif field_names:\n path.append(field_names[0])\n return '.'.join(path)\n<|end_body_0|>\n\n<|body_start_1|>\n type_ = type(message).__name__\n base_msg = 'Failed to parse value(s) in protobuf [{type_}]:'.format(type_=type_)\n error_paths = [' {type_}.{path}'.format(type_=type_, path=cls._FormatProtoPath(edges, field_names)) for edges, field_names in errors]\n return cls('\\n'.join([base_msg] + error_paths))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000442", "length_bytes": 5880, "license_type": "permissive", "methods": [{"docstring": "Returns a string representation of a path to a proto field. The return value represents one or more fields in a python dictionary representation of a message (json/yaml) that could not be decoded into the message as a string. The format is a dot separated list of python like sub field references (name, name[index], name[name]). The final element of the returned dot separated path may be a comma separated list of names enclosed in curly braces to represent multiple subfields (see examples) Examples: o Reference to a single field that could not be decoded: 'a.b[1].c[x].d' o Reference to two subfields 'a.b[1].c[x].{d,e}' Args: edges: List of objects representing python field references (__str__", "name": "_FormatProtoPath", "signature": "def _FormatProtoPath(cls, edges, field_names)"}, {"docstring": "Returns a DecodeError from a list of locations of errors. Args: message: The protorpc Message in which a parsing error occurred. errors: List[(edges, field_names)], A list of locations of errors encountered while decoding the message.", "name": "FromErrorPaths", "signature": "def FromErrorPaths(cls, message, errors)"}], "n_methods": 2, "prompt": "Implement the Python class `DecodeError` described below.\n\nClass description:\nIndicates an error in decoding a protorpclite message.\n\nMethod signatures and docstrings:\n- def _FormatProtoPath(cls, edges, field_names): Returns a string representation of a path to a proto field. The return value represents one or more fields in a python dictionary representation of a message (json/yaml) that could not be decoded into the message as a string. The format is a dot separated list of python like sub field references (name, name[index], name[name]). The final element of the returned dot separated path may be a comma separated list of names enclosed in curly braces to represent multiple subfields (see examples) Examples: o Reference to a single field that could not be decoded: 'a.b[1].c[x].d' o Reference to two subfields 'a.b[1].c[x].{d,e}' Args: edges: List of objects representing python field references (__str__\n- def FromErrorPaths(cls, message, errors): Returns a DecodeError from a list of locations of errors. Args: message: The protorpc Message in which a parsing error occurred. errors: List[(edges, field_names)], A list of locations of errors encountered while decoding the message.", "prompted_full_text": "Implement the Python class `DecodeError` described below.\n\nClass description:\nIndicates an error in decoding a protorpclite message.\n\nMethod signatures and docstrings:\n- def _FormatProtoPath(cls, edges, field_names): Returns a string representation of a path to a proto field. 
The return value represents one or more fields in a python dictionary representation of a message (json/yaml) that could not be decoded into the message as a string. The format is a dot separated list of python like sub field references (name, name[index], name[name]). The final element of the returned dot separated path may be a comma separated list of names enclosed in curly braces to represent multiple subfields (see examples) Examples: o Reference to a single field that could not be decoded: 'a.b[1].c[x].d' o Reference to two subfields 'a.b[1].c[x].{d,e}' Args: edges: List of objects representing python field references (__str__\n- def FromErrorPaths(cls, message, errors): Returns a DecodeError from a list of locations of errors. Args: message: The protorpc Message in which a parsing error occurred. errors: List[(edges, field_names)], A list of locations of errors encountered while decoding the message.\n\n<|skeleton|>\nclass DecodeError:\n \"\"\"Indicates an error in decoding a protorpclite message.\"\"\"\n\n def _FormatProtoPath(cls, edges, field_names):\n \"\"\"Returns a string representation of a path to a proto field. The return value represents one or more fields in a python dictionary representation of a message (json/yaml) that could not be decoded into the message as a string. The format is a dot separated list of python like sub field references (name, name[index], name[name]). The final element of the returned dot separated path may be a comma separated list of names enclosed in curly braces to represent multiple subfields (see examples) Examples: o Reference to a single field that could not be decoded: 'a.b[1].c[x].d' o Reference to two subfields 'a.b[1].c[x].{d,e}' Args: edges: List of objects representing python field references (__str__\"\"\"\n <|body_0|>\n\n def FromErrorPaths(cls, message, errors):\n \"\"\"Returns a DecodeError from a list of locations of errors. Args: message: The protorpc Message in which a parsing error occurred. errors: List[(edges, field_names)], A list of locations of errors encountered while decoding the message.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n path = [six.text_type(edge) for edge in edges]\n if len(field_names) > 1:\n path.append('{{{}}}'.format(','.join(sorted(field_names))))\n elif field_names:\n path.append(field_names[0])\n return '.'.join(path)\n<|end_body_0|>\n\n<|body_start_1|>\n type_ = type(message).__name__\n base_msg = 'Failed to parse value(s) in protobuf [{type_}]:'.format(type_=type_)\n error_paths = [' {type_}.{path}'.format(type_=type_, path=cls._FormatProtoPath(edges, field_names)) for edges, field_names in errors]\n return cls('\\n'.join([base_msg] + error_paths))\n<|end_body_1|>\n", "revision_id": "85bb264e273568b5a0408f733b403c56373e2508", "skeleton": "<|skeleton|>\nclass DecodeError:\n \"\"\"Indicates an error in decoding a protorpclite message.\"\"\"\n\n def _FormatProtoPath(cls, edges, field_names):\n \"\"\"Returns a string representation of a path to a proto field. The return value represents one or more fields in a python dictionary representation of a message (json/yaml) that could not be decoded into the message as a string. The format is a dot separated list of python like sub field references (name, name[index], name[name]). 
The final element of the returned dot separated path may be a comma separated list of names enclosed in curly braces to represent multiple subfields (see examples) Examples: o Reference to a single field that could not be decoded: 'a.b[1].c[x].d' o Reference to two subfields 'a.b[1].c[x].{d,e}' Args: edges: List of objects representing python field references (__str__\"\"\"\n <|body_0|>\n\n def FromErrorPaths(cls, message, errors):\n \"\"\"Returns a DecodeError from a list of locations of errors. Args: message: The protorpc Message in which a parsing error occurred. errors: List[(edges, field_names)], A list of locations of errors encountered while decoding the message.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DecodeError:\n \"\"\"Indicates an error in decoding a protorpclite message.\"\"\"\n\n def _FormatProtoPath(cls, edges, field_names):\n \"\"\"Returns a string representation of a path to a proto field. The return value represents one or more fields in a python dictionary representation of a message (json/yaml) that could not be decoded into the message as a string. The format is a dot separated list of python like sub field references (name, name[index], name[name]). The final element of the returned dot separated path may be a comma separated list of names enclosed in curly braces to represent multiple subfields (see examples) Examples: o Reference to a single field that could not be decoded: 'a.b[1].c[x].d' o Reference to two subfields 'a.b[1].c[x].{d,e}' Args: edges: List of objects representing python field references (__str__\"\"\"\n path = [six.text_type(edge) for edge in edges]\n if len(field_names) > 1:\n path.append('{{{}}}'.format(','.join(sorted(field_names))))\n elif field_names:\n path.append(field_names[0])\n return '.'.join(path)\n\n def FromErrorPaths(cls, message, errors):\n \"\"\"Returns a DecodeError from a list of locations of errors. Args: message: The protorpc Message in which a parsing error occurred. 
errors: List[(edges, field_names)], A list of locations of errors encountered while decoding the message.\"\"\"\n type_ = type(message).__name__\n base_msg = 'Failed to parse value(s) in protobuf [{type_}]:'.format(type_=type_)\n error_paths = [' {type_}.{path}'.format(type_=type_, path=cls._FormatProtoPath(edges, field_names)) for edges, field_names in errors]\n return cls('\\n'.join([base_msg] + error_paths))\n", "source": "the_stack_v2_python_sparse", "source_path": "google-cloud-sdk/lib/googlecloudsdk/api_lib/util/messages.py", "source_repo": "bopopescu/socialliteapp", "split": "val", "star_events_count": 0} {"blob_id": "4c672518cb3a862842e2fa52f46c34befbc17dd8", "bodies": ["buildflags = OrderedDict()\nif NBURN is not None:\n buildflags['NBURN'] = str(NBURN)\nif JMZ is not None:\n buildflags['JMZ'] = str(JMZ)\nif FULDAT is not None:\n buildflags['FULDAT'] = FULDAT\nif NAME is not None:\n buildflags['NAME'] = NAME\nself.buildflags = buildflags\nself.makeflags = [f'{k}={v}' for k, v in buildflags.items()]\nself.update = update\nversionflags = buildflags.copy()\nif 'FULDAT' in versionflags:\n versionflags['FULDAT'] = versionflags['FULDAT'].replace('.', '_')\nself.version = '_' + _versionfunc(versionflags, machine=machine, hashing=hashing)\nif len(versionflags) == 0:\n print('[build] building with defaults from Makefile.')\nself.kepler_source_path = os.path.join(os.environ['KEPLER_PATH'], 'source')\nself.kepler_library_path = os.path.join(self.path, f'_kepler{self.version}')\nself.kepler_library_file = os.path.join(self.kepler_library_path, 'kepler.a')\nself.project_libraries = (self.kepler_library_file,)\nself.include_paths = (self.kepler_source_path, self.kepler_library_path)\nself.signature_file = f'_kepler{self.version}.pyf'\nself.module = f'_kepler{self.version}'\nself.executable_file = 'kepler{self.version}.exe'\nsuper().__init__()", "try:\n library_time = os.path.getctime(self.kepler_library_file)\n if self.update == False:\n return False\nexcept FileNotFoundError:\n library_time = 0\nexclude = ('uuidcom', 'gitcom', 'nburncom', 'gridcom')\npatterns = ('*com', '*.f', '*.f90', '*.c', 'Makefile*')\nmakefile = os.path.join(self.path, 'Makefile')\nlast_time = os.path.getctime(makefile)\nfor p in patterns:\n for f in glob.glob(os.path.join(self.kepler_source_path, p)):\n if os.path.basename(f) in exclude:\n continue\n last_time = max(last_time, os.path.getctime(f))\nif last_time > library_time:\n cwd = os.getcwd()\n try:\n os.mkdir(self.kepler_library_path)\n except FileExistsError:\n pass\n os.chdir(self.kepler_library_path)\n cmd = ['make', '-j', '-f', makefile] + self.makeflags\n subprocess.run(cmd, shell=False, check=True)\n os.chdir(cwd)\n return True\nreturn False"], "bodies_text": "<|body_start_0|>\n buildflags = OrderedDict()\n if NBURN is not None:\n buildflags['NBURN'] = str(NBURN)\n if JMZ is not None:\n buildflags['JMZ'] = str(JMZ)\n if FULDAT is not None:\n buildflags['FULDAT'] = FULDAT\n if NAME is not None:\n buildflags['NAME'] = NAME\n self.buildflags = buildflags\n self.makeflags = [f'{k}={v}' for k, v in buildflags.items()]\n self.update = update\n versionflags = buildflags.copy()\n if 'FULDAT' in versionflags:\n versionflags['FULDAT'] = versionflags['FULDAT'].replace('.', '_')\n self.version = '_' + _versionfunc(versionflags, machine=machine, hashing=hashing)\n if len(versionflags) == 0:\n print('[build] building with defaults from Makefile.')\n self.kepler_source_path = os.path.join(os.environ['KEPLER_PATH'], 'source')\n self.kepler_library_path = 
os.path.join(self.path, f'_kepler{self.version}')\n self.kepler_library_file = os.path.join(self.kepler_library_path, 'kepler.a')\n self.project_libraries = (self.kepler_library_file,)\n self.include_paths = (self.kepler_source_path, self.kepler_library_path)\n self.signature_file = f'_kepler{self.version}.pyf'\n self.module = f'_kepler{self.version}'\n self.executable_file = 'kepler{self.version}.exe'\n super().__init__()\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n library_time = os.path.getctime(self.kepler_library_file)\n if self.update == False:\n return False\n except FileNotFoundError:\n library_time = 0\n exclude = ('uuidcom', 'gitcom', 'nburncom', 'gridcom')\n patterns = ('*com', '*.f', '*.f90', '*.c', 'Makefile*')\n makefile = os.path.join(self.path, 'Makefile')\n last_time = os.path.getctime(makefile)\n for p in patterns:\n for f in glob.glob(os.path.join(self.kepler_source_path, p)):\n if os.path.basename(f) in exclude:\n continue\n last_time = max(last_time, os.path.getctime(f))\n if last_time > library_time:\n cwd = os.getcwd()\n try:\n os.mkdir(self.kepler_library_path)\n except FileExistsError:\n pass\n os.chdir(self.kepler_library_path)\n cmd = ['make', '-j', '-f', makefile] + self.makeflags\n subprocess.run(cmd, shell=False, check=True)\n os.chdir(cwd)\n return True\n return False\n<|end_body_1|>\n", "class_docstring": "", "class_name": "_BuildKepler", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass _BuildKepler:\n\n def __init__(self, NBURN=None, JMZ=None, FULDAT=None, NAME=None, machine='CPU', hashing=False, update=True):\n \"\"\"Note - all initilisation needed is done in class definition for now. Maybe that should go here instead ...\"\"\"\n <|body_0|>\n\n def build_library_check(self, debug=True):\n \"\"\"check whether KEPLER library is up to date\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n buildflags = OrderedDict()\n if NBURN is not None:\n buildflags['NBURN'] = str(NBURN)\n if JMZ is not None:\n buildflags['JMZ'] = str(JMZ)\n if FULDAT is not None:\n buildflags['FULDAT'] = FULDAT\n if NAME is not None:\n buildflags['NAME'] = NAME\n self.buildflags = buildflags\n self.makeflags = [f'{k}={v}' for k, v in buildflags.items()]\n self.update = update\n versionflags = buildflags.copy()\n if 'FULDAT' in versionflags:\n versionflags['FULDAT'] = versionflags['FULDAT'].replace('.', '_')\n self.version = '_' + _versionfunc(versionflags, machine=machine, hashing=hashing)\n if len(versionflags) == 0:\n print('[build] building with defaults from Makefile.')\n self.kepler_source_path = os.path.join(os.environ['KEPLER_PATH'], 'source')\n self.kepler_library_path = os.path.join(self.path, f'_kepler{self.version}')\n self.kepler_library_file = os.path.join(self.kepler_library_path, 'kepler.a')\n self.project_libraries = (self.kepler_library_file,)\n self.include_paths = (self.kepler_source_path, self.kepler_library_path)\n self.signature_file = f'_kepler{self.version}.pyf'\n self.module = f'_kepler{self.version}'\n self.executable_file = 'kepler{self.version}.exe'\n super().__init__()\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n library_time = os.path.getctime(self.kepler_library_file)\n if self.update == False:\n return False\n except FileNotFoundError:\n library_time = 0\n exclude = ('uuidcom', 'gitcom', 'nburncom', 'gridcom')\n patterns = ('*com', '*.f', '*.f90', '*.c', 'Makefile*')\n makefile = os.path.join(self.path, 'Makefile')\n last_time = os.path.getctime(makefile)\n for p in 
patterns:\n for f in glob.glob(os.path.join(self.kepler_source_path, p)):\n if os.path.basename(f) in exclude:\n continue\n last_time = max(last_time, os.path.getctime(f))\n if last_time > library_time:\n cwd = os.getcwd()\n try:\n os.mkdir(self.kepler_library_path)\n except FileExistsError:\n pass\n os.chdir(self.kepler_library_path)\n cmd = ['make', '-j', '-f', makefile] + self.makeflags\n subprocess.run(cmd, shell=False, check=True)\n os.chdir(cwd)\n return True\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000443", "length_bytes": 4916, "license_type": "permissive", "methods": [{"docstring": "Note - all initilisation needed is done in class definition for now. Maybe that should go here instead ...", "name": "__init__", "signature": "def __init__(self, NBURN=None, JMZ=None, FULDAT=None, NAME=None, machine='CPU', hashing=False, update=True)"}, {"docstring": "check whether KEPLER library is up to date", "name": "build_library_check", "signature": "def build_library_check(self, debug=True)"}], "n_methods": 2, "prompt": "Implement the Python class `_BuildKepler` described below.\n\nClass description:\nImplement the _BuildKepler class.\n\nMethod signatures and docstrings:\n- def __init__(self, NBURN=None, JMZ=None, FULDAT=None, NAME=None, machine='CPU', hashing=False, update=True): Note - all initilisation needed is done in class definition for now. Maybe that should go here instead ...\n- def build_library_check(self, debug=True): check whether KEPLER library is up to date", "prompted_full_text": "Implement the Python class `_BuildKepler` described below.\n\nClass description:\nImplement the _BuildKepler class.\n\nMethod signatures and docstrings:\n- def __init__(self, NBURN=None, JMZ=None, FULDAT=None, NAME=None, machine='CPU', hashing=False, update=True): Note - all initilisation needed is done in class definition for now. Maybe that should go here instead ...\n- def build_library_check(self, debug=True): check whether KEPLER library is up to date\n\n<|skeleton|>\nclass _BuildKepler:\n\n def __init__(self, NBURN=None, JMZ=None, FULDAT=None, NAME=None, machine='CPU', hashing=False, update=True):\n \"\"\"Note - all initilisation needed is done in class definition for now. 
Maybe that should go here instead ...\"\"\"\n <|body_0|>\n\n def build_library_check(self, debug=True):\n \"\"\"check whether KEPLER library is up to date\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n buildflags = OrderedDict()\n if NBURN is not None:\n buildflags['NBURN'] = str(NBURN)\n if JMZ is not None:\n buildflags['JMZ'] = str(JMZ)\n if FULDAT is not None:\n buildflags['FULDAT'] = FULDAT\n if NAME is not None:\n buildflags['NAME'] = NAME\n self.buildflags = buildflags\n self.makeflags = [f'{k}={v}' for k, v in buildflags.items()]\n self.update = update\n versionflags = buildflags.copy()\n if 'FULDAT' in versionflags:\n versionflags['FULDAT'] = versionflags['FULDAT'].replace('.', '_')\n self.version = '_' + _versionfunc(versionflags, machine=machine, hashing=hashing)\n if len(versionflags) == 0:\n print('[build] building with defaults from Makefile.')\n self.kepler_source_path = os.path.join(os.environ['KEPLER_PATH'], 'source')\n self.kepler_library_path = os.path.join(self.path, f'_kepler{self.version}')\n self.kepler_library_file = os.path.join(self.kepler_library_path, 'kepler.a')\n self.project_libraries = (self.kepler_library_file,)\n self.include_paths = (self.kepler_source_path, self.kepler_library_path)\n self.signature_file = f'_kepler{self.version}.pyf'\n self.module = f'_kepler{self.version}'\n self.executable_file = 'kepler{self.version}.exe'\n super().__init__()\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n library_time = os.path.getctime(self.kepler_library_file)\n if self.update == False:\n return False\n except FileNotFoundError:\n library_time = 0\n exclude = ('uuidcom', 'gitcom', 'nburncom', 'gridcom')\n patterns = ('*com', '*.f', '*.f90', '*.c', 'Makefile*')\n makefile = os.path.join(self.path, 'Makefile')\n last_time = os.path.getctime(makefile)\n for p in patterns:\n for f in glob.glob(os.path.join(self.kepler_source_path, p)):\n if os.path.basename(f) in exclude:\n continue\n last_time = max(last_time, os.path.getctime(f))\n if last_time > library_time:\n cwd = os.getcwd()\n try:\n os.mkdir(self.kepler_library_path)\n except FileExistsError:\n pass\n os.chdir(self.kepler_library_path)\n cmd = ['make', '-j', '-f', makefile] + self.makeflags\n subprocess.run(cmd, shell=False, check=True)\n os.chdir(cwd)\n return True\n return False\n<|end_body_1|>\n", "revision_id": "98fc181bab054619d12ffa4173ad5c469803c2ec", "skeleton": "<|skeleton|>\nclass _BuildKepler:\n\n def __init__(self, NBURN=None, JMZ=None, FULDAT=None, NAME=None, machine='CPU', hashing=False, update=True):\n \"\"\"Note - all initilisation needed is done in class definition for now. Maybe that should go here instead ...\"\"\"\n <|body_0|>\n\n def build_library_check(self, debug=True):\n \"\"\"check whether KEPLER library is up to date\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class _BuildKepler:\n def __init__(self, NBURN=None, JMZ=None, FULDAT=None, NAME=None, machine='CPU', hashing=False, update=True):\n \"\"\"Note - all initilisation needed is done in class definition for now. 
Maybe that should go here instead ...\"\"\"\n buildflags = OrderedDict()\n if NBURN is not None:\n buildflags['NBURN'] = str(NBURN)\n if JMZ is not None:\n buildflags['JMZ'] = str(JMZ)\n if FULDAT is not None:\n buildflags['FULDAT'] = FULDAT\n if NAME is not None:\n buildflags['NAME'] = NAME\n self.buildflags = buildflags\n self.makeflags = [f'{k}={v}' for k, v in buildflags.items()]\n self.update = update\n versionflags = buildflags.copy()\n if 'FULDAT' in versionflags:\n versionflags['FULDAT'] = versionflags['FULDAT'].replace('.', '_')\n self.version = '_' + _versionfunc(versionflags, machine=machine, hashing=hashing)\n if len(versionflags) == 0:\n print('[build] building with defaults from Makefile.')\n self.kepler_source_path = os.path.join(os.environ['KEPLER_PATH'], 'source')\n self.kepler_library_path = os.path.join(self.path, f'_kepler{self.version}')\n self.kepler_library_file = os.path.join(self.kepler_library_path, 'kepler.a')\n self.project_libraries = (self.kepler_library_file,)\n self.include_paths = (self.kepler_source_path, self.kepler_library_path)\n self.signature_file = f'_kepler{self.version}.pyf'\n self.module = f'_kepler{self.version}'\n self.executable_file = 'kepler{self.version}.exe'\n super().__init__()\n\n def build_library_check(self, debug=True):\n \"\"\"check whether KEPLER library is up to date\"\"\"\n try:\n library_time = os.path.getctime(self.kepler_library_file)\n if self.update == False:\n return False\n except FileNotFoundError:\n library_time = 0\n exclude = ('uuidcom', 'gitcom', 'nburncom', 'gridcom')\n patterns = ('*com', '*.f', '*.f90', '*.c', 'Makefile*')\n makefile = os.path.join(self.path, 'Makefile')\n last_time = os.path.getctime(makefile)\n for p in patterns:\n for f in glob.glob(os.path.join(self.kepler_source_path, p)):\n if os.path.basename(f) in exclude:\n continue\n last_time = max(last_time, os.path.getctime(f))\n if last_time > library_time:\n cwd = os.getcwd()\n try:\n os.mkdir(self.kepler_library_path)\n except FileExistsError:\n pass\n os.chdir(self.kepler_library_path)\n cmd = ['make', '-j', '-f', makefile] + self.makeflags\n subprocess.run(cmd, shell=False, check=True)\n os.chdir(cwd)\n return True\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "kepler_python_packages/python_scripts/kepler/code/_build.py", "source_repo": "adam-m-jcbs/xrb-sens-datashare", "split": "val", "star_events_count": 1} {"blob_id": "b38989d148a7bdbe085c2511acd1689c1b1fa96c", "bodies": ["if minfo is None:\n minfo = {}\nsuper(DumpNodeStatsMessage, self).__init__(minfo)\nself.IsSystemMessage = False\nself.IsForward = True\nself.IsReliable = True\nself.DomainList = minfo.get('DomainList', [])\nself.MetricList = minfo.get('MetricList', [])", "result = super(DumpNodeStatsMessage, self).dump()\nresult['DomainList'] = []\nfor domain in self.DomainList:\n result['DomainList'].append(domain)\nresult['MetricList'] = []\nfor metric in self.MetricList:\n result['MetricList'].append(metric)\nreturn result"], "bodies_text": "<|body_start_0|>\n if minfo is None:\n minfo = {}\n super(DumpNodeStatsMessage, self).__init__(minfo)\n self.IsSystemMessage = False\n self.IsForward = True\n self.IsReliable = True\n self.DomainList = minfo.get('DomainList', [])\n self.MetricList = minfo.get('MetricList', [])\n<|end_body_0|>\n\n<|body_start_1|>\n result = super(DumpNodeStatsMessage, self).dump()\n result['DomainList'] = []\n for domain in self.DomainList:\n result['DomainList'].append(domain)\n result['MetricList'] = []\n for metric in self.MetricList:\n 
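The `_BuildKepler` record above decides whether to rebuild by taking the newest ctime across its source patterns (`*com`, `*.f`, `*.f90`, `*.c`, `Makefile*`, minus a small exclude list) and comparing it against the library archive's ctime. A minimal standalone sketch of that freshness check, with hypothetical paths and `getmtime` in place of the record's `getctime` (modification time is usually what a "needs rebuild" test wants):

```python
import glob
import os


def needs_rebuild(source_dir, library_file,
                  patterns=('*com', '*.f', '*.f90', '*.c', 'Makefile*'),
                  exclude=('uuidcom', 'gitcom', 'nburncom', 'gridcom')):
    """True if any matching source file is newer than the built library.

    Same mtime-vs-archive comparison as build_library_check above;
    source_dir and library_file are illustrative, not KEPLER's real layout.
    """
    try:
        library_time = os.path.getmtime(library_file)
    except FileNotFoundError:
        return True  # no library built yet: always build
    last_time = 0.0
    for pattern in patterns:
        for path in glob.glob(os.path.join(source_dir, pattern)):
            if os.path.basename(path) in exclude:
                continue  # generated files should not trigger rebuilds
            last_time = max(last_time, os.path.getmtime(path))
    return last_time > library_time
```

When the check fires, the record runs `make -j -f <Makefile> <FLAGS>` after an `os.chdir` round-trip; passing `cwd=` to `subprocess.run` achieves the same without mutating the process-wide working directory.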
result['MetricList'].append(metric)\n return result\n<|end_body_1|>\n", "class_docstring": "Dump node stats messages are sent to a peer node to request it to dump statistics. Attributes: DumpNodeStatsMessage.MessageType (str): The class name of the message. IsSystemMessage (bool): Whether or not this is a system message. System messages have special delivery priority rules. IsForward (bool): Whether the message should be automatically forwarded. IsReliable (bool): Whether reliable delivery is required. DomainList (list): A list of domains to dump stats for. MetricList (list): A list of stats to dump.", "class_name": "DumpNodeStatsMessage", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DumpNodeStatsMessage:\n \"\"\"Dump node stats messages are sent to a peer node to request it to dump statistics. Attributes: DumpNodeStatsMessage.MessageType (str): The class name of the message. IsSystemMessage (bool): Whether or not this is a system message. System messages have special delivery priority rules. IsForward (bool): Whether the message should be automatically forwarded. IsReliable (bool): Whether reliable delivery is required. DomainList (list): A list of domains to dump stats for. MetricList (list): A list of stats to dump.\"\"\"\n\n def __init__(self, minfo=None):\n \"\"\"Constructor for the DumpNodeStatsMessage class. Args: minfo (dict): Dictionary of values for message fields.\"\"\"\n <|body_0|>\n\n def dump(self):\n \"\"\"Dumps a dict containing object attributes. Returns: dict: A mapping of object attribute names to values.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if minfo is None:\n minfo = {}\n super(DumpNodeStatsMessage, self).__init__(minfo)\n self.IsSystemMessage = False\n self.IsForward = True\n self.IsReliable = True\n self.DomainList = minfo.get('DomainList', [])\n self.MetricList = minfo.get('MetricList', [])\n<|end_body_0|>\n\n<|body_start_1|>\n result = super(DumpNodeStatsMessage, self).dump()\n result['DomainList'] = []\n for domain in self.DomainList:\n result['DomainList'].append(domain)\n result['MetricList'] = []\n for metric in self.MetricList:\n result['MetricList'].append(metric)\n return result\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000444", "length_bytes": 13482, "license_type": "permissive", "methods": [{"docstring": "Constructor for the DumpNodeStatsMessage class. Args: minfo (dict): Dictionary of values for message fields.", "name": "__init__", "signature": "def __init__(self, minfo=None)"}, {"docstring": "Dumps a dict containing object attributes. Returns: dict: A mapping of object attribute names to values.", "name": "dump", "signature": "def dump(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000336", "prompt": "Implement the Python class `DumpNodeStatsMessage` described below.\n\nClass description:\nDump node stats messages are sent to a peer node to request it to dump statistics. Attributes: DumpNodeStatsMessage.MessageType (str): The class name of the message. IsSystemMessage (bool): Whether or not this is a system message. System messages have special delivery priority rules. IsForward (bool): Whether the message should be automatically forwarded. IsReliable (bool): Whether reliable delivery is required. DomainList (list): A list of domains to dump stats for. 
MetricList (list): A list of stats to dump.\n\nMethod signatures and docstrings:\n- def __init__(self, minfo=None): Constructor for the DumpNodeStatsMessage class. Args: minfo (dict): Dictionary of values for message fields.\n- def dump(self): Dumps a dict containing object attributes. Returns: dict: A mapping of object attribute names to values.", "prompted_full_text": "Implement the Python class `DumpNodeStatsMessage` described below.\n\nClass description:\nDump node stats messages are sent to a peer node to request it to dump statistics. Attributes: DumpNodeStatsMessage.MessageType (str): The class name of the message. IsSystemMessage (bool): Whether or not this is a system message. System messages have special delivery priority rules. IsForward (bool): Whether the message should be automatically forwarded. IsReliable (bool): Whether reliable delivery is required. DomainList (list): A list of domains to dump stats for. MetricList (list): A list of stats to dump.\n\nMethod signatures and docstrings:\n- def __init__(self, minfo=None): Constructor for the DumpNodeStatsMessage class. Args: minfo (dict): Dictionary of values for message fields.\n- def dump(self): Dumps a dict containing object attributes. Returns: dict: A mapping of object attribute names to values.\n\n<|skeleton|>\nclass DumpNodeStatsMessage:\n \"\"\"Dump node stats messages are sent to a peer node to request it to dump statistics. Attributes: DumpNodeStatsMessage.MessageType (str): The class name of the message. IsSystemMessage (bool): Whether or not this is a system message. System messages have special delivery priority rules. IsForward (bool): Whether the message should be automatically forwarded. IsReliable (bool): Whether reliable delivery is required. DomainList (list): A list of domains to dump stats for. MetricList (list): A list of stats to dump.\"\"\"\n\n def __init__(self, minfo=None):\n \"\"\"Constructor for the DumpNodeStatsMessage class. Args: minfo (dict): Dictionary of values for message fields.\"\"\"\n <|body_0|>\n\n def dump(self):\n \"\"\"Dumps a dict containing object attributes. Returns: dict: A mapping of object attribute names to values.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if minfo is None:\n minfo = {}\n super(DumpNodeStatsMessage, self).__init__(minfo)\n self.IsSystemMessage = False\n self.IsForward = True\n self.IsReliable = True\n self.DomainList = minfo.get('DomainList', [])\n self.MetricList = minfo.get('MetricList', [])\n<|end_body_0|>\n\n<|body_start_1|>\n result = super(DumpNodeStatsMessage, self).dump()\n result['DomainList'] = []\n for domain in self.DomainList:\n result['DomainList'].append(domain)\n result['MetricList'] = []\n for metric in self.MetricList:\n result['MetricList'].append(metric)\n return result\n<|end_body_1|>\n", "revision_id": "8f4ca1aab54ef420a0db10c8ca822ec8686cd423", "skeleton": "<|skeleton|>\nclass DumpNodeStatsMessage:\n \"\"\"Dump node stats messages are sent to a peer node to request it to dump statistics. Attributes: DumpNodeStatsMessage.MessageType (str): The class name of the message. IsSystemMessage (bool): Whether or not this is a system message. System messages have special delivery priority rules. IsForward (bool): Whether the message should be automatically forwarded. IsReliable (bool): Whether reliable delivery is required. DomainList (list): A list of domains to dump stats for. MetricList (list): A list of stats to dump.\"\"\"\n\n def __init__(self, minfo=None):\n \"\"\"Constructor for the DumpNodeStatsMessage class. 
Args: minfo (dict): Dictionary of values for message fields.\"\"\"\n <|body_0|>\n\n def dump(self):\n \"\"\"Dumps a dict containing object attributes. Returns: dict: A mapping of object attribute names to values.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DumpNodeStatsMessage:\n \"\"\"Dump node stats messages are sent to a peer node to request it to dump statistics. Attributes: DumpNodeStatsMessage.MessageType (str): The class name of the message. IsSystemMessage (bool): Whether or not this is a system message. System messages have special delivery priority rules. IsForward (bool): Whether the message should be automatically forwarded. IsReliable (bool): Whether reliable delivery is required. DomainList (list): A list of domains to dump stats for. MetricList (list): A list of stats to dump.\"\"\"\n\n def __init__(self, minfo=None):\n \"\"\"Constructor for the DumpNodeStatsMessage class. Args: minfo (dict): Dictionary of values for message fields.\"\"\"\n if minfo is None:\n minfo = {}\n super(DumpNodeStatsMessage, self).__init__(minfo)\n self.IsSystemMessage = False\n self.IsForward = True\n self.IsReliable = True\n self.DomainList = minfo.get('DomainList', [])\n self.MetricList = minfo.get('MetricList', [])\n\n def dump(self):\n \"\"\"Dumps a dict containing object attributes. Returns: dict: A mapping of object attribute names to values.\"\"\"\n result = super(DumpNodeStatsMessage, self).dump()\n result['DomainList'] = []\n for domain in self.DomainList:\n result['DomainList'].append(domain)\n result['MetricList'] = []\n for metric in self.MetricList:\n result['MetricList'].append(metric)\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "validator/gossip/messages/gossip_debug.py", "source_repo": "aludvik/sawtooth-core", "split": "val", "star_events_count": 0} {"blob_id": "6ff0920af5e23e593568f640693e6fff89f864b8", "bodies": ["self.dict = {}\nfor i in range(len(words)):\n w = words[i]\n if w in self.dict:\n self.dict[w].append(i)\n else:\n self.dict[w] = [i]", "ix1 = self.dict[word1]\nix2 = self.dict[word2]\ni1, i2 = (0, 0)\nret = float('inf')\nwhile i1 < len(ix1) and i2 < len(ix2):\n ret = min(ret, abs(ix2[i2] - ix1[i1]))\n if ix1[i1] > ix2[i2]:\n i2 += 1\n else:\n i1 += 1\nreturn ret"], "bodies_text": "<|body_start_0|>\n self.dict = {}\n for i in range(len(words)):\n w = words[i]\n if w in self.dict:\n self.dict[w].append(i)\n else:\n self.dict[w] = [i]\n<|end_body_0|>\n\n<|body_start_1|>\n ix1 = self.dict[word1]\n ix2 = self.dict[word2]\n i1, i2 = (0, 0)\n ret = float('inf')\n while i1 < len(ix1) and i2 < len(ix2):\n ret = min(ret, abs(ix2[i2] - ix1[i1]))\n if ix1[i1] > ix2[i2]:\n i2 += 1\n else:\n i1 += 1\n return ret\n<|end_body_1|>\n", "class_docstring": "", "class_name": "WordDistance", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WordDistance:\n\n def __init__(self, words):\n \"\"\":type words: List[str]\"\"\"\n <|body_0|>\n\n def shortest(self, word1, word2):\n \"\"\":type word1: str :type word2: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dict = {}\n for i in range(len(words)):\n w = words[i]\n if w in self.dict:\n self.dict[w].append(i)\n else:\n self.dict[w] = [i]\n<|end_body_0|>\n\n<|body_start_1|>\n ix1 = self.dict[word1]\n ix2 = self.dict[word2]\n i1, i2 = (0, 0)\n ret = float('inf')\n while i1 < len(ix1) and i2 < 
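Each record in this dump (`format_version: stack_v2_sparse_python_classes_v1`) splits a class into a `skeleton` whose method bodies are replaced by `<|body_i|>` placeholders and a parallel `bodies` list; the `full_text` and `solution` fields are just the two recombined. A sketch of that recombination, assuming each placeholder sits alone on a line indented to the body's depth, as it does in every record shown here:

```python
import textwrap


def reassemble(record):
    """Rebuild a class's full source from a record's skeleton and bodies.

    Assumes each '<|body_i|>' placeholder sits alone on a line indented
    to the method body's depth, as in the records in this file.
    """
    src = record['skeleton']
    src = src.removeprefix('<|skeleton|>\n').removesuffix('<|end_skeleton|>')  # 3.9+
    for i, body in enumerate(record['bodies']):
        marker = f'<|body_{i}|>'
        # Find the placeholder line and reuse its indentation for the body.
        line = next(l for l in src.splitlines() if marker in l)
        indent = line[:len(line) - len(line.lstrip())]
        src = src.replace(line, textwrap.indent(body, indent))
    return src


# Usage on one line of this dump:
#   import json
#   print(reassemble(json.loads(line)))
```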
len(ix2):\n ret = min(ret, abs(ix2[i2] - ix1[i1]))\n if ix1[i1] > ix2[i2]:\n i2 += 1\n else:\n i1 += 1\n return ret\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000445", "length_bytes": 950, "license_type": "no_license", "methods": [{"docstring": ":type words: List[str]", "name": "__init__", "signature": "def __init__(self, words)"}, {"docstring": ":type word1: str :type word2: str :rtype: int", "name": "shortest", "signature": "def shortest(self, word1, word2)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000324", "prompt": "Implement the Python class `WordDistance` described below.\n\nClass description:\nImplement the WordDistance class.\n\nMethod signatures and docstrings:\n- def __init__(self, words): :type words: List[str]\n- def shortest(self, word1, word2): :type word1: str :type word2: str :rtype: int", "prompted_full_text": "Implement the Python class `WordDistance` described below.\n\nClass description:\nImplement the WordDistance class.\n\nMethod signatures and docstrings:\n- def __init__(self, words): :type words: List[str]\n- def shortest(self, word1, word2): :type word1: str :type word2: str :rtype: int\n\n<|skeleton|>\nclass WordDistance:\n\n def __init__(self, words):\n \"\"\":type words: List[str]\"\"\"\n <|body_0|>\n\n def shortest(self, word1, word2):\n \"\"\":type word1: str :type word2: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dict = {}\n for i in range(len(words)):\n w = words[i]\n if w in self.dict:\n self.dict[w].append(i)\n else:\n self.dict[w] = [i]\n<|end_body_0|>\n\n<|body_start_1|>\n ix1 = self.dict[word1]\n ix2 = self.dict[word2]\n i1, i2 = (0, 0)\n ret = float('inf')\n while i1 < len(ix1) and i2 < len(ix2):\n ret = min(ret, abs(ix2[i2] - ix1[i1]))\n if ix1[i1] > ix2[i2]:\n i2 += 1\n else:\n i1 += 1\n return ret\n<|end_body_1|>\n", "revision_id": "f7cb7cfa6e1f04efd741c2456ad930db48101573", "skeleton": "<|skeleton|>\nclass WordDistance:\n\n def __init__(self, words):\n \"\"\":type words: List[str]\"\"\"\n <|body_0|>\n\n def shortest(self, word1, word2):\n \"\"\":type word1: str :type word2: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class WordDistance:\n def __init__(self, words):\n \"\"\":type words: List[str]\"\"\"\n self.dict = {}\n for i in range(len(words)):\n w = words[i]\n if w in self.dict:\n self.dict[w].append(i)\n else:\n self.dict[w] = [i]\n\n def shortest(self, word1, word2):\n \"\"\":type word1: str :type word2: str :rtype: int\"\"\"\n ix1 = self.dict[word1]\n ix2 = self.dict[word2]\n i1, i2 = (0, 0)\n ret = float('inf')\n while i1 < len(ix1) and i2 < len(ix2):\n ret = min(ret, abs(ix2[i2] - ix1[i1]))\n if ix1[i1] > ix2[i2]:\n i2 += 1\n else:\n i1 += 1\n return ret\n", "source": "the_stack_v2_python_sparse", "source_path": "244.shortestWordDist2.py", "source_repo": "umnstao/leetcodeOJ", "split": "val", "star_events_count": 0} {"blob_id": "6841554449325a7e5ed247c38e9b3ef7792cbd3c", "bodies": ["if page_url is None or html_cont is None:\n return\nsoup = BeautifulSoup(html_cont, 'html.parser')\nnew_urls = self._get_new_urls(page_url, soup)\nnew_data = self._get_new_data(page_url, soup)\nreturn (new_urls, new_data)", "new_urls = set()\nlinks = soup.find_all('a', href=re.compile('/item/\\\\w+'))\nfor link in links:\n new_url = link['href']\n new_full_url = urljoin(page_url, new_url)\n new_urls.add(new_full_url)\nreturn new_urls", "data = 
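The `WordDistance` record above precomputes, per word, the sorted list of its occurrence indices, then merges two of those lists with two pointers to find the minimum gap in O(n + m). A standalone version of the same idea, with a hand-checked example for quick testing:

```python
def shortest_distance(words, word1, word2):
    """Minimum index distance between two words: two-pointer merge over
    precomputed occurrence lists, as in the WordDistance record above."""
    positions = {}
    for i, w in enumerate(words):
        positions.setdefault(w, []).append(i)
    ix1, ix2 = positions[word1], positions[word2]
    i1 = i2 = 0
    best = float('inf')
    while i1 < len(ix1) and i2 < len(ix2):
        best = min(best, abs(ix1[i1] - ix2[i2]))
        # Advance the pointer at the smaller index to close the gap.
        if ix1[i1] < ix2[i2]:
            i1 += 1
        else:
            i2 += 1
    return best


words = ['practice', 'makes', 'perfect', 'coding', 'makes']
assert shortest_distance(words, 'coding', 'practice') == 3
assert shortest_distance(words, 'makes', 'coding') == 1
```

Because both index lists are sorted, advancing only the smaller pointer never skips the optimal pair, which is what makes the single merge pass sufficient.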
{}\ndata['url'] = page_url\ntitle = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')\ndata['title'] = title.get_text()\nsummary = soup.find('div', class_='lemma-summary')\ndata['summary'] = summary.get_text()\nreturn data"], "bodies_text": "<|body_start_0|>\n if page_url is None or html_cont is None:\n return\n soup = BeautifulSoup(html_cont, 'html.parser')\n new_urls = self._get_new_urls(page_url, soup)\n new_data = self._get_new_data(page_url, soup)\n return (new_urls, new_data)\n<|end_body_0|>\n\n<|body_start_1|>\n new_urls = set()\n links = soup.find_all('a', href=re.compile('/item/\\\\w+'))\n for link in links:\n new_url = link['href']\n new_full_url = urljoin(page_url, new_url)\n new_urls.add(new_full_url)\n return new_urls\n<|end_body_1|>\n\n<|body_start_2|>\n data = {}\n data['url'] = page_url\n title = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')\n data['title'] = title.get_text()\n summary = soup.find('div', class_='lemma-summary')\n data['summary'] = summary.get_text()\n return data\n<|end_body_2|>\n", "class_docstring": "HTML解析器", "class_name": "HtmlParser", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HtmlParser:\n \"\"\"HTML解析器\"\"\"\n\n def parser(self, page_url, html_cont):\n \"\"\"parse a given html page :param page_url: the url path :param html_cont: the content of this url :return: a turple about new urls and the dictionary of this url\"\"\"\n <|body_0|>\n\n def _get_new_urls(self, page_url, soup):\n \"\"\"get some new urls which need to download through this page url :param page_url: the given url :param soup: the object of BeautifulSoup :return: the new urls set need to download\"\"\"\n <|body_1|>\n\n def _get_new_data(self, page_url, soup):\n \"\"\"get content dictionary by given page_url :param page_url: the given url :param soup: the object of BeautifulSoup :return: the dictionary about this url content\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if page_url is None or html_cont is None:\n return\n soup = BeautifulSoup(html_cont, 'html.parser')\n new_urls = self._get_new_urls(page_url, soup)\n new_data = self._get_new_data(page_url, soup)\n return (new_urls, new_data)\n<|end_body_0|>\n\n<|body_start_1|>\n new_urls = set()\n links = soup.find_all('a', href=re.compile('/item/\\\\w+'))\n for link in links:\n new_url = link['href']\n new_full_url = urljoin(page_url, new_url)\n new_urls.add(new_full_url)\n return new_urls\n<|end_body_1|>\n\n<|body_start_2|>\n data = {}\n data['url'] = page_url\n title = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')\n data['title'] = title.get_text()\n summary = soup.find('div', class_='lemma-summary')\n data['summary'] = summary.get_text()\n return data\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000446", "length_bytes": 5667, "license_type": "no_license", "methods": [{"docstring": "parse a given html page :param page_url: the url path :param html_cont: the content of this url :return: a turple about new urls and the dictionary of this url", "name": "parser", "signature": "def parser(self, page_url, html_cont)"}, {"docstring": "get some new urls which need to download through this page url :param page_url: the given url :param soup: the object of BeautifulSoup :return: the new urls set need to download", "name": "_get_new_urls", "signature": "def _get_new_urls(self, page_url, soup)"}, {"docstring": "get content dictionary by given page_url :param page_url: the given url :param soup: the 
object of BeautifulSoup :return: the dictionary about this url content", "name": "_get_new_data", "signature": "def _get_new_data(self, page_url, soup)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004609", "prompt": "Implement the Python class `HtmlParser` described below.\n\nClass description:\nHTML解析器\n\nMethod signatures and docstrings:\n- def parser(self, page_url, html_cont): parse a given html page :param page_url: the url path :param html_cont: the content of this url :return: a turple about new urls and the dictionary of this url\n- def _get_new_urls(self, page_url, soup): get some new urls which need to download through this page url :param page_url: the given url :param soup: the object of BeautifulSoup :return: the new urls set need to download\n- def _get_new_data(self, page_url, soup): get content dictionary by given page_url :param page_url: the given url :param soup: the object of BeautifulSoup :return: the dictionary about this url content", "prompted_full_text": "Implement the Python class `HtmlParser` described below.\n\nClass description:\nHTML解析器\n\nMethod signatures and docstrings:\n- def parser(self, page_url, html_cont): parse a given html page :param page_url: the url path :param html_cont: the content of this url :return: a turple about new urls and the dictionary of this url\n- def _get_new_urls(self, page_url, soup): get some new urls which need to download through this page url :param page_url: the given url :param soup: the object of BeautifulSoup :return: the new urls set need to download\n- def _get_new_data(self, page_url, soup): get content dictionary by given page_url :param page_url: the given url :param soup: the object of BeautifulSoup :return: the dictionary about this url content\n\n<|skeleton|>\nclass HtmlParser:\n \"\"\"HTML解析器\"\"\"\n\n def parser(self, page_url, html_cont):\n \"\"\"parse a given html page :param page_url: the url path :param html_cont: the content of this url :return: a turple about new urls and the dictionary of this url\"\"\"\n <|body_0|>\n\n def _get_new_urls(self, page_url, soup):\n \"\"\"get some new urls which need to download through this page url :param page_url: the given url :param soup: the object of BeautifulSoup :return: the new urls set need to download\"\"\"\n <|body_1|>\n\n def _get_new_data(self, page_url, soup):\n \"\"\"get content dictionary by given page_url :param page_url: the given url :param soup: the object of BeautifulSoup :return: the dictionary about this url content\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if page_url is None or html_cont is None:\n return\n soup = BeautifulSoup(html_cont, 'html.parser')\n new_urls = self._get_new_urls(page_url, soup)\n new_data = self._get_new_data(page_url, soup)\n return (new_urls, new_data)\n<|end_body_0|>\n\n<|body_start_1|>\n new_urls = set()\n links = soup.find_all('a', href=re.compile('/item/\\\\w+'))\n for link in links:\n new_url = link['href']\n new_full_url = urljoin(page_url, new_url)\n new_urls.add(new_full_url)\n return new_urls\n<|end_body_1|>\n\n<|body_start_2|>\n data = {}\n data['url'] = page_url\n title = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')\n data['title'] = title.get_text()\n summary = soup.find('div', class_='lemma-summary')\n data['summary'] = summary.get_text()\n return data\n<|end_body_2|>\n", "revision_id": "673993d4d197138e89c2952d2be64b95463b19e9", "skeleton": "<|skeleton|>\nclass HtmlParser:\n \"\"\"HTML解析器\"\"\"\n\n def parser(self, page_url, html_cont):\n \"\"\"parse 
a given html page :param page_url: the url path :param html_cont: the content of this url :return: a turple about new urls and the dictionary of this url\"\"\"\n <|body_0|>\n\n def _get_new_urls(self, page_url, soup):\n \"\"\"get some new urls which need to download through this page url :param page_url: the given url :param soup: the object of BeautifulSoup :return: the new urls set need to download\"\"\"\n <|body_1|>\n\n def _get_new_data(self, page_url, soup):\n \"\"\"get content dictionary by given page_url :param page_url: the given url :param soup: the object of BeautifulSoup :return: the dictionary about this url content\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class HtmlParser:\n \"\"\"HTML解析器\"\"\"\n\n def parser(self, page_url, html_cont):\n \"\"\"parse a given html page :param page_url: the url path :param html_cont: the content of this url :return: a turple about new urls and the dictionary of this url\"\"\"\n if page_url is None or html_cont is None:\n return\n soup = BeautifulSoup(html_cont, 'html.parser')\n new_urls = self._get_new_urls(page_url, soup)\n new_data = self._get_new_data(page_url, soup)\n return (new_urls, new_data)\n\n def _get_new_urls(self, page_url, soup):\n \"\"\"get some new urls which need to download through this page url :param page_url: the given url :param soup: the object of BeautifulSoup :return: the new urls set need to download\"\"\"\n new_urls = set()\n links = soup.find_all('a', href=re.compile('/item/\\\\w+'))\n for link in links:\n new_url = link['href']\n new_full_url = urljoin(page_url, new_url)\n new_urls.add(new_full_url)\n return new_urls\n\n def _get_new_data(self, page_url, soup):\n \"\"\"get content dictionary by given page_url :param page_url: the given url :param soup: the object of BeautifulSoup :return: the dictionary about this url content\"\"\"\n data = {}\n data['url'] = page_url\n title = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')\n data['title'] = title.get_text()\n summary = soup.find('div', class_='lemma-summary')\n data['summary'] = summary.get_text()\n return data\n", "source": "the_stack_v2_python_sparse", "source_path": "spider/baidubaike_spider.py", "source_repo": "XiDian-ChenMiao/python-master", "split": "val", "star_events_count": 0} {"blob_id": "1f1ce2c9c565816e0c806c3da2b884d1d71956e7", "bodies": ["if len(nums) < k:\n return False\ntotal = sum(nums)\nif total % k != 0:\n return False\ntarget = total / k\nused = [0] * len(nums)\ns = self.backtrack(k, 0, nums, 0, used, target)\nreturn s", "if k == 0:\n return True\nif cur_bucket_total == target:\n return self.backtrack(k - 1, 0, nums, 0, used, target)\nfor i in range(start, len(nums)):\n if used[i]:\n continue\n if nums[i] + cur_bucket_total > target:\n continue\n used[i] = True\n cur_bucket_total += nums[i]\n if self.backtrack(k, cur_bucket_total, nums, i + 1, used, target):\n return True\n used[i] = False\n cur_bucket_total -= nums[i]\nreturn False"], "bodies_text": "<|body_start_0|>\n if len(nums) < k:\n return False\n total = sum(nums)\n if total % k != 0:\n return False\n target = total / k\n used = [0] * len(nums)\n s = self.backtrack(k, 0, nums, 0, used, target)\n return s\n<|end_body_0|>\n\n<|body_start_1|>\n if k == 0:\n return True\n if cur_bucket_total == target:\n return self.backtrack(k - 1, 0, nums, 0, used, target)\n for i in range(start, len(nums)):\n if used[i]:\n continue\n if nums[i] + cur_bucket_total > 
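The `HtmlParser` record above (its class docstring "HTML解析器" means "HTML parser") extracts Baike item links with a regex over `href` attributes and resolves them against the page URL. A self-contained sketch of that link-extraction step on an inline HTML snippet, so it runs without network access (requires `beautifulsoup4`; the snippet and URLs are made up):

```python
import re
from urllib.parse import urljoin

from bs4 import BeautifulSoup  # pip install beautifulsoup4

html = """
<html><body>
  <a href="/item/Python">Python</a>
  <a href="/item/Guido">Guido</a>
  <a href="/other/ignored">ignored</a>
</body></html>
"""

page_url = 'https://baike.baidu.com/item/Example'
soup = BeautifulSoup(html, 'html.parser')
new_urls = set()
for link in soup.find_all('a', href=re.compile(r'/item/\w+')):
    # Resolve relative hrefs against the page URL, as the record's
    # _get_new_urls does with urljoin.
    new_urls.add(urljoin(page_url, link['href']))

print(sorted(new_urls))
# ['https://baike.baidu.com/item/Guido', 'https://baike.baidu.com/item/Python']
```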
target:\n continue\n used[i] = True\n cur_bucket_total += nums[i]\n if self.backtrack(k, cur_bucket_total, nums, i + 1, used, target):\n return True\n used[i] = False\n cur_bucket_total -= nums[i]\n return False\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def is_possible_divide(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: bool\"\"\"\n <|body_0|>\n\n def backtrack(self, k, cur_bucket_total, nums, start, used, target):\n \"\"\"@param k: 待选择的桶编号 @param cur_bucket_total: 当前桶已经装的数字之和 @param nums: 待选择的数字列表 @param used: 已经选择过的索引 @param start: 开始遍历的位置 @param target: 目标值\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(nums) < k:\n return False\n total = sum(nums)\n if total % k != 0:\n return False\n target = total / k\n used = [0] * len(nums)\n s = self.backtrack(k, 0, nums, 0, used, target)\n return s\n<|end_body_0|>\n\n<|body_start_1|>\n if k == 0:\n return True\n if cur_bucket_total == target:\n return self.backtrack(k - 1, 0, nums, 0, used, target)\n for i in range(start, len(nums)):\n if used[i]:\n continue\n if nums[i] + cur_bucket_total > target:\n continue\n used[i] = True\n cur_bucket_total += nums[i]\n if self.backtrack(k, cur_bucket_total, nums, i + 1, used, target):\n return True\n used[i] = False\n cur_bucket_total -= nums[i]\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000447", "length_bytes": 2042, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :type k: int :rtype: bool", "name": "is_possible_divide", "signature": "def is_possible_divide(self, nums, k)"}, {"docstring": "@param k: 待选择的桶编号 @param cur_bucket_total: 当前桶已经装的数字之和 @param nums: 待选择的数字列表 @param used: 已经选择过的索引 @param start: 开始遍历的位置 @param target: 目标值", "name": "backtrack", "signature": "def backtrack(self, k, cur_bucket_total, nums, start, used, target)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def is_possible_divide(self, nums, k): :type nums: List[int] :type k: int :rtype: bool\n- def backtrack(self, k, cur_bucket_total, nums, start, used, target): @param k: 待选择的桶编号 @param cur_bucket_total: 当前桶已经装的数字之和 @param nums: 待选择的数字列表 @param used: 已经选择过的索引 @param start: 开始遍历的位置 @param target: 目标值", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def is_possible_divide(self, nums, k): :type nums: List[int] :type k: int :rtype: bool\n- def backtrack(self, k, cur_bucket_total, nums, start, used, target): @param k: 待选择的桶编号 @param cur_bucket_total: 当前桶已经装的数字之和 @param nums: 待选择的数字列表 @param used: 已经选择过的索引 @param start: 开始遍历的位置 @param target: 目标值\n\n<|skeleton|>\nclass Solution:\n\n def is_possible_divide(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: bool\"\"\"\n <|body_0|>\n\n def backtrack(self, k, cur_bucket_total, nums, start, used, target):\n \"\"\"@param k: 待选择的桶编号 @param cur_bucket_total: 当前桶已经装的数字之和 @param nums: 待选择的数字列表 @param used: 已经选择过的索引 @param start: 开始遍历的位置 @param target: 目标值\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(nums) < k:\n return False\n total = sum(nums)\n if total % k != 0:\n return False\n target = total / k\n used = [0] * len(nums)\n s = self.backtrack(k, 0, nums, 
0, used, target)\n return s\n<|end_body_0|>\n\n<|body_start_1|>\n if k == 0:\n return True\n if cur_bucket_total == target:\n return self.backtrack(k - 1, 0, nums, 0, used, target)\n for i in range(start, len(nums)):\n if used[i]:\n continue\n if nums[i] + cur_bucket_total > target:\n continue\n used[i] = True\n cur_bucket_total += nums[i]\n if self.backtrack(k, cur_bucket_total, nums, i + 1, used, target):\n return True\n used[i] = False\n cur_bucket_total -= nums[i]\n return False\n<|end_body_1|>\n", "revision_id": "5ba3465ba9c85955eac188e1e3793a981de712e7", "skeleton": "<|skeleton|>\nclass Solution:\n\n def is_possible_divide(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: bool\"\"\"\n <|body_0|>\n\n def backtrack(self, k, cur_bucket_total, nums, start, used, target):\n \"\"\"@param k: 待选择的桶编号 @param cur_bucket_total: 当前桶已经装的数字之和 @param nums: 待选择的数字列表 @param used: 已经选择过的索引 @param start: 开始遍历的位置 @param target: 目标值\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def is_possible_divide(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: bool\"\"\"\n if len(nums) < k:\n return False\n total = sum(nums)\n if total % k != 0:\n return False\n target = total / k\n used = [0] * len(nums)\n s = self.backtrack(k, 0, nums, 0, used, target)\n return s\n\n def backtrack(self, k, cur_bucket_total, nums, start, used, target):\n \"\"\"@param k: 待选择的桶编号 @param cur_bucket_total: 当前桶已经装的数字之和 @param nums: 待选择的数字列表 @param used: 已经选择过的索引 @param start: 开始遍历的位置 @param target: 目标值\"\"\"\n if k == 0:\n return True\n if cur_bucket_total == target:\n return self.backtrack(k - 1, 0, nums, 0, used, target)\n for i in range(start, len(nums)):\n if used[i]:\n continue\n if nums[i] + cur_bucket_total > target:\n continue\n used[i] = True\n cur_bucket_total += nums[i]\n if self.backtrack(k, cur_bucket_total, nums, i + 1, used, target):\n return True\n used[i] = False\n cur_bucket_total -= nums[i]\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "backtrack/698_划分为k个相等的子集.py", "source_repo": "SilvesSun/learn-algorithm-in-python", "split": "val", "star_events_count": 0} {"blob_id": "db764bd8f2b1f941f276c0f489b22c6ea1ff800e", "bodies": ["try:\n val = field.data.strip()\n if val:\n float(val)\n return True\nexcept ValueError:\n raise ValidationError('Invalid number provided(only numbers and 1 period allowed)')", "if not validator_names:\n return field\nfor i in xrange(0, len(validator_names)):\n if validator_names[i] == 'DataRequired':\n validator_names[i] = validators.DataRequired\nfield.validators = [x for x in field.validators if not type(x) in validator_names]\nreturn field"], "bodies_text": "<|body_start_0|>\n try:\n val = field.data.strip()\n if val:\n float(val)\n return True\n except ValueError:\n raise ValidationError('Invalid number provided(only numbers and 1 period allowed)')\n<|end_body_0|>\n\n<|body_start_1|>\n if not validator_names:\n return field\n for i in xrange(0, len(validator_names)):\n if validator_names[i] == 'DataRequired':\n validator_names[i] = validators.DataRequired\n field.validators = [x for x in field.validators if not type(x) in validator_names]\n return field\n<|end_body_1|>\n", "class_docstring": "Form to add a new media item", "class_name": "AddMediaForm", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AddMediaForm:\n \"\"\"Form to add a 
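The record above implements the classic bucket-by-bucket backtracking for partitioning a list into k equal-sum subsets; its Chinese parameter docstrings translate roughly as: k = index of the bucket being filled, cur_bucket_total = running sum of that bucket, used = indices already taken, start = scan position, target = required per-bucket sum. One pruning it omits is sorting descending first, which usually shrinks the search dramatically; a compact standalone version with that extra step (and an integer `//` target instead of the record's float `/`):

```python
def can_partition_k_subsets(nums, k):
    """Backtracking partition into k equal-sum buckets, filled one at a time.
    Same scheme as the record above, plus a descending sort for pruning."""
    total = sum(nums)
    if k <= 0 or len(nums) < k or total % k:
        return False
    target = total // k
    nums = sorted(nums, reverse=True)  # big items first: fail fast
    if nums[0] > target:
        return False
    used = [False] * len(nums)

    def fill(buckets_left, current, start):
        if buckets_left == 0:
            return True
        if current == target:
            # Bucket complete; start the next one from index 0.
            return fill(buckets_left - 1, 0, 0)
        for i in range(start, len(nums)):
            if used[i] or current + nums[i] > target:
                continue
            used[i] = True
            if fill(buckets_left, current + nums[i], i + 1):
                return True
            used[i] = False
        return False

    return fill(k, 0, 0)


assert can_partition_k_subsets([4, 3, 2, 3, 5, 2, 1], 4) is True
assert can_partition_k_subsets([1, 2, 3, 4], 3) is False
```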
new media item\"\"\"\n\n def isNum(form, field):\n \"\"\"Check if the field\"s value is a number(integer or floating value)\"\"\"\n <|body_0|>\n\n def removeValidators(self, field, validator_names=None):\n \"\"\"Remove validator from the field's validator list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n val = field.data.strip()\n if val:\n float(val)\n return True\n except ValueError:\n raise ValidationError('Invalid number provided(only numbers and 1 period allowed)')\n<|end_body_0|>\n\n<|body_start_1|>\n if not validator_names:\n return field\n for i in xrange(0, len(validator_names)):\n if validator_names[i] == 'DataRequired':\n validator_names[i] = validators.DataRequired\n field.validators = [x for x in field.validators if not type(x) in validator_names]\n return field\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000448", "length_bytes": 4448, "license_type": "no_license", "methods": [{"docstring": "Check if the field\"s value is a number(integer or floating value)", "name": "isNum", "signature": "def isNum(form, field)"}, {"docstring": "Remove validator from the field's validator list", "name": "removeValidators", "signature": "def removeValidators(self, field, validator_names=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000797", "prompt": "Implement the Python class `AddMediaForm` described below.\n\nClass description:\nForm to add a new media item\n\nMethod signatures and docstrings:\n- def isNum(form, field): Check if the field\"s value is a number(integer or floating value)\n- def removeValidators(self, field, validator_names=None): Remove validator from the field's validator list", "prompted_full_text": "Implement the Python class `AddMediaForm` described below.\n\nClass description:\nForm to add a new media item\n\nMethod signatures and docstrings:\n- def isNum(form, field): Check if the field\"s value is a number(integer or floating value)\n- def removeValidators(self, field, validator_names=None): Remove validator from the field's validator list\n\n<|skeleton|>\nclass AddMediaForm:\n \"\"\"Form to add a new media item\"\"\"\n\n def isNum(form, field):\n \"\"\"Check if the field\"s value is a number(integer or floating value)\"\"\"\n <|body_0|>\n\n def removeValidators(self, field, validator_names=None):\n \"\"\"Remove validator from the field's validator list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n val = field.data.strip()\n if val:\n float(val)\n return True\n except ValueError:\n raise ValidationError('Invalid number provided(only numbers and 1 period allowed)')\n<|end_body_0|>\n\n<|body_start_1|>\n if not validator_names:\n return field\n for i in xrange(0, len(validator_names)):\n if validator_names[i] == 'DataRequired':\n validator_names[i] = validators.DataRequired\n field.validators = [x for x in field.validators if not type(x) in validator_names]\n return field\n<|end_body_1|>\n", "revision_id": "320ae68ce21b24dfa5902e8e5b6f4bb0cf1d504e", "skeleton": "<|skeleton|>\nclass AddMediaForm:\n \"\"\"Form to add a new media item\"\"\"\n\n def isNum(form, field):\n \"\"\"Check if the field\"s value is a number(integer or floating value)\"\"\"\n <|body_0|>\n\n def removeValidators(self, field, validator_names=None):\n \"\"\"Remove validator from the field's validator list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AddMediaForm:\n \"\"\"Form to add a new media 
item\"\"\"\n\n def isNum(form, field):\n \"\"\"Check if the field\"s value is a number(integer or floating value)\"\"\"\n try:\n val = field.data.strip()\n if val:\n float(val)\n return True\n except ValueError:\n raise ValidationError('Invalid number provided(only numbers and 1 period allowed)')\n\n def removeValidators(self, field, validator_names=None):\n \"\"\"Remove validator from the field's validator list\"\"\"\n if not validator_names:\n return field\n for i in xrange(0, len(validator_names)):\n if validator_names[i] == 'DataRequired':\n validator_names[i] = validators.DataRequired\n field.validators = [x for x in field.validators if not type(x) in validator_names]\n return field\n", "source": "the_stack_v2_python_sparse", "source_path": "mad/modules/media/forms.py", "source_repo": "jorluft/fla", "split": "val", "star_events_count": 0} {"blob_id": "d01e533c15be3ffa5d7717e6909ec649a258309c", "bodies": ["self.__ops = ops\nself.__nops = len(ops)\nfor iop in range(self.__nops):\n if not isinstance(self.__ops[iop], operator):\n raise Exception('Elements of ops list must be of type operator')\nif self.__nops != len(dims):\n raise Exception('Number of dimensions (%d) must equal number of operators (%d)' % (len(dims), self.__nops))\nself.__ddim = dims[-1]['nrows']\nfor idim in range(len(dims) - 1):\n if np.prod(dims[idim]['nrows']) != np.prod(dims[idim + 1]['nrows']):\n print('Operator %d has %d rows and operator %d has %d rows' % (idim, np.prod(dims[idim]['nrows']), idim + 1, np.prod(dims[idim + 1]['nrows'])))\n raise Exception(\"Dimensions of ops don't match\")\nself.__dims = dims", "totaldims = {}\ntotaldims['nrows'] = self.__ddim\ntotaldims['ncols'] = 0\nfor iop in range(self.__nops):\n totaldims['ncols'] += dims[iop]['ncols']\nreturn totaldims", "if not isinstance(mod, list):\n raise Exception('Input model must be a list of numpy arrays')\nif len(mod) != self.__nops:\n raise Exception('Number of models in list must be same as number of operators')\nif dat.shape != self.__ddim:\n raise Exception('Output data does not match row operator')\niadd = add\nfor iop in range(self.__nops):\n self.__ops[iop].forward(iadd, mod[iop], dat)\n iadd = True", "if not isinstance(mod, list):\n raise Exception('Output model must be a list of numpy arrays')\nif len(mod) != self.__nops:\n raise Exception('Number of models in list must be same as number of operators')\nif dat.shape != self.__ddim:\n raise Exception('Output data does not match row operator')\nfor iop in range(self.__nops):\n self.__ops[iop].forward(add, m[iop], data)"], "bodies_text": "<|body_start_0|>\n self.__ops = ops\n self.__nops = len(ops)\n for iop in range(self.__nops):\n if not isinstance(self.__ops[iop], operator):\n raise Exception('Elements of ops list must be of type operator')\n if self.__nops != len(dims):\n raise Exception('Number of dimensions (%d) must equal number of operators (%d)' % (len(dims), self.__nops))\n self.__ddim = dims[-1]['nrows']\n for idim in range(len(dims) - 1):\n if np.prod(dims[idim]['nrows']) != np.prod(dims[idim + 1]['nrows']):\n print('Operator %d has %d rows and operator %d has %d rows' % (idim, np.prod(dims[idim]['nrows']), idim + 1, np.prod(dims[idim + 1]['nrows'])))\n raise Exception(\"Dimensions of ops don't match\")\n self.__dims = dims\n<|end_body_0|>\n\n<|body_start_1|>\n totaldims = {}\n totaldims['nrows'] = self.__ddim\n totaldims['ncols'] = 0\n for iop in range(self.__nops):\n totaldims['ncols'] += dims[iop]['ncols']\n return totaldims\n<|end_body_1|>\n\n<|body_start_2|>\n if not 
isinstance(mod, list):\n raise Exception('Input model must be a list of numpy arrays')\n if len(mod) != self.__nops:\n raise Exception('Number of models in list must be same as number of operators')\n if dat.shape != self.__ddim:\n raise Exception('Output data does not match row operator')\n iadd = add\n for iop in range(self.__nops):\n self.__ops[iop].forward(iadd, mod[iop], dat)\n iadd = True\n<|end_body_2|>\n\n<|body_start_3|>\n if not isinstance(mod, list):\n raise Exception('Output model must be a list of numpy arrays')\n if len(mod) != self.__nops:\n raise Exception('Number of models in list must be same as number of operators')\n if dat.shape != self.__ddim:\n raise Exception('Output data does not match row operator')\n for iop in range(self.__nops):\n self.__ops[iop].forward(add, m[iop], data)\n<|end_body_3|>\n", "class_docstring": "Row operator", "class_name": "rowop", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass rowop:\n \"\"\"Row operator\"\"\"\n\n def __init__(self, ops, dims):\n \"\"\"rowop constructor Parameters: ops - a list of operators used to form the row operator dims - a list of dictionaries that contain the dimensions of the inputs and outputs of the arrays For example dims = [{'nrows': 10, 'ncols': 10},...] Note that the row operator will be formed in the order in which the operators are supplied. For example ops = [A,B,C,...] will result in [A,B,C,...][ma] [mb] [mc] [::]\"\"\"\n <|body_0|>\n\n def totaldims(self):\n \"\"\"Returns the total dims of the row operator\"\"\"\n <|body_1|>\n\n def forward(self, add, mod, dat):\n \"\"\"Applies the forward of a generic row operator Parameters: add - whether to add to the total output mod - a list of input models [mc,mb,ma] dat - output data vector\"\"\"\n <|body_2|>\n\n def adjoint(self, add, mod, dat):\n \"\"\"Applies the adjoint of a generic row operator (column operator) Parameters: add - whether to add to the total output mod - a list of output models [mc,mb,ma] dat - input data vector\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__ops = ops\n self.__nops = len(ops)\n for iop in range(self.__nops):\n if not isinstance(self.__ops[iop], operator):\n raise Exception('Elements of ops list must be of type operator')\n if self.__nops != len(dims):\n raise Exception('Number of dimensions (%d) must equal number of operators (%d)' % (len(dims), self.__nops))\n self.__ddim = dims[-1]['nrows']\n for idim in range(len(dims) - 1):\n if np.prod(dims[idim]['nrows']) != np.prod(dims[idim + 1]['nrows']):\n print('Operator %d has %d rows and operator %d has %d rows' % (idim, np.prod(dims[idim]['nrows']), idim + 1, np.prod(dims[idim + 1]['nrows'])))\n raise Exception(\"Dimensions of ops don't match\")\n self.__dims = dims\n<|end_body_0|>\n\n<|body_start_1|>\n totaldims = {}\n totaldims['nrows'] = self.__ddim\n totaldims['ncols'] = 0\n for iop in range(self.__nops):\n totaldims['ncols'] += dims[iop]['ncols']\n return totaldims\n<|end_body_1|>\n\n<|body_start_2|>\n if not isinstance(mod, list):\n raise Exception('Input model must be a list of numpy arrays')\n if len(mod) != self.__nops:\n raise Exception('Number of models in list must be same as number of operators')\n if dat.shape != self.__ddim:\n raise Exception('Output data does not match row operator')\n iadd = add\n for iop in range(self.__nops):\n self.__ops[iop].forward(iadd, mod[iop], dat)\n iadd = True\n<|end_body_2|>\n\n<|body_start_3|>\n if not isinstance(mod, list):\n raise Exception('Output 
model must be a list of numpy arrays')\n if len(mod) != self.__nops:\n raise Exception('Number of models in list must be same as number of operators')\n if dat.shape != self.__ddim:\n raise Exception('Output data does not match row operator')\n for iop in range(self.__nops):\n self.__ops[iop].forward(add, m[iop], data)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000449", "length_bytes": 13837, "license_type": "no_license", "methods": [{"docstring": "rowop constructor Parameters: ops - a list of operators used to form the row operator dims - a list of dictionaries that contain the dimensions of the inputs and outputs of the arrays For example dims = [{'nrows': 10, 'ncols': 10},...] Note that the row operator will be formed in the order in which the operators are supplied. For example ops = [A,B,C,...] will result in [A,B,C,...][ma] [mb] [mc] [::]", "name": "__init__", "signature": "def __init__(self, ops, dims)"}, {"docstring": "Returns the total dims of the row operator", "name": "totaldims", "signature": "def totaldims(self)"}, {"docstring": "Applies the forward of a generic row operator Parameters: add - whether to add to the total output mod - a list of input models [mc,mb,ma] dat - output data vector", "name": "forward", "signature": "def forward(self, add, mod, dat)"}, {"docstring": "Applies the adjoint of a generic row operator (column operator) Parameters: add - whether to add to the total output mod - a list of output models [mc,mb,ma] dat - input data vector", "name": "adjoint", "signature": "def adjoint(self, add, mod, dat)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_005781", "prompt": "Implement the Python class `rowop` described below.\n\nClass description:\nRow operator\n\nMethod signatures and docstrings:\n- def __init__(self, ops, dims): rowop constructor Parameters: ops - a list of operators used to form the row operator dims - a list of dictionaries that contain the dimensions of the inputs and outputs of the arrays For example dims = [{'nrows': 10, 'ncols': 10},...] Note that the row operator will be formed in the order in which the operators are supplied. For example ops = [A,B,C,...] will result in [A,B,C,...][ma] [mb] [mc] [::]\n- def totaldims(self): Returns the total dims of the row operator\n- def forward(self, add, mod, dat): Applies the forward of a generic row operator Parameters: add - whether to add to the total output mod - a list of input models [mc,mb,ma] dat - output data vector\n- def adjoint(self, add, mod, dat): Applies the adjoint of a generic row operator (column operator) Parameters: add - whether to add to the total output mod - a list of output models [mc,mb,ma] dat - input data vector", "prompted_full_text": "Implement the Python class `rowop` described below.\n\nClass description:\nRow operator\n\nMethod signatures and docstrings:\n- def __init__(self, ops, dims): rowop constructor Parameters: ops - a list of operators used to form the row operator dims - a list of dictionaries that contain the dimensions of the inputs and outputs of the arrays For example dims = [{'nrows': 10, 'ncols': 10},...] Note that the row operator will be formed in the order in which the operators are supplied. For example ops = [A,B,C,...] 
will result in [A,B,C,...][ma] [mb] [mc] [::]\n- def totaldims(self): Returns the total dims of the row operator\n- def forward(self, add, mod, dat): Applies the forward of a generic row operator Parameters: add - whether to add to the total output mod - a list of input models [mc,mb,ma] dat - output data vector\n- def adjoint(self, add, mod, dat): Applies the adjoint of a generic row operator (column operator) Parameters: add - whether to add to the total output mod - a list of output models [mc,mb,ma] dat - input data vector\n\n<|skeleton|>\nclass rowop:\n \"\"\"Row operator\"\"\"\n\n def __init__(self, ops, dims):\n \"\"\"rowop constructor Parameters: ops - a list of operators used to form the row operator dims - a list of dictionaries that contain the dimensions of the inputs and outputs of the arrays For example dims = [{'nrows': 10, 'ncols': 10},...] Note that the row operator will be formed in the order in which the operators are supplied. For example ops = [A,B,C,...] will result in [A,B,C,...][ma] [mb] [mc] [::]\"\"\"\n <|body_0|>\n\n def totaldims(self):\n \"\"\"Returns the total dims of the row operator\"\"\"\n <|body_1|>\n\n def forward(self, add, mod, dat):\n \"\"\"Applies the forward of a generic row operator Parameters: add - whether to add to the total output mod - a list of input models [mc,mb,ma] dat - output data vector\"\"\"\n <|body_2|>\n\n def adjoint(self, add, mod, dat):\n \"\"\"Applies the adjoint of a generic row operator (column operator) Parameters: add - whether to add to the total output mod - a list of output models [mc,mb,ma] dat - input data vector\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__ops = ops\n self.__nops = len(ops)\n for iop in range(self.__nops):\n if not isinstance(self.__ops[iop], operator):\n raise Exception('Elements of ops list must be of type operator')\n if self.__nops != len(dims):\n raise Exception('Number of dimensions (%d) must equal number of operators (%d)' % (len(dims), self.__nops))\n self.__ddim = dims[-1]['nrows']\n for idim in range(len(dims) - 1):\n if np.prod(dims[idim]['nrows']) != np.prod(dims[idim + 1]['nrows']):\n print('Operator %d has %d rows and operator %d has %d rows' % (idim, np.prod(dims[idim]['nrows']), idim + 1, np.prod(dims[idim + 1]['nrows'])))\n raise Exception(\"Dimensions of ops don't match\")\n self.__dims = dims\n<|end_body_0|>\n\n<|body_start_1|>\n totaldims = {}\n totaldims['nrows'] = self.__ddim\n totaldims['ncols'] = 0\n for iop in range(self.__nops):\n totaldims['ncols'] += dims[iop]['ncols']\n return totaldims\n<|end_body_1|>\n\n<|body_start_2|>\n if not isinstance(mod, list):\n raise Exception('Input model must be a list of numpy arrays')\n if len(mod) != self.__nops:\n raise Exception('Number of models in list must be same as number of operators')\n if dat.shape != self.__ddim:\n raise Exception('Output data does not match row operator')\n iadd = add\n for iop in range(self.__nops):\n self.__ops[iop].forward(iadd, mod[iop], dat)\n iadd = True\n<|end_body_2|>\n\n<|body_start_3|>\n if not isinstance(mod, list):\n raise Exception('Output model must be a list of numpy arrays')\n if len(mod) != self.__nops:\n raise Exception('Number of models in list must be same as number of operators')\n if dat.shape != self.__ddim:\n raise Exception('Output data does not match row operator')\n for iop in range(self.__nops):\n self.__ops[iop].forward(add, m[iop], data)\n<|end_body_3|>\n", "revision_id": "32a303eddd13385d8778b8bb3b4fbbfbe78bea51", "skeleton": "<|skeleton|>\nclass rowop:\n 
\"\"\"Row operator\"\"\"\n\n def __init__(self, ops, dims):\n \"\"\"rowop constructor Parameters: ops - a list of operators used to form the row operator dims - a list of dictionaries that contain the dimensions of the inputs and outputs of the arrays For example dims = [{'nrows': 10, 'ncols': 10},...] Note that the row operator will be formed in the order in which the operators are supplied. For example ops = [A,B,C,...] will result in [A,B,C,...][ma] [mb] [mc] [::]\"\"\"\n <|body_0|>\n\n def totaldims(self):\n \"\"\"Returns the total dims of the row operator\"\"\"\n <|body_1|>\n\n def forward(self, add, mod, dat):\n \"\"\"Applies the forward of a generic row operator Parameters: add - whether to add to the total output mod - a list of input models [mc,mb,ma] dat - output data vector\"\"\"\n <|body_2|>\n\n def adjoint(self, add, mod, dat):\n \"\"\"Applies the adjoint of a generic row operator (column operator) Parameters: add - whether to add to the total output mod - a list of output models [mc,mb,ma] dat - input data vector\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class rowop:\n \"\"\"Row operator\"\"\"\n\n def __init__(self, ops, dims):\n \"\"\"rowop constructor Parameters: ops - a list of operators used to form the row operator dims - a list of dictionaries that contain the dimensions of the inputs and outputs of the arrays For example dims = [{'nrows': 10, 'ncols': 10},...] Note that the row operator will be formed in the order in which the operators are supplied. For example ops = [A,B,C,...] will result in [A,B,C,...][ma] [mb] [mc] [::]\"\"\"\n self.__ops = ops\n self.__nops = len(ops)\n for iop in range(self.__nops):\n if not isinstance(self.__ops[iop], operator):\n raise Exception('Elements of ops list must be of type operator')\n if self.__nops != len(dims):\n raise Exception('Number of dimensions (%d) must equal number of operators (%d)' % (len(dims), self.__nops))\n self.__ddim = dims[-1]['nrows']\n for idim in range(len(dims) - 1):\n if np.prod(dims[idim]['nrows']) != np.prod(dims[idim + 1]['nrows']):\n print('Operator %d has %d rows and operator %d has %d rows' % (idim, np.prod(dims[idim]['nrows']), idim + 1, np.prod(dims[idim + 1]['nrows'])))\n raise Exception(\"Dimensions of ops don't match\")\n self.__dims = dims\n\n def totaldims(self):\n \"\"\"Returns the total dims of the row operator\"\"\"\n totaldims = {}\n totaldims['nrows'] = self.__ddim\n totaldims['ncols'] = 0\n for iop in range(self.__nops):\n totaldims['ncols'] += dims[iop]['ncols']\n return totaldims\n\n def forward(self, add, mod, dat):\n \"\"\"Applies the forward of a generic row operator Parameters: add - whether to add to the total output mod - a list of input models [mc,mb,ma] dat - output data vector\"\"\"\n if not isinstance(mod, list):\n raise Exception('Input model must be a list of numpy arrays')\n if len(mod) != self.__nops:\n raise Exception('Number of models in list must be same as number of operators')\n if dat.shape != self.__ddim:\n raise Exception('Output data does not match row operator')\n iadd = add\n for iop in range(self.__nops):\n self.__ops[iop].forward(iadd, mod[iop], dat)\n iadd = True\n\n def adjoint(self, add, mod, dat):\n \"\"\"Applies the adjoint of a generic row operator (column operator) Parameters: add - whether to add to the total output mod - a list of output models [mc,mb,ma] dat - input data vector\"\"\"\n if not isinstance(mod, list):\n raise 
Exception('Output model must be a list of numpy arrays')\n if len(mod) != self.__nops:\n raise Exception('Number of models in list must be same as number of operators')\n if dat.shape != self.__ddim:\n raise Exception('Output data does not match row operator')\n for iop in range(self.__nops):\n self.__ops[iop].forward(add, m[iop], data)\n", "source": "the_stack_v2_python_sparse", "source_path": "opt/linopt/combops.py", "source_repo": "ke0m/scaas", "split": "val", "star_events_count": 2} {"blob_id": "81f363368eb9da41b4414fd7151288444367e500", "bodies": ["def dfs(i):\n if visited[i]:\n return 0\n visited[i] = True\n count = 1\n for j in range(len(M[i])):\n if M[i][j] == 1 and i != j:\n count += dfs(j)\n return count\ncount = 0\nvisited = [False] * len(M)\nfor i in range(len(M)):\n if dfs(i) > 0:\n count += 1\nreturn count", "class UnionFind:\n\n def __init__(self, n):\n self.set = list(range(n))\n self.count = n\n\n def find_set(self, x):\n if self.set[x] != x:\n self.set[x] = self.find_set(self.set[x])\n return self.set[x]\n\n def union_set(self, x, y):\n x_root, y_root = map(self.find_set, (x, y))\n if x_root != y_root:\n self.set[min(x_root, y_root)] = max(x_root, y_root)\n self.count -= 1\ncircles = UnionFind(len(M))\nfor i in range(len(M)):\n for j in range(len(M[i])):\n if M[i][j] and i != j:\n circles.union_set(i, j)\nreturn circles.count", "def traverse(i, j, cid):\n if i < 0 or i >= len(M) or j < 0 or (j >= len(M[i])):\n return\n if M[i][j] <= 0:\n return\n M[i][j] = cid\n traverse(i + 1, j, cid)\n traverse(i, j + 1, cid)\ncid = 0\nfor i in range(len(M)):\n for j in range(len(M[i])):\n if M[i][j] == 1:\n cid -= 1\n traverse(i, j, cid)\nreturn -cid", "def search(i, j):\n if i >= len(M) or j >= len(M[i]) or M[i][j] == 0:\n return 0\n M[i][j] = 0\n count = 1\n count += search(i + 1, j) + search(i - 1, j) + search(i, j + 1) + search(i, j - 1)\n return count\ncount = 0\nfor i in range(len(M)):\n for j in range(len(M[i])):\n if search(i, j) > 0:\n count += 1\nreturn count"], "bodies_text": "<|body_start_0|>\n def dfs(i):\n if visited[i]:\n return 0\n visited[i] = True\n count = 1\n for j in range(len(M[i])):\n if M[i][j] == 1 and i != j:\n count += dfs(j)\n return count\n count = 0\n visited = [False] * len(M)\n for i in range(len(M)):\n if dfs(i) > 0:\n count += 1\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n class UnionFind:\n\n def __init__(self, n):\n self.set = list(range(n))\n self.count = n\n\n def find_set(self, x):\n if self.set[x] != x:\n self.set[x] = self.find_set(self.set[x])\n return self.set[x]\n\n def union_set(self, x, y):\n x_root, y_root = map(self.find_set, (x, y))\n if x_root != y_root:\n self.set[min(x_root, y_root)] = max(x_root, y_root)\n self.count -= 1\n circles = UnionFind(len(M))\n for i in range(len(M)):\n for j in range(len(M[i])):\n if M[i][j] and i != j:\n circles.union_set(i, j)\n return circles.count\n<|end_body_1|>\n\n<|body_start_2|>\n def traverse(i, j, cid):\n if i < 0 or i >= len(M) or j < 0 or (j >= len(M[i])):\n return\n if M[i][j] <= 0:\n return\n M[i][j] = cid\n traverse(i + 1, j, cid)\n traverse(i, j + 1, cid)\n cid = 0\n for i in range(len(M)):\n for j in range(len(M[i])):\n if M[i][j] == 1:\n cid -= 1\n traverse(i, j, cid)\n return -cid\n<|end_body_2|>\n\n<|body_start_3|>\n def search(i, j):\n if i >= len(M) or j >= len(M[i]) or M[i][j] == 0:\n return 0\n M[i][j] = 0\n count = 1\n count += search(i + 1, j) + search(i - 1, j) + search(i, j + 1) + search(i, j - 1)\n return count\n count = 0\n for i in range(len(M)):\n for j in 
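The `rowop` record above applies A = [A1 A2 ...] by accumulating each block's forward into one output; its recorded `adjoint` body, though, still calls each block's `forward` and references names `m` and `data` that are not in scope — exactly the kind of slip a dot-product test catches, since for a correct pair <A m, d> must equal <m, A^T d> up to rounding. A NumPy sketch of that test for a two-block row operator built from plain matrices (the plain functions here stand in for the record's `operator` interface):

```python
import numpy as np

rng = np.random.default_rng(0)
A1 = rng.standard_normal((6, 4))
A2 = rng.standard_normal((6, 3))


def forward(mods, dat, add=False):
    """d += [A1 A2] [m1; m2] — accumulate each block, like rowop.forward."""
    if not add:
        dat[:] = 0.0
    dat += A1 @ mods[0]
    dat += A2 @ mods[1]


def adjoint(mods, dat, add=False):
    """[m1; m2] += [A1; A2]^T d — each block's adjoint applied to the same d."""
    if not add:
        for m in mods:
            m[:] = 0.0
    mods[0] += A1.T @ dat
    mods[1] += A2.T @ dat


# Dot-product test: <A m, d> == <m, A^T d> up to rounding error.
m = [rng.standard_normal(4), rng.standard_normal(3)]
d = rng.standard_normal(6)
Am = np.zeros(6)
forward(m, Am)
ATd = [np.zeros(4), np.zeros(3)]
adjoint(ATd, d)
lhs = Am @ d
rhs = sum(mi @ ai for mi, ai in zip(m, ATd))
assert np.isclose(lhs, rhs)
```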
range(len(M[i])):\n if search(i, j) > 0:\n count += 1\n return count\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def findCircleNum(self, M):\n \"\"\":type M: List[List[int]] :rtype: int\"\"\"\n <|body_0|>\n\n def findCircleNum_unionfind(self, M):\n \"\"\":type M: List[List[int]] :rtype: int\"\"\"\n <|body_1|>\n\n def findCircleNum_wrong(self, M):\n \"\"\":type M: List[List[int]] :rtype: int\"\"\"\n <|body_2|>\n\n def findCircleNum_toomuchrecursion(self, M):\n \"\"\":type M: List[List[int]] :rtype: int\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def dfs(i):\n if visited[i]:\n return 0\n visited[i] = True\n count = 1\n for j in range(len(M[i])):\n if M[i][j] == 1 and i != j:\n count += dfs(j)\n return count\n count = 0\n visited = [False] * len(M)\n for i in range(len(M)):\n if dfs(i) > 0:\n count += 1\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n class UnionFind:\n\n def __init__(self, n):\n self.set = list(range(n))\n self.count = n\n\n def find_set(self, x):\n if self.set[x] != x:\n self.set[x] = self.find_set(self.set[x])\n return self.set[x]\n\n def union_set(self, x, y):\n x_root, y_root = map(self.find_set, (x, y))\n if x_root != y_root:\n self.set[min(x_root, y_root)] = max(x_root, y_root)\n self.count -= 1\n circles = UnionFind(len(M))\n for i in range(len(M)):\n for j in range(len(M[i])):\n if M[i][j] and i != j:\n circles.union_set(i, j)\n return circles.count\n<|end_body_1|>\n\n<|body_start_2|>\n def traverse(i, j, cid):\n if i < 0 or i >= len(M) or j < 0 or (j >= len(M[i])):\n return\n if M[i][j] <= 0:\n return\n M[i][j] = cid\n traverse(i + 1, j, cid)\n traverse(i, j + 1, cid)\n cid = 0\n for i in range(len(M)):\n for j in range(len(M[i])):\n if M[i][j] == 1:\n cid -= 1\n traverse(i, j, cid)\n return -cid\n<|end_body_2|>\n\n<|body_start_3|>\n def search(i, j):\n if i >= len(M) or j >= len(M[i]) or M[i][j] == 0:\n return 0\n M[i][j] = 0\n count = 1\n count += search(i + 1, j) + search(i - 1, j) + search(i, j + 1) + search(i, j - 1)\n return count\n count = 0\n for i in range(len(M)):\n for j in range(len(M[i])):\n if search(i, j) > 0:\n count += 1\n return count\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000450", "length_bytes": 24866, "license_type": "no_license", "methods": [{"docstring": ":type M: List[List[int]] :rtype: int", "name": "findCircleNum", "signature": "def findCircleNum(self, M)"}, {"docstring": ":type M: List[List[int]] :rtype: int", "name": "findCircleNum_unionfind", "signature": "def findCircleNum_unionfind(self, M)"}, {"docstring": ":type M: List[List[int]] :rtype: int", "name": "findCircleNum_wrong", "signature": "def findCircleNum_wrong(self, M)"}, {"docstring": ":type M: List[List[int]] :rtype: int", "name": "findCircleNum_toomuchrecursion", "signature": "def findCircleNum_toomuchrecursion(self, M)"}], "n_methods": 4, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findCircleNum(self, M): :type M: List[List[int]] :rtype: int\n- def findCircleNum_unionfind(self, M): :type M: List[List[int]] :rtype: int\n- def findCircleNum_wrong(self, M): :type M: List[List[int]] :rtype: int\n- def findCircleNum_toomuchrecursion(self, M): :type M: List[List[int]] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass 
description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findCircleNum(self, M): :type M: List[List[int]] :rtype: int\n- def findCircleNum_unionfind(self, M): :type M: List[List[int]] :rtype: int\n- def findCircleNum_wrong(self, M): :type M: List[List[int]] :rtype: int\n- def findCircleNum_toomuchrecursion(self, M): :type M: List[List[int]] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def findCircleNum(self, M):\n \"\"\":type M: List[List[int]] :rtype: int\"\"\"\n <|body_0|>\n\n def findCircleNum_unionfind(self, M):\n \"\"\":type M: List[List[int]] :rtype: int\"\"\"\n <|body_1|>\n\n def findCircleNum_wrong(self, M):\n \"\"\":type M: List[List[int]] :rtype: int\"\"\"\n <|body_2|>\n\n def findCircleNum_toomuchrecursion(self, M):\n \"\"\":type M: List[List[int]] :rtype: int\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def dfs(i):\n if visited[i]:\n return 0\n visited[i] = True\n count = 1\n for j in range(len(M[i])):\n if M[i][j] == 1 and i != j:\n count += dfs(j)\n return count\n count = 0\n visited = [False] * len(M)\n for i in range(len(M)):\n if dfs(i) > 0:\n count += 1\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n class UnionFind:\n\n def __init__(self, n):\n self.set = list(range(n))\n self.count = n\n\n def find_set(self, x):\n if self.set[x] != x:\n self.set[x] = self.find_set(self.set[x])\n return self.set[x]\n\n def union_set(self, x, y):\n x_root, y_root = map(self.find_set, (x, y))\n if x_root != y_root:\n self.set[min(x_root, y_root)] = max(x_root, y_root)\n self.count -= 1\n circles = UnionFind(len(M))\n for i in range(len(M)):\n for j in range(len(M[i])):\n if M[i][j] and i != j:\n circles.union_set(i, j)\n return circles.count\n<|end_body_1|>\n\n<|body_start_2|>\n def traverse(i, j, cid):\n if i < 0 or i >= len(M) or j < 0 or (j >= len(M[i])):\n return\n if M[i][j] <= 0:\n return\n M[i][j] = cid\n traverse(i + 1, j, cid)\n traverse(i, j + 1, cid)\n cid = 0\n for i in range(len(M)):\n for j in range(len(M[i])):\n if M[i][j] == 1:\n cid -= 1\n traverse(i, j, cid)\n return -cid\n<|end_body_2|>\n\n<|body_start_3|>\n def search(i, j):\n if i >= len(M) or j >= len(M[i]) or M[i][j] == 0:\n return 0\n M[i][j] = 0\n count = 1\n count += search(i + 1, j) + search(i - 1, j) + search(i, j + 1) + search(i, j - 1)\n return count\n count = 0\n for i in range(len(M)):\n for j in range(len(M[i])):\n if search(i, j) > 0:\n count += 1\n return count\n<|end_body_3|>\n", "revision_id": "e60ba45fe2f2e5e3b3abfecec3db76f5ce1fde59", "skeleton": "<|skeleton|>\nclass Solution:\n\n def findCircleNum(self, M):\n \"\"\":type M: List[List[int]] :rtype: int\"\"\"\n <|body_0|>\n\n def findCircleNum_unionfind(self, M):\n \"\"\":type M: List[List[int]] :rtype: int\"\"\"\n <|body_1|>\n\n def findCircleNum_wrong(self, M):\n \"\"\":type M: List[List[int]] :rtype: int\"\"\"\n <|body_2|>\n\n def findCircleNum_toomuchrecursion(self, M):\n \"\"\":type M: List[List[int]] :rtype: int\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def findCircleNum(self, M):\n \"\"\":type M: List[List[int]] :rtype: int\"\"\"\n def dfs(i):\n if visited[i]:\n return 0\n visited[i] = True\n count = 1\n for j in range(len(M[i])):\n if M[i][j] == 1 and i != j:\n count += dfs(j)\n return count\n count = 0\n visited = [False] * len(M)\n for i in range(len(M)):\n if dfs(i) > 0:\n count += 1\n return count\n\n def findCircleNum_unionfind(self, M):\n 
\"\"\":type M: List[List[int]] :rtype: int\"\"\"\n class UnionFind:\n\n def __init__(self, n):\n self.set = list(range(n))\n self.count = n\n\n def find_set(self, x):\n if self.set[x] != x:\n self.set[x] = self.find_set(self.set[x])\n return self.set[x]\n\n def union_set(self, x, y):\n x_root, y_root = map(self.find_set, (x, y))\n if x_root != y_root:\n self.set[min(x_root, y_root)] = max(x_root, y_root)\n self.count -= 1\n circles = UnionFind(len(M))\n for i in range(len(M)):\n for j in range(len(M[i])):\n if M[i][j] and i != j:\n circles.union_set(i, j)\n return circles.count\n\n def findCircleNum_wrong(self, M):\n \"\"\":type M: List[List[int]] :rtype: int\"\"\"\n def traverse(i, j, cid):\n if i < 0 or i >= len(M) or j < 0 or (j >= len(M[i])):\n return\n if M[i][j] <= 0:\n return\n M[i][j] = cid\n traverse(i + 1, j, cid)\n traverse(i, j + 1, cid)\n cid = 0\n for i in range(len(M)):\n for j in range(len(M[i])):\n if M[i][j] == 1:\n cid -= 1\n traverse(i, j, cid)\n return -cid\n\n def findCircleNum_toomuchrecursion(self, M):\n \"\"\":type M: List[List[int]] :rtype: int\"\"\"\n def search(i, j):\n if i >= len(M) or j >= len(M[i]) or M[i][j] == 0:\n return 0\n M[i][j] = 0\n count = 1\n count += search(i + 1, j) + search(i - 1, j) + search(i, j + 1) + search(i, j - 1)\n return count\n count = 0\n for i in range(len(M)):\n for j in range(len(M[i])):\n if search(i, j) > 0:\n count += 1\n return count\n", "source": "the_stack_v2_python_sparse", "source_path": "src/lt_547.py", "source_repo": "oxhead/CodingYourWay", "split": "val", "star_events_count": 0} {"blob_id": "d009f1456d50d20ab77715744276f21133d6fd0b", "bodies": ["examples, _ = tfds.load('ted_hrlr_translate/pt_to_en', with_info=True, as_supervised=True)\nself.data_train = examples['train']\nself.data_valid = examples['validation']\nself.tokenizer_pt, self.tokenizer_en = self.tokenize_dataset(self.data_train)", "tokenizer_pt = tfds.features.text.SubwordTextEncoder.build_from_corpus((pt.numpy() for pt, en in data), target_vocab_size=2 ** 15)\ntokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus((en.numpy() for pt, en in data), target_vocab_size=2 ** 15)\nreturn (tokenizer_pt, tokenizer_en)"], "bodies_text": "<|body_start_0|>\n examples, _ = tfds.load('ted_hrlr_translate/pt_to_en', with_info=True, as_supervised=True)\n self.data_train = examples['train']\n self.data_valid = examples['validation']\n self.tokenizer_pt, self.tokenizer_en = self.tokenize_dataset(self.data_train)\n<|end_body_0|>\n\n<|body_start_1|>\n tokenizer_pt = tfds.features.text.SubwordTextEncoder.build_from_corpus((pt.numpy() for pt, en in data), target_vocab_size=2 ** 15)\n tokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus((en.numpy() for pt, en in data), target_vocab_size=2 ** 15)\n return (tokenizer_pt, tokenizer_en)\n<|end_body_1|>\n", "class_docstring": "Data set", "class_name": "Dataset", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Dataset:\n \"\"\"Data set\"\"\"\n\n def __init__(self):\n \"\"\"Data set\"\"\"\n <|body_0|>\n\n def tokenize_dataset(self, data):\n \"\"\"Data set\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n examples, _ = tfds.load('ted_hrlr_translate/pt_to_en', with_info=True, as_supervised=True)\n self.data_train = examples['train']\n self.data_valid = examples['validation']\n self.tokenizer_pt, self.tokenizer_en = self.tokenize_dataset(self.data_train)\n<|end_body_0|>\n\n<|body_start_1|>\n tokenizer_pt = 
tfds.features.text.SubwordTextEncoder.build_from_corpus((pt.numpy() for pt, en in data), target_vocab_size=2 ** 15)\n tokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus((en.numpy() for pt, en in data), target_vocab_size=2 ** 15)\n return (tokenizer_pt, tokenizer_en)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000451", "length_bytes": 1133, "license_type": "no_license", "methods": [{"docstring": "Data set", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Data set", "name": "tokenize_dataset", "signature": "def tokenize_dataset(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003944", "prompt": "Implement the Python class `Dataset` described below.\n\nClass description:\nData set\n\nMethod signatures and docstrings:\n- def __init__(self): Data set\n- def tokenize_dataset(self, data): Data set", "prompted_full_text": "Implement the Python class `Dataset` described below.\n\nClass description:\nData set\n\nMethod signatures and docstrings:\n- def __init__(self): Data set\n- def tokenize_dataset(self, data): Data set\n\n<|skeleton|>\nclass Dataset:\n \"\"\"Data set\"\"\"\n\n def __init__(self):\n \"\"\"Data set\"\"\"\n <|body_0|>\n\n def tokenize_dataset(self, data):\n \"\"\"Data set\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n examples, _ = tfds.load('ted_hrlr_translate/pt_to_en', with_info=True, as_supervised=True)\n self.data_train = examples['train']\n self.data_valid = examples['validation']\n self.tokenizer_pt, self.tokenizer_en = self.tokenize_dataset(self.data_train)\n<|end_body_0|>\n\n<|body_start_1|>\n tokenizer_pt = tfds.features.text.SubwordTextEncoder.build_from_corpus((pt.numpy() for pt, en in data), target_vocab_size=2 ** 15)\n tokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus((en.numpy() for pt, en in data), target_vocab_size=2 ** 15)\n return (tokenizer_pt, tokenizer_en)\n<|end_body_1|>\n", "revision_id": "8761eb876046ad3c0c3f85d98dbdca4007d93cd1", "skeleton": "<|skeleton|>\nclass Dataset:\n \"\"\"Data set\"\"\"\n\n def __init__(self):\n \"\"\"Data set\"\"\"\n <|body_0|>\n\n def tokenize_dataset(self, data):\n \"\"\"Data set\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Dataset:\n \"\"\"Data set\"\"\"\n\n def __init__(self):\n \"\"\"Data set\"\"\"\n examples, _ = tfds.load('ted_hrlr_translate/pt_to_en', with_info=True, as_supervised=True)\n self.data_train = examples['train']\n self.data_valid = examples['validation']\n self.tokenizer_pt, self.tokenizer_en = self.tokenize_dataset(self.data_train)\n\n def tokenize_dataset(self, data):\n \"\"\"Data set\"\"\"\n tokenizer_pt = tfds.features.text.SubwordTextEncoder.build_from_corpus((pt.numpy() for pt, en in data), target_vocab_size=2 ** 15)\n tokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus((en.numpy() for pt, en in data), target_vocab_size=2 ** 15)\n return (tokenizer_pt, tokenizer_en)\n", "source": "the_stack_v2_python_sparse", "source_path": "supervised_learning/0x12-transformer_apps/0-dataset.py", "source_repo": "oran2527/holbertonschool-machine_learning", "split": "val", "star_events_count": 0} {"blob_id": "8c42522c263eb8d0c4d6be04b4102fab36834681", "bodies": ["args = failed_parser.parse_args()\npage = args['page']\nper_page = args['per_page']\nsort_by = args['sort_by']\nsort_order = args['order']\nif sort_by == 'failure_time':\n sort_by = 'tof'\nif per_page > 
100:\n per_page = 100\ndescending = sort_order == 'desc'\nif per_page > 100:\n per_page = 100\nstart = per_page * (page - 1)\nstop = start + per_page\nkwargs = {'start': start, 'stop': stop, 'descending': descending, 'sort_by': sort_by, 'session': session}\ntotal_items = db.get_failures(session, count=True)\nif not total_items:\n return jsonify([])\nfailed_entries = [failed.to_dict() for failed in db.get_failures(**kwargs)]\ntotal_pages = int(ceil(total_items / float(per_page)))\nif page > total_pages:\n raise NotFoundError('page %s does not exist' % page)\nactual_size = min(per_page, len(failed_entries))\npagination = pagination_headers(total_pages, total_items, actual_size, request)\nrsp = jsonify(failed_entries)\nrsp.headers.extend(pagination)\nreturn rsp", "logger.debug('deleting all failed entries')\ndeleted = session.query(db.FailedEntry).delete()\nreturn success_response('successfully deleted %d failed entries' % deleted)"], "bodies_text": "<|body_start_0|>\n args = failed_parser.parse_args()\n page = args['page']\n per_page = args['per_page']\n sort_by = args['sort_by']\n sort_order = args['order']\n if sort_by == 'failure_time':\n sort_by = 'tof'\n if per_page > 100:\n per_page = 100\n descending = sort_order == 'desc'\n if per_page > 100:\n per_page = 100\n start = per_page * (page - 1)\n stop = start + per_page\n kwargs = {'start': start, 'stop': stop, 'descending': descending, 'sort_by': sort_by, 'session': session}\n total_items = db.get_failures(session, count=True)\n if not total_items:\n return jsonify([])\n failed_entries = [failed.to_dict() for failed in db.get_failures(**kwargs)]\n total_pages = int(ceil(total_items / float(per_page)))\n if page > total_pages:\n raise NotFoundError('page %s does not exist' % page)\n actual_size = min(per_page, len(failed_entries))\n pagination = pagination_headers(total_pages, total_items, actual_size, request)\n rsp = jsonify(failed_entries)\n rsp.headers.extend(pagination)\n return rsp\n<|end_body_0|>\n\n<|body_start_1|>\n logger.debug('deleting all failed entries')\n deleted = session.query(db.FailedEntry).delete()\n return success_response('successfully deleted %d failed entries' % deleted)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "RetryFailed", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RetryFailed:\n\n def get(self, session=None):\n \"\"\"List all failed entries\"\"\"\n <|body_0|>\n\n def delete(self, session=None):\n \"\"\"Clear all failed entries\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n args = failed_parser.parse_args()\n page = args['page']\n per_page = args['per_page']\n sort_by = args['sort_by']\n sort_order = args['order']\n if sort_by == 'failure_time':\n sort_by = 'tof'\n if per_page > 100:\n per_page = 100\n descending = sort_order == 'desc'\n if per_page > 100:\n per_page = 100\n start = per_page * (page - 1)\n stop = start + per_page\n kwargs = {'start': start, 'stop': stop, 'descending': descending, 'sort_by': sort_by, 'session': session}\n total_items = db.get_failures(session, count=True)\n if not total_items:\n return jsonify([])\n failed_entries = [failed.to_dict() for failed in db.get_failures(**kwargs)]\n total_pages = int(ceil(total_items / float(per_page)))\n if page > total_pages:\n raise NotFoundError('page %s does not exist' % page)\n actual_size = min(per_page, len(failed_entries))\n pagination = pagination_headers(total_pages, total_items, actual_size, request)\n rsp = jsonify(failed_entries)\n 
rsp.headers.extend(pagination)\n return rsp\n<|end_body_0|>\n\n<|body_start_1|>\n logger.debug('deleting all failed entries')\n deleted = session.query(db.FailedEntry).delete()\n return success_response('successfully deleted %d failed entries' % deleted)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000452", "length_bytes": 4968, "license_type": "permissive", "methods": [{"docstring": "List all failed entries", "name": "get", "signature": "def get(self, session=None)"}, {"docstring": "Clear all failed entries", "name": "delete", "signature": "def delete(self, session=None)"}], "n_methods": 2, "prompt": "Implement the Python class `RetryFailed` described below.\n\nClass description:\nImplement the RetryFailed class.\n\nMethod signatures and docstrings:\n- def get(self, session=None): List all failed entries\n- def delete(self, session=None): Clear all failed entries", "prompted_full_text": "Implement the Python class `RetryFailed` described below.\n\nClass description:\nImplement the RetryFailed class.\n\nMethod signatures and docstrings:\n- def get(self, session=None): List all failed entries\n- def delete(self, session=None): Clear all failed entries\n\n<|skeleton|>\nclass RetryFailed:\n\n def get(self, session=None):\n \"\"\"List all failed entries\"\"\"\n <|body_0|>\n\n def delete(self, session=None):\n \"\"\"Clear all failed entries\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n args = failed_parser.parse_args()\n page = args['page']\n per_page = args['per_page']\n sort_by = args['sort_by']\n sort_order = args['order']\n if sort_by == 'failure_time':\n sort_by = 'tof'\n if per_page > 100:\n per_page = 100\n descending = sort_order == 'desc'\n if per_page > 100:\n per_page = 100\n start = per_page * (page - 1)\n stop = start + per_page\n kwargs = {'start': start, 'stop': stop, 'descending': descending, 'sort_by': sort_by, 'session': session}\n total_items = db.get_failures(session, count=True)\n if not total_items:\n return jsonify([])\n failed_entries = [failed.to_dict() for failed in db.get_failures(**kwargs)]\n total_pages = int(ceil(total_items / float(per_page)))\n if page > total_pages:\n raise NotFoundError('page %s does not exist' % page)\n actual_size = min(per_page, len(failed_entries))\n pagination = pagination_headers(total_pages, total_items, actual_size, request)\n rsp = jsonify(failed_entries)\n rsp.headers.extend(pagination)\n return rsp\n<|end_body_0|>\n\n<|body_start_1|>\n logger.debug('deleting all failed entries')\n deleted = session.query(db.FailedEntry).delete()\n return success_response('successfully deleted %d failed entries' % deleted)\n<|end_body_1|>\n", "revision_id": "2b7e8314d103c94cf4552bd0152699eeca0ad159", "skeleton": "<|skeleton|>\nclass RetryFailed:\n\n def get(self, session=None):\n \"\"\"List all failed entries\"\"\"\n <|body_0|>\n\n def delete(self, session=None):\n \"\"\"Clear all failed entries\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RetryFailed:\n def get(self, session=None):\n \"\"\"List all failed entries\"\"\"\n args = failed_parser.parse_args()\n page = args['page']\n per_page = args['per_page']\n sort_by = args['sort_by']\n sort_order = args['order']\n if sort_by == 'failure_time':\n sort_by = 'tof'\n if per_page > 100:\n per_page = 100\n descending = sort_order == 'desc'\n if per_page > 100:\n per_page = 100\n start = per_page * (page - 1)\n stop = start + per_page\n kwargs = {'start': 
start, 'stop': stop, 'descending': descending, 'sort_by': sort_by, 'session': session}\n total_items = db.get_failures(session, count=True)\n if not total_items:\n return jsonify([])\n failed_entries = [failed.to_dict() for failed in db.get_failures(**kwargs)]\n total_pages = int(ceil(total_items / float(per_page)))\n if page > total_pages:\n raise NotFoundError('page %s does not exist' % page)\n actual_size = min(per_page, len(failed_entries))\n pagination = pagination_headers(total_pages, total_items, actual_size, request)\n rsp = jsonify(failed_entries)\n rsp.headers.extend(pagination)\n return rsp\n\n def delete(self, session=None):\n \"\"\"Clear all failed entries\"\"\"\n logger.debug('deleting all failed entries')\n deleted = session.query(db.FailedEntry).delete()\n return success_response('successfully deleted %d failed entries' % deleted)\n", "source": "the_stack_v2_python_sparse", "source_path": "flexget/components/failed/api.py", "source_repo": "BrutuZ/Flexget", "split": "val", "star_events_count": 1} {"blob_id": "81f5465f85b4d9047c2b66e4a317d4832f0c294f", "bodies": ["errors = {}\nif user_input is not None:\n try:\n token = await validate_input(self.hass, user_input)\n await self.async_set_unique_id(user_input['username'])\n return self.async_create_entry(title=user_input['username'], data={'username': user_input['username'], 'token': token})\n except Require2FA:\n self.user_pass = user_input\n return await self.async_step_2fa()\n except InvalidAuth:\n errors['base'] = 'invalid_auth'\n except Exception:\n _LOGGER.exception('Unexpected exception')\n errors['base'] = 'unknown'\nreturn self.async_show_form(step_id='user', data_schema=vol.Schema({vol.Required('username'): str, vol.Required('password'): str}), errors=errors)", "if user_input:\n return await self.async_step_user({**self.user_pass, **user_input})\nreturn self.async_show_form(step_id='2fa', data_schema=vol.Schema({vol.Required('2fa'): str}))"], "bodies_text": "<|body_start_0|>\n errors = {}\n if user_input is not None:\n try:\n token = await validate_input(self.hass, user_input)\n await self.async_set_unique_id(user_input['username'])\n return self.async_create_entry(title=user_input['username'], data={'username': user_input['username'], 'token': token})\n except Require2FA:\n self.user_pass = user_input\n return await self.async_step_2fa()\n except InvalidAuth:\n errors['base'] = 'invalid_auth'\n except Exception:\n _LOGGER.exception('Unexpected exception')\n errors['base'] = 'unknown'\n return self.async_show_form(step_id='user', data_schema=vol.Schema({vol.Required('username'): str, vol.Required('password'): str}), errors=errors)\n<|end_body_0|>\n\n<|body_start_1|>\n if user_input:\n return await self.async_step_user({**self.user_pass, **user_input})\n return self.async_show_form(step_id='2fa', data_schema=vol.Schema({vol.Required('2fa'): str}))\n<|end_body_1|>\n", "class_docstring": "Handle a config flow for Ring.", "class_name": "RingConfigFlow", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RingConfigFlow:\n \"\"\"Handle a config flow for Ring.\"\"\"\n\n async def async_step_user(self, user_input=None):\n \"\"\"Handle the initial step.\"\"\"\n <|body_0|>\n\n async def async_step_2fa(self, user_input=None):\n \"\"\"Handle 2fa step.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n errors = {}\n if user_input is not None:\n try:\n token = await validate_input(self.hass, user_input)\n await 
self.async_set_unique_id(user_input['username'])\n return self.async_create_entry(title=user_input['username'], data={'username': user_input['username'], 'token': token})\n except Require2FA:\n self.user_pass = user_input\n return await self.async_step_2fa()\n except InvalidAuth:\n errors['base'] = 'invalid_auth'\n except Exception:\n _LOGGER.exception('Unexpected exception')\n errors['base'] = 'unknown'\n return self.async_show_form(step_id='user', data_schema=vol.Schema({vol.Required('username'): str, vol.Required('password'): str}), errors=errors)\n<|end_body_0|>\n\n<|body_start_1|>\n if user_input:\n return await self.async_step_user({**self.user_pass, **user_input})\n return self.async_show_form(step_id='2fa', data_schema=vol.Schema({vol.Required('2fa'): str}))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000453", "length_bytes": 2684, "license_type": "permissive", "methods": [{"docstring": "Handle the initial step.", "name": "async_step_user", "signature": "async def async_step_user(self, user_input=None)"}, {"docstring": "Handle 2fa step.", "name": "async_step_2fa", "signature": "async def async_step_2fa(self, user_input=None)"}], "n_methods": 2, "prompt": "Implement the Python class `RingConfigFlow` described below.\n\nClass description:\nHandle a config flow for Ring.\n\nMethod signatures and docstrings:\n- async def async_step_user(self, user_input=None): Handle the initial step.\n- async def async_step_2fa(self, user_input=None): Handle 2fa step.", "prompted_full_text": "Implement the Python class `RingConfigFlow` described below.\n\nClass description:\nHandle a config flow for Ring.\n\nMethod signatures and docstrings:\n- async def async_step_user(self, user_input=None): Handle the initial step.\n- async def async_step_2fa(self, user_input=None): Handle 2fa step.\n\n<|skeleton|>\nclass RingConfigFlow:\n \"\"\"Handle a config flow for Ring.\"\"\"\n\n async def async_step_user(self, user_input=None):\n \"\"\"Handle the initial step.\"\"\"\n <|body_0|>\n\n async def async_step_2fa(self, user_input=None):\n \"\"\"Handle 2fa step.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n errors = {}\n if user_input is not None:\n try:\n token = await validate_input(self.hass, user_input)\n await self.async_set_unique_id(user_input['username'])\n return self.async_create_entry(title=user_input['username'], data={'username': user_input['username'], 'token': token})\n except Require2FA:\n self.user_pass = user_input\n return await self.async_step_2fa()\n except InvalidAuth:\n errors['base'] = 'invalid_auth'\n except Exception:\n _LOGGER.exception('Unexpected exception')\n errors['base'] = 'unknown'\n return self.async_show_form(step_id='user', data_schema=vol.Schema({vol.Required('username'): str, vol.Required('password'): str}), errors=errors)\n<|end_body_0|>\n\n<|body_start_1|>\n if user_input:\n return await self.async_step_user({**self.user_pass, **user_input})\n return self.async_show_form(step_id='2fa', data_schema=vol.Schema({vol.Required('2fa'): str}))\n<|end_body_1|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass RingConfigFlow:\n \"\"\"Handle a config flow for Ring.\"\"\"\n\n async def async_step_user(self, user_input=None):\n \"\"\"Handle the initial step.\"\"\"\n <|body_0|>\n\n async def async_step_2fa(self, user_input=None):\n \"\"\"Handle 2fa step.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": 
"class RingConfigFlow:\n \"\"\"Handle a config flow for Ring.\"\"\"\n\n async def async_step_user(self, user_input=None):\n \"\"\"Handle the initial step.\"\"\"\n errors = {}\n if user_input is not None:\n try:\n token = await validate_input(self.hass, user_input)\n await self.async_set_unique_id(user_input['username'])\n return self.async_create_entry(title=user_input['username'], data={'username': user_input['username'], 'token': token})\n except Require2FA:\n self.user_pass = user_input\n return await self.async_step_2fa()\n except InvalidAuth:\n errors['base'] = 'invalid_auth'\n except Exception:\n _LOGGER.exception('Unexpected exception')\n errors['base'] = 'unknown'\n return self.async_show_form(step_id='user', data_schema=vol.Schema({vol.Required('username'): str, vol.Required('password'): str}), errors=errors)\n\n async def async_step_2fa(self, user_input=None):\n \"\"\"Handle 2fa step.\"\"\"\n if user_input:\n return await self.async_step_user({**self.user_pass, **user_input})\n return self.async_show_form(step_id='2fa', data_schema=vol.Schema({vol.Required('2fa'): str}))\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/ring/config_flow.py", "source_repo": "home-assistant/core", "split": "val", "star_events_count": 35501} {"blob_id": "16a6433799f3af263c5added4e5d258efc8babc1", "bodies": ["self.prfs_d = extract_settings()\nself.logger = logger\nself.mag = mag\nself.scmp_d = scmp_d\nself.scmp_cf = scmp_cf\nself.sex_d = sex_d\nself.scamp_process()", "sex_cf = '{}_{}_{}_{}_{}'.format(self.sex_d['deblend_nthresh'], self.sex_d['analysis_thresh'], self.sex_d['detect_thresh'], self.sex_d['deblend_mincount'], self.sex_d['detect_minarea'])\nself.logger.info('Scamp process for magnitude {}'.format(self.mag))\nself.logger.info('Sextractor configuration: {}'.format(sex_cf))\nself.logger.info('Scamp configuration: {}'.format(self.scmp_cf))\nscmp_1 = 'scamp -c {}'.format(self.prfs_d['conf_scamp'])\nsex_loc = '{}/{}/CCDs/{}'.format(self.prfs_d['fits_dir'], self.mag, sex_cf)\nsex_output = 'mag_{}_CCD_x?_y?_d?.cat'.format(self.mag)\nscmp_2 = ' {}/{}'.format(sex_loc, sex_output)\nscmp_3 = ' -ASTREFCAT_NAME'\ncat_loc = '{}/{}/CCDs/{}'.format(self.prfs_d['fits_dir'], self.mag, sex_cf)\ncat_input = 'catalog_{}.cat'.format(self.mag)\nscmp_4 = ' {}/{}'.format(cat_loc, cat_input)\nscmp_5 = ' -PIXSCALE_MAXERR {}'.format(self.scmp_d['pixscale_maxerr'])\nscmp_6 = ' -POSANGLE_MAXERR {}'.format(self.scmp_d['posangle_maxerr'])\nscmp_7 = ' -POSITION_MAXERR {}'.format(self.scmp_d['position_maxerr'])\nscmp_8 = ' -CROSSID_RADIUS {}'.format(self.scmp_d['crossid_radius'])\ncats_dir = '{}/{}/{}/{}'.format(self.prfs_d['catalogs_dir'], self.mag, sex_cf, self.scmp_cf)\nmerged_cat = '{}/merged_{}_{}.cat'.format(cats_dir, self.scmp_cf, self.mag)\nscmp_9 = ' -MERGEDOUTCAT_NAME {}'.format(merged_cat)\nfull_cat = '{}/full_{}_{}.cat'.format(cats_dir, self.scmp_cf, self.mag)\nscmp_10 = ' -FULLOUTCAT_NAME {}'.format(full_cat)\nscmp_p = scmp_1 + scmp_2 + scmp_3 + scmp_4 + scmp_5\nscmp_p = scmp_p + scmp_6 + scmp_7 + scmp_8 + scmp_9\nscmp_p = scmp_p + scmp_10\ncreate_folder(self.logger, cats_dir)\nprocess_scamp = Popen(scmp_p, shell=True)\nprocess_scamp.wait()\nself.logger.info('Scamp process finished.')\nreturn True"], "bodies_text": "<|body_start_0|>\n self.prfs_d = extract_settings()\n self.logger = logger\n self.mag = mag\n self.scmp_d = scmp_d\n self.scmp_cf = scmp_cf\n self.sex_d = sex_d\n self.scamp_process()\n<|end_body_0|>\n\n<|body_start_1|>\n sex_cf = 
'{}_{}_{}_{}_{}'.format(self.sex_d['deblend_nthresh'], self.sex_d['analysis_thresh'], self.sex_d['detect_thresh'], self.sex_d['deblend_mincount'], self.sex_d['detect_minarea'])\n self.logger.info('Scamp process for magnitude {}'.format(self.mag))\n self.logger.info('Sextractor configuration: {}'.format(sex_cf))\n self.logger.info('Scamp configuration: {}'.format(self.scmp_cf))\n scmp_1 = 'scamp -c {}'.format(self.prfs_d['conf_scamp'])\n sex_loc = '{}/{}/CCDs/{}'.format(self.prfs_d['fits_dir'], self.mag, sex_cf)\n sex_output = 'mag_{}_CCD_x?_y?_d?.cat'.format(self.mag)\n scmp_2 = ' {}/{}'.format(sex_loc, sex_output)\n scmp_3 = ' -ASTREFCAT_NAME'\n cat_loc = '{}/{}/CCDs/{}'.format(self.prfs_d['fits_dir'], self.mag, sex_cf)\n cat_input = 'catalog_{}.cat'.format(self.mag)\n scmp_4 = ' {}/{}'.format(cat_loc, cat_input)\n scmp_5 = ' -PIXSCALE_MAXERR {}'.format(self.scmp_d['pixscale_maxerr'])\n scmp_6 = ' -POSANGLE_MAXERR {}'.format(self.scmp_d['posangle_maxerr'])\n scmp_7 = ' -POSITION_MAXERR {}'.format(self.scmp_d['position_maxerr'])\n scmp_8 = ' -CROSSID_RADIUS {}'.format(self.scmp_d['crossid_radius'])\n cats_dir = '{}/{}/{}/{}'.format(self.prfs_d['catalogs_dir'], self.mag, sex_cf, self.scmp_cf)\n merged_cat = '{}/merged_{}_{}.cat'.format(cats_dir, self.scmp_cf, self.mag)\n scmp_9 = ' -MERGEDOUTCAT_NAME {}'.format(merged_cat)\n full_cat = '{}/full_{}_{}.cat'.format(cats_dir, self.scmp_cf, self.mag)\n scmp_10 = ' -FULLOUTCAT_NAME {}'.format(full_cat)\n scmp_p = scmp_1 + scmp_2 + scmp_3 + scmp_4 + scmp_5\n scmp_p = scmp_p + scmp_6 + scmp_7 + scmp_8 + scmp_9\n scmp_p = scmp_p + scmp_10\n create_folder(self.logger, cats_dir)\n process_scamp = Popen(scmp_p, shell=True)\n process_scamp.wait()\n self.logger.info('Scamp process finished.')\n return True\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Scamp", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Scamp:\n\n def __init__(self, logger, mag, scmp_d, scmp_cf, sex_d):\n \"\"\":param logger: :param mag: :param scmp_d: :param scmp_cf: :param sex_d:\"\"\"\n <|body_0|>\n\n def scamp_process(self):\n \"\"\":return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.prfs_d = extract_settings()\n self.logger = logger\n self.mag = mag\n self.scmp_d = scmp_d\n self.scmp_cf = scmp_cf\n self.sex_d = sex_d\n self.scamp_process()\n<|end_body_0|>\n\n<|body_start_1|>\n sex_cf = '{}_{}_{}_{}_{}'.format(self.sex_d['deblend_nthresh'], self.sex_d['analysis_thresh'], self.sex_d['detect_thresh'], self.sex_d['deblend_mincount'], self.sex_d['detect_minarea'])\n self.logger.info('Scamp process for magnitude {}'.format(self.mag))\n self.logger.info('Sextractor configuration: {}'.format(sex_cf))\n self.logger.info('Scamp configuration: {}'.format(self.scmp_cf))\n scmp_1 = 'scamp -c {}'.format(self.prfs_d['conf_scamp'])\n sex_loc = '{}/{}/CCDs/{}'.format(self.prfs_d['fits_dir'], self.mag, sex_cf)\n sex_output = 'mag_{}_CCD_x?_y?_d?.cat'.format(self.mag)\n scmp_2 = ' {}/{}'.format(sex_loc, sex_output)\n scmp_3 = ' -ASTREFCAT_NAME'\n cat_loc = '{}/{}/CCDs/{}'.format(self.prfs_d['fits_dir'], self.mag, sex_cf)\n cat_input = 'catalog_{}.cat'.format(self.mag)\n scmp_4 = ' {}/{}'.format(cat_loc, cat_input)\n scmp_5 = ' -PIXSCALE_MAXERR {}'.format(self.scmp_d['pixscale_maxerr'])\n scmp_6 = ' -POSANGLE_MAXERR {}'.format(self.scmp_d['posangle_maxerr'])\n scmp_7 = ' -POSITION_MAXERR {}'.format(self.scmp_d['position_maxerr'])\n scmp_8 = ' -CROSSID_RADIUS 
{}'.format(self.scmp_d['crossid_radius'])\n cats_dir = '{}/{}/{}/{}'.format(self.prfs_d['catalogs_dir'], self.mag, sex_cf, self.scmp_cf)\n merged_cat = '{}/merged_{}_{}.cat'.format(cats_dir, self.scmp_cf, self.mag)\n scmp_9 = ' -MERGEDOUTCAT_NAME {}'.format(merged_cat)\n full_cat = '{}/full_{}_{}.cat'.format(cats_dir, self.scmp_cf, self.mag)\n scmp_10 = ' -FULLOUTCAT_NAME {}'.format(full_cat)\n scmp_p = scmp_1 + scmp_2 + scmp_3 + scmp_4 + scmp_5\n scmp_p = scmp_p + scmp_6 + scmp_7 + scmp_8 + scmp_9\n scmp_p = scmp_p + scmp_10\n create_folder(self.logger, cats_dir)\n process_scamp = Popen(scmp_p, shell=True)\n process_scamp.wait()\n self.logger.info('Scamp process finished.')\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000454", "length_bytes": 3359, "license_type": "no_license", "methods": [{"docstring": ":param logger: :param mag: :param scmp_d: :param scmp_cf: :param sex_d:", "name": "__init__", "signature": "def __init__(self, logger, mag, scmp_d, scmp_cf, sex_d)"}, {"docstring": ":return:", "name": "scamp_process", "signature": "def scamp_process(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006442", "prompt": "Implement the Python class `Scamp` described below.\n\nClass description:\nImplement the Scamp class.\n\nMethod signatures and docstrings:\n- def __init__(self, logger, mag, scmp_d, scmp_cf, sex_d): :param logger: :param mag: :param scmp_d: :param scmp_cf: :param sex_d:\n- def scamp_process(self): :return:", "prompted_full_text": "Implement the Python class `Scamp` described below.\n\nClass description:\nImplement the Scamp class.\n\nMethod signatures and docstrings:\n- def __init__(self, logger, mag, scmp_d, scmp_cf, sex_d): :param logger: :param mag: :param scmp_d: :param scmp_cf: :param sex_d:\n- def scamp_process(self): :return:\n\n<|skeleton|>\nclass Scamp:\n\n def __init__(self, logger, mag, scmp_d, scmp_cf, sex_d):\n \"\"\":param logger: :param mag: :param scmp_d: :param scmp_cf: :param sex_d:\"\"\"\n <|body_0|>\n\n def scamp_process(self):\n \"\"\":return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.prfs_d = extract_settings()\n self.logger = logger\n self.mag = mag\n self.scmp_d = scmp_d\n self.scmp_cf = scmp_cf\n self.sex_d = sex_d\n self.scamp_process()\n<|end_body_0|>\n\n<|body_start_1|>\n sex_cf = '{}_{}_{}_{}_{}'.format(self.sex_d['deblend_nthresh'], self.sex_d['analysis_thresh'], self.sex_d['detect_thresh'], self.sex_d['deblend_mincount'], self.sex_d['detect_minarea'])\n self.logger.info('Scamp process for magnitude {}'.format(self.mag))\n self.logger.info('Sextractor configuration: {}'.format(sex_cf))\n self.logger.info('Scamp configuration: {}'.format(self.scmp_cf))\n scmp_1 = 'scamp -c {}'.format(self.prfs_d['conf_scamp'])\n sex_loc = '{}/{}/CCDs/{}'.format(self.prfs_d['fits_dir'], self.mag, sex_cf)\n sex_output = 'mag_{}_CCD_x?_y?_d?.cat'.format(self.mag)\n scmp_2 = ' {}/{}'.format(sex_loc, sex_output)\n scmp_3 = ' -ASTREFCAT_NAME'\n cat_loc = '{}/{}/CCDs/{}'.format(self.prfs_d['fits_dir'], self.mag, sex_cf)\n cat_input = 'catalog_{}.cat'.format(self.mag)\n scmp_4 = ' {}/{}'.format(cat_loc, cat_input)\n scmp_5 = ' -PIXSCALE_MAXERR {}'.format(self.scmp_d['pixscale_maxerr'])\n scmp_6 = ' -POSANGLE_MAXERR {}'.format(self.scmp_d['posangle_maxerr'])\n scmp_7 = ' -POSITION_MAXERR {}'.format(self.scmp_d['position_maxerr'])\n scmp_8 = ' -CROSSID_RADIUS {}'.format(self.scmp_d['crossid_radius'])\n cats_dir = '{}/{}/{}/{}'.format(self.prfs_d['catalogs_dir'], self.mag, sex_cf, self.scmp_cf)\n 
merged_cat = '{}/merged_{}_{}.cat'.format(cats_dir, self.scmp_cf, self.mag)\n scmp_9 = ' -MERGEDOUTCAT_NAME {}'.format(merged_cat)\n full_cat = '{}/full_{}_{}.cat'.format(cats_dir, self.scmp_cf, self.mag)\n scmp_10 = ' -FULLOUTCAT_NAME {}'.format(full_cat)\n scmp_p = scmp_1 + scmp_2 + scmp_3 + scmp_4 + scmp_5\n scmp_p = scmp_p + scmp_6 + scmp_7 + scmp_8 + scmp_9\n scmp_p = scmp_p + scmp_10\n create_folder(self.logger, cats_dir)\n process_scamp = Popen(scmp_p, shell=True)\n process_scamp.wait()\n self.logger.info('Scamp process finished.')\n return True\n<|end_body_1|>\n", "revision_id": "ca9f090ed8b6049049c13a348cf1ebd8c054acd4", "skeleton": "<|skeleton|>\nclass Scamp:\n\n def __init__(self, logger, mag, scmp_d, scmp_cf, sex_d):\n \"\"\":param logger: :param mag: :param scmp_d: :param scmp_cf: :param sex_d:\"\"\"\n <|body_0|>\n\n def scamp_process(self):\n \"\"\":return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Scamp:\n def __init__(self, logger, mag, scmp_d, scmp_cf, sex_d):\n \"\"\":param logger: :param mag: :param scmp_d: :param scmp_cf: :param sex_d:\"\"\"\n self.prfs_d = extract_settings()\n self.logger = logger\n self.mag = mag\n self.scmp_d = scmp_d\n self.scmp_cf = scmp_cf\n self.sex_d = sex_d\n self.scamp_process()\n\n def scamp_process(self):\n \"\"\":return:\"\"\"\n sex_cf = '{}_{}_{}_{}_{}'.format(self.sex_d['deblend_nthresh'], self.sex_d['analysis_thresh'], self.sex_d['detect_thresh'], self.sex_d['deblend_mincount'], self.sex_d['detect_minarea'])\n self.logger.info('Scamp process for magnitude {}'.format(self.mag))\n self.logger.info('Sextractor configuration: {}'.format(sex_cf))\n self.logger.info('Scamp configuration: {}'.format(self.scmp_cf))\n scmp_1 = 'scamp -c {}'.format(self.prfs_d['conf_scamp'])\n sex_loc = '{}/{}/CCDs/{}'.format(self.prfs_d['fits_dir'], self.mag, sex_cf)\n sex_output = 'mag_{}_CCD_x?_y?_d?.cat'.format(self.mag)\n scmp_2 = ' {}/{}'.format(sex_loc, sex_output)\n scmp_3 = ' -ASTREFCAT_NAME'\n cat_loc = '{}/{}/CCDs/{}'.format(self.prfs_d['fits_dir'], self.mag, sex_cf)\n cat_input = 'catalog_{}.cat'.format(self.mag)\n scmp_4 = ' {}/{}'.format(cat_loc, cat_input)\n scmp_5 = ' -PIXSCALE_MAXERR {}'.format(self.scmp_d['pixscale_maxerr'])\n scmp_6 = ' -POSANGLE_MAXERR {}'.format(self.scmp_d['posangle_maxerr'])\n scmp_7 = ' -POSITION_MAXERR {}'.format(self.scmp_d['position_maxerr'])\n scmp_8 = ' -CROSSID_RADIUS {}'.format(self.scmp_d['crossid_radius'])\n cats_dir = '{}/{}/{}/{}'.format(self.prfs_d['catalogs_dir'], self.mag, sex_cf, self.scmp_cf)\n merged_cat = '{}/merged_{}_{}.cat'.format(cats_dir, self.scmp_cf, self.mag)\n scmp_9 = ' -MERGEDOUTCAT_NAME {}'.format(merged_cat)\n full_cat = '{}/full_{}_{}.cat'.format(cats_dir, self.scmp_cf, self.mag)\n scmp_10 = ' -FULLOUTCAT_NAME {}'.format(full_cat)\n scmp_p = scmp_1 + scmp_2 + scmp_3 + scmp_4 + scmp_5\n scmp_p = scmp_p + scmp_6 + scmp_7 + scmp_8 + scmp_9\n scmp_p = scmp_p + scmp_10\n create_folder(self.logger, cats_dir)\n process_scamp = Popen(scmp_p, shell=True)\n process_scamp.wait()\n self.logger.info('Scamp process finished.')\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "pipeline_luca/scamp_aux_luca.py", "source_repo": "sgongar/Euclid-tests", "split": "val", "star_events_count": 0} {"blob_id": "62a244831a2f28e418a9e2f9f028294c8a6f0c97", "bodies": ["length = len(strs)\narr = [(word.count('1'), word.count('0')) for word in strs]\nmemo = {}\n\ndef dfs(one, 
zero, index):\n if index == length:\n return 0\n if (one, zero, index) in memo:\n return memo[one, zero, index]\n ans = 0\n cnt1, cnt0 = arr[index]\n if one + cnt1 <= n and zero + cnt0 <= m:\n ans = 1 + dfs(one + cnt1, zero + cnt0, index + 1)\n ans = max(ans, dfs(one, zero, index + 1))\n memo[one, zero, index] = ans\n return ans\nreturn dfs(0, 0, 0)", "dp = [[0] * (m + 1) for _ in range(n + 1)]\nfor word in strs:\n one = word.count('1')\n zero = word.count('0')\n for i in range(n, one - 1, -1):\n for j in range(m, zero - 1, -1):\n dp[i][j] = max(dp[i][j], dp[i - one][j - zero] + 1)\nreturn dp[-1][-1]"], "bodies_text": "<|body_start_0|>\n length = len(strs)\n arr = [(word.count('1'), word.count('0')) for word in strs]\n memo = {}\n\n def dfs(one, zero, index):\n if index == length:\n return 0\n if (one, zero, index) in memo:\n return memo[one, zero, index]\n ans = 0\n cnt1, cnt0 = arr[index]\n if one + cnt1 <= n and zero + cnt0 <= m:\n ans = 1 + dfs(one + cnt1, zero + cnt0, index + 1)\n ans = max(ans, dfs(one, zero, index + 1))\n memo[one, zero, index] = ans\n return ans\n return dfs(0, 0, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n dp = [[0] * (m + 1) for _ in range(n + 1)]\n for word in strs:\n one = word.count('1')\n zero = word.count('0')\n for i in range(n, one - 1, -1):\n for j in range(m, zero - 1, -1):\n dp[i][j] = max(dp[i][j], dp[i - one][j - zero] + 1)\n return dp[-1][-1]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def findMaxForm1(self, strs: List[str], m: int, n: int) -> int:\n \"\"\"思路:记忆化递归 @param strs: @param m: @param n: @return:\"\"\"\n <|body_0|>\n\n def findMaxForm2(self, strs: List[str], m: int, n: int) -> int:\n \"\"\"思路:动态规划法 @param strs: @param m: @param n: @return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n length = len(strs)\n arr = [(word.count('1'), word.count('0')) for word in strs]\n memo = {}\n\n def dfs(one, zero, index):\n if index == length:\n return 0\n if (one, zero, index) in memo:\n return memo[one, zero, index]\n ans = 0\n cnt1, cnt0 = arr[index]\n if one + cnt1 <= n and zero + cnt0 <= m:\n ans = 1 + dfs(one + cnt1, zero + cnt0, index + 1)\n ans = max(ans, dfs(one, zero, index + 1))\n memo[one, zero, index] = ans\n return ans\n return dfs(0, 0, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n dp = [[0] * (m + 1) for _ in range(n + 1)]\n for word in strs:\n one = word.count('1')\n zero = word.count('0')\n for i in range(n, one - 1, -1):\n for j in range(m, zero - 1, -1):\n dp[i][j] = max(dp[i][j], dp[i - one][j - zero] + 1)\n return dp[-1][-1]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000455", "length_bytes": 2694, "license_type": "no_license", "methods": [{"docstring": "思路:记忆化递归 @param strs: @param m: @param n: @return:", "name": "findMaxForm1", "signature": "def findMaxForm1(self, strs: List[str], m: int, n: int) -> int"}, {"docstring": "思路:动态规划法 @param strs: @param m: @param n: @return:", "name": "findMaxForm2", "signature": "def findMaxForm2(self, strs: List[str], m: int, n: int) -> int"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001475", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findMaxForm1(self, strs: List[str], m: int, n: int) -> int: 思路:记忆化递归 @param strs: @param m: @param n: @return:\n- def findMaxForm2(self, strs: List[str], 
m: int, n: int) -> int: 思路:动态规划法 @param strs: @param m: @param n: @return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findMaxForm1(self, strs: List[str], m: int, n: int) -> int: 思路:记忆化递归 @param strs: @param m: @param n: @return:\n- def findMaxForm2(self, strs: List[str], m: int, n: int) -> int: 思路:动态规划法 @param strs: @param m: @param n: @return:\n\n<|skeleton|>\nclass Solution:\n\n def findMaxForm1(self, strs: List[str], m: int, n: int) -> int:\n \"\"\"思路:记忆化递归 @param strs: @param m: @param n: @return:\"\"\"\n <|body_0|>\n\n def findMaxForm2(self, strs: List[str], m: int, n: int) -> int:\n \"\"\"思路:动态规划法 @param strs: @param m: @param n: @return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n length = len(strs)\n arr = [(word.count('1'), word.count('0')) for word in strs]\n memo = {}\n\n def dfs(one, zero, index):\n if index == length:\n return 0\n if (one, zero, index) in memo:\n return memo[one, zero, index]\n ans = 0\n cnt1, cnt0 = arr[index]\n if one + cnt1 <= n and zero + cnt0 <= m:\n ans = 1 + dfs(one + cnt1, zero + cnt0, index + 1)\n ans = max(ans, dfs(one, zero, index + 1))\n memo[one, zero, index] = ans\n return ans\n return dfs(0, 0, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n dp = [[0] * (m + 1) for _ in range(n + 1)]\n for word in strs:\n one = word.count('1')\n zero = word.count('0')\n for i in range(n, one - 1, -1):\n for j in range(m, zero - 1, -1):\n dp[i][j] = max(dp[i][j], dp[i - one][j - zero] + 1)\n return dp[-1][-1]\n<|end_body_1|>\n", "revision_id": "e43ee86c5a8cdb808da09b4b6138e10275abadb5", "skeleton": "<|skeleton|>\nclass Solution:\n\n def findMaxForm1(self, strs: List[str], m: int, n: int) -> int:\n \"\"\"思路:记忆化递归 @param strs: @param m: @param n: @return:\"\"\"\n <|body_0|>\n\n def findMaxForm2(self, strs: List[str], m: int, n: int) -> int:\n \"\"\"思路:动态规划法 @param strs: @param m: @param n: @return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def findMaxForm1(self, strs: List[str], m: int, n: int) -> int:\n \"\"\"思路:记忆化递归 @param strs: @param m: @param n: @return:\"\"\"\n length = len(strs)\n arr = [(word.count('1'), word.count('0')) for word in strs]\n memo = {}\n\n def dfs(one, zero, index):\n if index == length:\n return 0\n if (one, zero, index) in memo:\n return memo[one, zero, index]\n ans = 0\n cnt1, cnt0 = arr[index]\n if one + cnt1 <= n and zero + cnt0 <= m:\n ans = 1 + dfs(one + cnt1, zero + cnt0, index + 1)\n ans = max(ans, dfs(one, zero, index + 1))\n memo[one, zero, index] = ans\n return ans\n return dfs(0, 0, 0)\n\n def findMaxForm2(self, strs: List[str], m: int, n: int) -> int:\n \"\"\"思路:动态规划法 @param strs: @param m: @param n: @return:\"\"\"\n dp = [[0] * (m + 1) for _ in range(n + 1)]\n for word in strs:\n one = word.count('1')\n zero = word.count('0')\n for i in range(n, one - 1, -1):\n for j in range(m, zero - 1, -1):\n dp[i][j] = max(dp[i][j], dp[i - one][j - zero] + 1)\n return dp[-1][-1]\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode/记忆化/474. 
一和零.py", "source_repo": "yiming1012/MyLeetCode", "split": "val", "star_events_count": 2} {"blob_id": "f1c4c0d942bd6af416c893bc13fd5b7352aebe21", "bodies": ["self.value = value\nself.identifier = identifier\nself.hint = hint", "hint = self.hint\nif isinstance(hint, type):\n type_hint = hint.__name__\nelif hint == ():\n type_hint = \"'no type'\"\nelif isinstance(hint, tuple):\n type_hint = ', '.join([type_.__name__ for type_ in hint])\nelse:\n type_hint = hint\nbad_type_name = type(self.value).__name__\nreturn 'Bad type for `{}`: {}. (Should be {}.)'.format(self.identifier, bad_type_name, type_hint)"], "bodies_text": "<|body_start_0|>\n self.value = value\n self.identifier = identifier\n self.hint = hint\n<|end_body_0|>\n\n<|body_start_1|>\n hint = self.hint\n if isinstance(hint, type):\n type_hint = hint.__name__\n elif hint == ():\n type_hint = \"'no type'\"\n elif isinstance(hint, tuple):\n type_hint = ', '.join([type_.__name__ for type_ in hint])\n else:\n type_hint = hint\n bad_type_name = type(self.value).__name__\n return 'Bad type for `{}`: {}. (Should be {}.)'.format(self.identifier, bad_type_name, type_hint)\n<|end_body_1|>\n", "class_docstring": "An argument or value is not of the specified type. `DoxhooksTypeError` extends `DoxhooksDataError`. Magic Methods ------------- __str__ Override `DoxhooksDataError.__str__` to compose a message.", "class_name": "DoxhooksTypeError", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DoxhooksTypeError:\n \"\"\"An argument or value is not of the specified type. `DoxhooksTypeError` extends `DoxhooksDataError`. Magic Methods ------------- __str__ Override `DoxhooksDataError.__str__` to compose a message.\"\"\"\n\n def __init__(self, value, identifier, hint):\n \"\"\"Initialise the error with parameters of the error message. The `DoxhooksTypeError` constructor overrides the `DoxhooksDataError` constructor with parameters of the error message. Parameters ---------- value The value that caused the error. identifier : str An identifier that the user associates with the value. hint : str, type or Tuple[type, ...] A hint as to the expected type. Attributes ---------- value The argument of `value`. identifier : str The argument of `identifier`. hint : str, type or Tuple[type, ...] The argument of `hint`.\"\"\"\n <|body_0|>\n\n def __str__(self):\n \"\"\"Return an error message composed from the error attributes. Overrides `DoxhooksDataError.__str__`. Returns ------- str The error message. Examples -------- >>> from doxhooks.errors import DoxhooksTypeError >>> str(DoxhooksTypeError(1, \"my_arg\", str)) 'Bad type for `my_arg`: int. (Should be str.)' >>> str(DoxhooksTypeError(1, \"my_arg\", (str, type(None)))) 'Bad type for `my_arg`: int. (Should be str, NoneType.)' >>> str(DoxhooksTypeError(1, \"my_arg\", \"str or None\")) 'Bad type for `my_arg`: int. (Should be str or None.)'\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.value = value\n self.identifier = identifier\n self.hint = hint\n<|end_body_0|>\n\n<|body_start_1|>\n hint = self.hint\n if isinstance(hint, type):\n type_hint = hint.__name__\n elif hint == ():\n type_hint = \"'no type'\"\n elif isinstance(hint, tuple):\n type_hint = ', '.join([type_.__name__ for type_ in hint])\n else:\n type_hint = hint\n bad_type_name = type(self.value).__name__\n return 'Bad type for `{}`: {}. 
(Should be {}.)'.format(self.identifier, bad_type_name, type_hint)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000456", "length_bytes": 10728, "license_type": "permissive", "methods": [{"docstring": "Initialise the error with parameters of the error message. The `DoxhooksTypeError` constructor overrides the `DoxhooksDataError` constructor with parameters of the error message. Parameters ---------- value The value that caused the error. identifier : str An identifier that the user associates with the value. hint : str, type or Tuple[type, ...] A hint as to the expected type. Attributes ---------- value The argument of `value`. identifier : str The argument of `identifier`. hint : str, type or Tuple[type, ...] The argument of `hint`.", "name": "__init__", "signature": "def __init__(self, value, identifier, hint)"}, {"docstring": "Return an error message composed from the error attributes. Overrides `DoxhooksDataError.__str__`. Returns ------- str The error message. Examples -------- >>> from doxhooks.errors import DoxhooksTypeError >>> str(DoxhooksTypeError(1, \"my_arg\", str)) 'Bad type for `my_arg`: int. (Should be str.)' >>> str(DoxhooksTypeError(1, \"my_arg\", (str, type(None)))) 'Bad type for `my_arg`: int. (Should be str, NoneType.)' >>> str(DoxhooksTypeError(1, \"my_arg\", \"str or None\")) 'Bad type for `my_arg`: int. (Should be str or None.)'", "name": "__str__", "signature": "def __str__(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000379", "prompt": "Implement the Python class `DoxhooksTypeError` described below.\n\nClass description:\nAn argument or value is not of the specified type. `DoxhooksTypeError` extends `DoxhooksDataError`. Magic Methods ------------- __str__ Override `DoxhooksDataError.__str__` to compose a message.\n\nMethod signatures and docstrings:\n- def __init__(self, value, identifier, hint): Initialise the error with parameters of the error message. The `DoxhooksTypeError` constructor overrides the `DoxhooksDataError` constructor with parameters of the error message. Parameters ---------- value The value that caused the error. identifier : str An identifier that the user associates with the value. hint : str, type or Tuple[type, ...] A hint as to the expected type. Attributes ---------- value The argument of `value`. identifier : str The argument of `identifier`. hint : str, type or Tuple[type, ...] The argument of `hint`.\n- def __str__(self): Return an error message composed from the error attributes. Overrides `DoxhooksDataError.__str__`. Returns ------- str The error message. Examples -------- >>> from doxhooks.errors import DoxhooksTypeError >>> str(DoxhooksTypeError(1, \"my_arg\", str)) 'Bad type for `my_arg`: int. (Should be str.)' >>> str(DoxhooksTypeError(1, \"my_arg\", (str, type(None)))) 'Bad type for `my_arg`: int. (Should be str, NoneType.)' >>> str(DoxhooksTypeError(1, \"my_arg\", \"str or None\")) 'Bad type for `my_arg`: int. (Should be str or None.)'", "prompted_full_text": "Implement the Python class `DoxhooksTypeError` described below.\n\nClass description:\nAn argument or value is not of the specified type. `DoxhooksTypeError` extends `DoxhooksDataError`. Magic Methods ------------- __str__ Override `DoxhooksDataError.__str__` to compose a message.\n\nMethod signatures and docstrings:\n- def __init__(self, value, identifier, hint): Initialise the error with parameters of the error message. 
The `DoxhooksTypeError` constructor overrides the `DoxhooksDataError` constructor with parameters of the error message. Parameters ---------- value The value that caused the error. identifier : str An identifier that the user associates with the value. hint : str, type or Tuple[type, ...] A hint as to the expected type. Attributes ---------- value The argument of `value`. identifier : str The argument of `identifier`. hint : str, type or Tuple[type, ...] The argument of `hint`.\n- def __str__(self): Return an error message composed from the error attributes. Overrides `DoxhooksDataError.__str__`. Returns ------- str The error message. Examples -------- >>> from doxhooks.errors import DoxhooksTypeError >>> str(DoxhooksTypeError(1, \"my_arg\", str)) 'Bad type for `my_arg`: int. (Should be str.)' >>> str(DoxhooksTypeError(1, \"my_arg\", (str, type(None)))) 'Bad type for `my_arg`: int. (Should be str, NoneType.)' >>> str(DoxhooksTypeError(1, \"my_arg\", \"str or None\")) 'Bad type for `my_arg`: int. (Should be str or None.)'\n\n<|skeleton|>\nclass DoxhooksTypeError:\n \"\"\"An argument or value is not of the specified type. `DoxhooksTypeError` extends `DoxhooksDataError`. Magic Methods ------------- __str__ Override `DoxhooksDataError.__str__` to compose a message.\"\"\"\n\n def __init__(self, value, identifier, hint):\n \"\"\"Initialise the error with parameters of the error message. The `DoxhooksTypeError` constructor overrides the `DoxhooksDataError` constructor with parameters of the error message. Parameters ---------- value The value that caused the error. identifier : str An identifier that the user associates with the value. hint : str, type or Tuple[type, ...] A hint as to the expected type. Attributes ---------- value The argument of `value`. identifier : str The argument of `identifier`. hint : str, type or Tuple[type, ...] The argument of `hint`.\"\"\"\n <|body_0|>\n\n def __str__(self):\n \"\"\"Return an error message composed from the error attributes. Overrides `DoxhooksDataError.__str__`. Returns ------- str The error message. Examples -------- >>> from doxhooks.errors import DoxhooksTypeError >>> str(DoxhooksTypeError(1, \"my_arg\", str)) 'Bad type for `my_arg`: int. (Should be str.)' >>> str(DoxhooksTypeError(1, \"my_arg\", (str, type(None)))) 'Bad type for `my_arg`: int. (Should be str, NoneType.)' >>> str(DoxhooksTypeError(1, \"my_arg\", \"str or None\")) 'Bad type for `my_arg`: int. (Should be str or None.)'\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.value = value\n self.identifier = identifier\n self.hint = hint\n<|end_body_0|>\n\n<|body_start_1|>\n hint = self.hint\n if isinstance(hint, type):\n type_hint = hint.__name__\n elif hint == ():\n type_hint = \"'no type'\"\n elif isinstance(hint, tuple):\n type_hint = ', '.join([type_.__name__ for type_ in hint])\n else:\n type_hint = hint\n bad_type_name = type(self.value).__name__\n return 'Bad type for `{}`: {}. (Should be {}.)'.format(self.identifier, bad_type_name, type_hint)\n<|end_body_1|>\n", "revision_id": "8cb346fb1830a24af5640b948a85a578cc905db6", "skeleton": "<|skeleton|>\nclass DoxhooksTypeError:\n \"\"\"An argument or value is not of the specified type. `DoxhooksTypeError` extends `DoxhooksDataError`. Magic Methods ------------- __str__ Override `DoxhooksDataError.__str__` to compose a message.\"\"\"\n\n def __init__(self, value, identifier, hint):\n \"\"\"Initialise the error with parameters of the error message. 
The `DoxhooksTypeError` constructor overrides the `DoxhooksDataError` constructor with parameters of the error message. Parameters ---------- value The value that caused the error. identifier : str An identifier that the user associates with the value. hint : str, type or Tuple[type, ...] A hint as to the expected type. Attributes ---------- value The argument of `value`. identifier : str The argument of `identifier`. hint : str, type or Tuple[type, ...] The argument of `hint`.\"\"\"\n <|body_0|>\n\n def __str__(self):\n \"\"\"Return an error message composed from the error attributes. Overrides `DoxhooksDataError.__str__`. Returns ------- str The error message. Examples -------- >>> from doxhooks.errors import DoxhooksTypeError >>> str(DoxhooksTypeError(1, \"my_arg\", str)) 'Bad type for `my_arg`: int. (Should be str.)' >>> str(DoxhooksTypeError(1, \"my_arg\", (str, type(None)))) 'Bad type for `my_arg`: int. (Should be str, NoneType.)' >>> str(DoxhooksTypeError(1, \"my_arg\", \"str or None\")) 'Bad type for `my_arg`: int. (Should be str or None.)'\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DoxhooksTypeError:\n \"\"\"An argument or value is not of the specified type. `DoxhooksTypeError` extends `DoxhooksDataError`. Magic Methods ------------- __str__ Override `DoxhooksDataError.__str__` to compose a message.\"\"\"\n\n def __init__(self, value, identifier, hint):\n \"\"\"Initialise the error with parameters of the error message. The `DoxhooksTypeError` constructor overrides the `DoxhooksDataError` constructor with parameters of the error message. Parameters ---------- value The value that caused the error. identifier : str An identifier that the user associates with the value. hint : str, type or Tuple[type, ...] A hint as to the expected type. Attributes ---------- value The argument of `value`. identifier : str The argument of `identifier`. hint : str, type or Tuple[type, ...] The argument of `hint`.\"\"\"\n self.value = value\n self.identifier = identifier\n self.hint = hint\n\n def __str__(self):\n \"\"\"Return an error message composed from the error attributes. Overrides `DoxhooksDataError.__str__`. Returns ------- str The error message. Examples -------- >>> from doxhooks.errors import DoxhooksTypeError >>> str(DoxhooksTypeError(1, \"my_arg\", str)) 'Bad type for `my_arg`: int. (Should be str.)' >>> str(DoxhooksTypeError(1, \"my_arg\", (str, type(None)))) 'Bad type for `my_arg`: int. (Should be str, NoneType.)' >>> str(DoxhooksTypeError(1, \"my_arg\", \"str or None\")) 'Bad type for `my_arg`: int. (Should be str or None.)'\"\"\"\n hint = self.hint\n if isinstance(hint, type):\n type_hint = hint.__name__\n elif hint == ():\n type_hint = \"'no type'\"\n elif isinstance(hint, tuple):\n type_hint = ', '.join([type_.__name__ for type_ in hint])\n else:\n type_hint = hint\n bad_type_name = type(self.value).__name__\n return 'Bad type for `{}`: {}. 
(Should be {}.)'.format(self.identifier, bad_type_name, type_hint)\n", "source": "the_stack_v2_python_sparse", "source_path": "doxhooks/errors.py", "source_repo": "nre/Doxhooks", "split": "val", "star_events_count": 1} {"blob_id": "a68d4c07a335bb1c4e1f55f32006464f6a6a3794", "bodies": ["client = test_client.TestClient(context.node['baseurl'])\nwith pytest.raises(xml.parsers.expat.ExpatError):\n client.describe(context.TOKEN, '_invalid_pid_')", "for object_list in context.slices:\n for object_info in object_list.objectInfo:\n client = test_client.TestClient(context.node['baseurl'])\n pid = object_info.identifier.value()\n response = client.describe(context.TOKEN, pid)\n headers = response.getheaders()\n headers_lower = dict(((header.lower(), value) for header, value in headers))\n assert 'date' in headers_lower\n assert 'content-type' in headers_lower\n assert 'content-length' in headers_lower\n assert int(headers_lower['content-length']) == object_info.size\n assert d1_common.date_time.dt_from_iso8601_str(headers_lower['date'])"], "bodies_text": "<|body_start_0|>\n client = test_client.TestClient(context.node['baseurl'])\n with pytest.raises(xml.parsers.expat.ExpatError):\n client.describe(context.TOKEN, '_invalid_pid_')\n<|end_body_0|>\n\n<|body_start_1|>\n for object_list in context.slices:\n for object_info in object_list.objectInfo:\n client = test_client.TestClient(context.node['baseurl'])\n pid = object_info.identifier.value()\n response = client.describe(context.TOKEN, pid)\n headers = response.getheaders()\n headers_lower = dict(((header.lower(), value) for header, value in headers))\n assert 'date' in headers_lower\n assert 'content-type' in headers_lower\n assert 'content-length' in headers_lower\n assert int(headers_lower['content-length']) == object_info.size\n assert d1_common.date_time.dt_from_iso8601_str(headers_lower['date'])\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Test060Describe", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Test060Describe:\n\n def test_010_describe_by_invalid_pid(self):\n \"\"\"404 NotFound when attempting to get description for non-existing object.\"\"\"\n <|body_0|>\n\n def test_020_describe_by_valid_pid(self):\n \"\"\"Successful describe for known objects. - Verify that required headers are present. - Verify that the object length reported by describe matches what was reported by listObjects. - Verify that date header contains a valid date. 
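A minimal usage sketch for the `DoxhooksTypeError` record above. The `check_type` helper is hypothetical (it is not part of the record), and the sketch assumes `DoxhooksTypeError` ultimately derives from `Exception` via `DoxhooksDataError`, as the class description states; string hints only shape the message, so they are skipped here.

```python
from doxhooks.errors import DoxhooksTypeError

def check_type(value, identifier, hint):
    # Hypothetical guard: raise when `value` fails a concrete type hint.
    # A string hint such as "str or None" is message-only documentation,
    # so validating against it is left to the caller.
    if isinstance(hint, (type, tuple)) and not isinstance(value, hint):
        raise DoxhooksTypeError(value, identifier, hint)

check_type("abc", "my_arg", str)  # passes silently
try:
    check_type(1, "my_arg", (str, type(None)))
except DoxhooksTypeError as error:
    print(error)  # Bad type for `my_arg`: int. (Should be str, NoneType.)
```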
- Verify that date header matches what was reported by listObjects.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n client = test_client.TestClient(context.node['baseurl'])\n with pytest.raises(xml.parsers.expat.ExpatError):\n client.describe(context.TOKEN, '_invalid_pid_')\n<|end_body_0|>\n\n<|body_start_1|>\n for object_list in context.slices:\n for object_info in object_list.objectInfo:\n client = test_client.TestClient(context.node['baseurl'])\n pid = object_info.identifier.value()\n response = client.describe(context.TOKEN, pid)\n headers = response.getheaders()\n headers_lower = dict(((header.lower(), value) for header, value in headers))\n assert 'date' in headers_lower\n assert 'content-type' in headers_lower\n assert 'content-length' in headers_lower\n assert int(headers_lower['content-length']) == object_info.size\n assert d1_common.date_time.dt_from_iso8601_str(headers_lower['date'])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000457", "length_bytes": 3506, "license_type": "permissive", "methods": [{"docstring": "404 NotFound when attempting to get description for non-existing object.", "name": "test_010_describe_by_invalid_pid", "signature": "def test_010_describe_by_invalid_pid(self)"}, {"docstring": "Successful describe for known objects. - Verify that required headers are present. - Verify that the object length reported by describe matches what was reported by listObjects. - Verify that date header contains a valid date. - Verify that date header matches what was reported by listObjects.", "name": "test_020_describe_by_valid_pid", "signature": "def test_020_describe_by_valid_pid(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000364", "prompt": "Implement the Python class `Test060Describe` described below.\n\nClass description:\nImplement the Test060Describe class.\n\nMethod signatures and docstrings:\n- def test_010_describe_by_invalid_pid(self): 404 NotFound when attempting to get description for non-existing object.\n- def test_020_describe_by_valid_pid(self): Successful describe for known objects. - Verify that required headers are present. - Verify that the object length reported by describe matches what was reported by listObjects. - Verify that date header contains a valid date. - Verify that date header matches what was reported by listObjects.", "prompted_full_text": "Implement the Python class `Test060Describe` described below.\n\nClass description:\nImplement the Test060Describe class.\n\nMethod signatures and docstrings:\n- def test_010_describe_by_invalid_pid(self): 404 NotFound when attempting to get description for non-existing object.\n- def test_020_describe_by_valid_pid(self): Successful describe for known objects. - Verify that required headers are present. - Verify that the object length reported by describe matches what was reported by listObjects. - Verify that date header contains a valid date. - Verify that date header matches what was reported by listObjects.\n\n<|skeleton|>\nclass Test060Describe:\n\n def test_010_describe_by_invalid_pid(self):\n \"\"\"404 NotFound when attempting to get description for non-existing object.\"\"\"\n <|body_0|>\n\n def test_020_describe_by_valid_pid(self):\n \"\"\"Successful describe for known objects. - Verify that required headers are present. - Verify that the object length reported by describe matches what was reported by listObjects. - Verify that date header contains a valid date. 
- Verify that date header matches what was reported by listObjects.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n client = test_client.TestClient(context.node['baseurl'])\n with pytest.raises(xml.parsers.expat.ExpatError):\n client.describe(context.TOKEN, '_invalid_pid_')\n<|end_body_0|>\n\n<|body_start_1|>\n for object_list in context.slices:\n for object_info in object_list.objectInfo:\n client = test_client.TestClient(context.node['baseurl'])\n pid = object_info.identifier.value()\n response = client.describe(context.TOKEN, pid)\n headers = response.getheaders()\n headers_lower = dict(((header.lower(), value) for header, value in headers))\n assert 'date' in headers_lower\n assert 'content-type' in headers_lower\n assert 'content-length' in headers_lower\n assert int(headers_lower['content-length']) == object_info.size\n assert d1_common.date_time.dt_from_iso8601_str(headers_lower['date'])\n<|end_body_1|>\n", "revision_id": "d72a9461894d9be7d71178fb7310101b8ef9066a", "skeleton": "<|skeleton|>\nclass Test060Describe:\n\n def test_010_describe_by_invalid_pid(self):\n \"\"\"404 NotFound when attempting to get description for non-existing object.\"\"\"\n <|body_0|>\n\n def test_020_describe_by_valid_pid(self):\n \"\"\"Successful describe for known objects. - Verify that required headers are present. - Verify that the object length reported by describe matches what was reported by listObjects. - Verify that date header contains a valid date. - Verify that date header matches what was reported by listObjects.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Test060Describe:\n def test_010_describe_by_invalid_pid(self):\n \"\"\"404 NotFound when attempting to get description for non-existing object.\"\"\"\n client = test_client.TestClient(context.node['baseurl'])\n with pytest.raises(xml.parsers.expat.ExpatError):\n client.describe(context.TOKEN, '_invalid_pid_')\n\n def test_020_describe_by_valid_pid(self):\n \"\"\"Successful describe for known objects. - Verify that required headers are present. - Verify that the object length reported by describe matches what was reported by listObjects. - Verify that date header contains a valid date. 
- Verify that date header matches what was reported by listObjects.\"\"\"\n for object_list in context.slices:\n for object_info in object_list.objectInfo:\n client = test_client.TestClient(context.node['baseurl'])\n pid = object_info.identifier.value()\n response = client.describe(context.TOKEN, pid)\n headers = response.getheaders()\n headers_lower = dict(((header.lower(), value) for header, value in headers))\n assert 'date' in headers_lower\n assert 'content-type' in headers_lower\n assert 'content-length' in headers_lower\n assert int(headers_lower['content-length']) == object_info.size\n assert d1_common.date_time.dt_from_iso8601_str(headers_lower['date'])\n", "source": "the_stack_v2_python_sparse", "source_path": "test_utilities/src/d1_test/stress_tester/projects/_unit_test_bases_for_stress_tests/tier_1_mn_read_describe.py", "source_repo": "DataONEorg/d1_python", "split": "val", "star_events_count": 15} {"blob_id": "019bbe8993330f24e7ef58c357f23706227dbe38", "bodies": ["from supriya.tools import ugentools\nugen = abs(ugentools.HPZ1.ar(source=source)) > threshold\nreturn ugen", "from supriya.tools import ugentools\nugen = abs(ugentools.HPZ1.kr(source=source)) > threshold\nreturn ugen"], "bodies_text": "<|body_start_0|>\n from supriya.tools import ugentools\n ugen = abs(ugentools.HPZ1.ar(source=source)) > threshold\n return ugen\n<|end_body_0|>\n\n<|body_start_1|>\n from supriya.tools import ugentools\n ugen = abs(ugentools.HPZ1.kr(source=source)) > threshold\n return ugen\n<|end_body_1|>\n", "class_docstring": "Triggers when a value changes. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.ar( ... source=source, ... threshold=0, ... ) >>> print(str(changed)) SynthDef 39e1f9d61589c4acaaf297cc961d65e4 { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] }", "class_name": "Changed", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Changed:\n \"\"\"Triggers when a value changes. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.ar( ... source=source, ... threshold=0, ... ) >>> print(str(changed)) SynthDef 39e1f9d61589c4acaaf297cc961d65e4 { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] }\"\"\"\n\n def ar(cls, source=None, threshold=0):\n \"\"\"Constructs an audio-rate Changed. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.ar( ... source=source, ... threshold=0, ... ) >>> print(str(changed)) SynthDef 39e1f9d61589c4acaaf297cc961d65e4 { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] } Returns ugen graph.\"\"\"\n <|body_0|>\n\n def kr(cls, source=None, threshold=0):\n \"\"\"Constructs a control-rate Changed. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.kr( ... source=source, ... threshold=0, ... 
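The `Test060Describe` record above hinges on one idiom: HTTP header names are case-insensitive, so the test lowercases them before asserting. A standalone sketch of that idiom, substituting the standard library's `email.utils` date parser for the `d1_common` helper the record uses:

```python
from email.utils import parsedate_to_datetime

# Headers as (name, value) pairs, the shape that getheaders() returns.
headers = [("Date", "Wed, 21 Oct 2015 07:28:00 GMT"),
           ("Content-Type", "text/xml"),
           ("Content-Length", "3506")]
headers_lower = {name.lower(): value for name, value in headers}

assert "date" in headers_lower and "content-type" in headers_lower
assert int(headers_lower["content-length"]) == 3506
assert parsedate_to_datetime(headers_lower["date"]).year == 2015
```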
) >>> print(str(changed)) SynthDef e2436271176995c6a0a5cac6d1553f8b { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] } Returns ugen graph.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from supriya.tools import ugentools\n ugen = abs(ugentools.HPZ1.ar(source=source)) > threshold\n return ugen\n<|end_body_0|>\n\n<|body_start_1|>\n from supriya.tools import ugentools\n ugen = abs(ugentools.HPZ1.kr(source=source)) > threshold\n return ugen\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000458", "length_bytes": 2724, "license_type": "permissive", "methods": [{"docstring": "Constructs an audio-rate Changed. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.ar( ... source=source, ... threshold=0, ... ) >>> print(str(changed)) SynthDef 39e1f9d61589c4acaaf297cc961d65e4 { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] } Returns ugen graph.", "name": "ar", "signature": "def ar(cls, source=None, threshold=0)"}, {"docstring": "Constructs a control-rate Changed. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.kr( ... source=source, ... threshold=0, ... ) >>> print(str(changed)) SynthDef e2436271176995c6a0a5cac6d1553f8b { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] } Returns ugen graph.", "name": "kr", "signature": "def kr(cls, source=None, threshold=0)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005055", "prompt": "Implement the Python class `Changed` described below.\n\nClass description:\nTriggers when a value changes. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.ar( ... source=source, ... threshold=0, ... ) >>> print(str(changed)) SynthDef 39e1f9d61589c4acaaf297cc961d65e4 { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] }\n\nMethod signatures and docstrings:\n- def ar(cls, source=None, threshold=0): Constructs an audio-rate Changed. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.ar( ... source=source, ... threshold=0, ... ) >>> print(str(changed)) SynthDef 39e1f9d61589c4acaaf297cc961d65e4 { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] } Returns ugen graph.\n- def kr(cls, source=None, threshold=0): Constructs a control-rate Changed. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.kr( ... source=source, ... threshold=0, ... 
) >>> print(str(changed)) SynthDef e2436271176995c6a0a5cac6d1553f8b { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] } Returns ugen graph.", "prompted_full_text": "Implement the Python class `Changed` described below.\n\nClass description:\nTriggers when a value changes. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.ar( ... source=source, ... threshold=0, ... ) >>> print(str(changed)) SynthDef 39e1f9d61589c4acaaf297cc961d65e4 { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] }\n\nMethod signatures and docstrings:\n- def ar(cls, source=None, threshold=0): Constructs an audio-rate Changed. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.ar( ... source=source, ... threshold=0, ... ) >>> print(str(changed)) SynthDef 39e1f9d61589c4acaaf297cc961d65e4 { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] } Returns ugen graph.\n- def kr(cls, source=None, threshold=0): Constructs a control-rate Changed. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.kr( ... source=source, ... threshold=0, ... ) >>> print(str(changed)) SynthDef e2436271176995c6a0a5cac6d1553f8b { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] } Returns ugen graph.\n\n<|skeleton|>\nclass Changed:\n \"\"\"Triggers when a value changes. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.ar( ... source=source, ... threshold=0, ... ) >>> print(str(changed)) SynthDef 39e1f9d61589c4acaaf297cc961d65e4 { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] }\"\"\"\n\n def ar(cls, source=None, threshold=0):\n \"\"\"Constructs an audio-rate Changed. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.ar( ... source=source, ... threshold=0, ... ) >>> print(str(changed)) SynthDef 39e1f9d61589c4acaaf297cc961d65e4 { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] } Returns ugen graph.\"\"\"\n <|body_0|>\n\n def kr(cls, source=None, threshold=0):\n \"\"\"Constructs a control-rate Changed. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.kr( ... source=source, ... threshold=0, ... 
) >>> print(str(changed)) SynthDef e2436271176995c6a0a5cac6d1553f8b { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] } Returns ugen graph.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from supriya.tools import ugentools\n ugen = abs(ugentools.HPZ1.ar(source=source)) > threshold\n return ugen\n<|end_body_0|>\n\n<|body_start_1|>\n from supriya.tools import ugentools\n ugen = abs(ugentools.HPZ1.kr(source=source)) > threshold\n return ugen\n<|end_body_1|>\n", "revision_id": "30f79a26e5c5f92514d09d7d31f62452caa2634a", "skeleton": "<|skeleton|>\nclass Changed:\n \"\"\"Triggers when a value changes. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.ar( ... source=source, ... threshold=0, ... ) >>> print(str(changed)) SynthDef 39e1f9d61589c4acaaf297cc961d65e4 { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] }\"\"\"\n\n def ar(cls, source=None, threshold=0):\n \"\"\"Constructs an audio-rate Changed. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.ar( ... source=source, ... threshold=0, ... ) >>> print(str(changed)) SynthDef 39e1f9d61589c4acaaf297cc961d65e4 { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] } Returns ugen graph.\"\"\"\n <|body_0|>\n\n def kr(cls, source=None, threshold=0):\n \"\"\"Constructs a control-rate Changed. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.kr( ... source=source, ... threshold=0, ... ) >>> print(str(changed)) SynthDef e2436271176995c6a0a5cac6d1553f8b { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] } Returns ugen graph.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Changed:\n \"\"\"Triggers when a value changes. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.ar( ... source=source, ... threshold=0, ... ) >>> print(str(changed)) SynthDef 39e1f9d61589c4acaaf297cc961d65e4 { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] }\"\"\"\n\n def ar(cls, source=None, threshold=0):\n \"\"\"Constructs an audio-rate Changed. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.ar( ... source=source, ... threshold=0, ... 
) >>> print(str(changed)) SynthDef 39e1f9d61589c4acaaf297cc961d65e4 { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] } Returns ugen graph.\"\"\"\n from supriya.tools import ugentools\n ugen = abs(ugentools.HPZ1.ar(source=source)) > threshold\n return ugen\n\n def kr(cls, source=None, threshold=0):\n \"\"\"Constructs a control-rate Changed. :: >>> source = ugentools.In.ar(bus=0) >>> changed = ugentools.Changed.kr( ... source=source, ... threshold=0, ... ) >>> print(str(changed)) SynthDef e2436271176995c6a0a5cac6d1553f8b { const_0:0.0 -> 0_In[0:bus] 0_In[0] -> 1_HPZ1[0:source] 1_HPZ1[0] -> 2_UnaryOpUGen:ABSOLUTE_VALUE[0:source] 2_UnaryOpUGen:ABSOLUTE_VALUE[0] -> 3_BinaryOpUGen:GREATER_THAN[0:left] const_0:0.0 -> 3_BinaryOpUGen:GREATER_THAN[1:right] } Returns ugen graph.\"\"\"\n from supriya.tools import ugentools\n ugen = abs(ugentools.HPZ1.kr(source=source)) > threshold\n return ugen\n", "source": "the_stack_v2_python_sparse", "source_path": "supriya/tools/ugentools/Changed.py", "source_repo": "andrewyoung1991/supriya", "split": "val", "star_events_count": 2} {"blob_id": "e3cc05e3c7f0953a6eb132179b3af8215fc42f65", "bodies": ["class ExceptionType1(Exception):\n pass\n\nclass ExceptionType2(Exception):\n pass\n\n@decorators.Memoize\ndef raiseExceptions():\n if raiseExceptions.count == 0:\n raiseExceptions.count += 1\n raise ExceptionType1()\n if raiseExceptions.count == 1:\n raise ExceptionType2()\nraiseExceptions.count = 0\nwith self.assertRaises(ExceptionType1):\n raiseExceptions()\nwith self.assertRaises(ExceptionType2):\n raiseExceptions()", "@decorators.Memoize\ndef memoized():\n memoized.count += 1\n return memoized.count\nmemoized.count = 0\n\ndef notMemoized():\n notMemoized.count += 1\n return notMemoized.count\nnotMemoized.count = 0\nself.assertEquals(memoized(), 1)\nself.assertEquals(memoized(), 1)\nself.assertEquals(memoized(), 1)\nself.assertEquals(notMemoized(), 1)\nself.assertEquals(notMemoized(), 2)\nself.assertEquals(notMemoized(), 3)", "@decorators.Memoize\ndef returnValueBasedOnArgsKwargs(a, k=0):\n return a + k\nself.assertEquals(returnValueBasedOnArgsKwargs(1, 1), 2)\nself.assertEquals(returnValueBasedOnArgsKwargs(1, 2), 3)\nself.assertEquals(returnValueBasedOnArgsKwargs(2, 1), 3)\nself.assertEquals(returnValueBasedOnArgsKwargs(3, 3), 6)"], "bodies_text": "<|body_start_0|>\n class ExceptionType1(Exception):\n pass\n\n class ExceptionType2(Exception):\n pass\n\n @decorators.Memoize\n def raiseExceptions():\n if raiseExceptions.count == 0:\n raiseExceptions.count += 1\n raise ExceptionType1()\n if raiseExceptions.count == 1:\n raise ExceptionType2()\n raiseExceptions.count = 0\n with self.assertRaises(ExceptionType1):\n raiseExceptions()\n with self.assertRaises(ExceptionType2):\n raiseExceptions()\n<|end_body_0|>\n\n<|body_start_1|>\n @decorators.Memoize\n def memoized():\n memoized.count += 1\n return memoized.count\n memoized.count = 0\n\n def notMemoized():\n notMemoized.count += 1\n return notMemoized.count\n notMemoized.count = 0\n self.assertEquals(memoized(), 1)\n self.assertEquals(memoized(), 1)\n self.assertEquals(memoized(), 1)\n self.assertEquals(notMemoized(), 1)\n self.assertEquals(notMemoized(), 2)\n self.assertEquals(notMemoized(), 3)\n<|end_body_1|>\n\n<|body_start_2|>\n @decorators.Memoize\n def returnValueBasedOnArgsKwargs(a, k=0):\n return a + k\n 
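A numeric sketch of what the `Changed` graph in the record above computes. In SuperCollider, `HPZ1` is the two-point difference filter y[n] = 0.5 * (x[n] - x[n-1]), so `abs(HPZ1(source)) > threshold` fires whenever consecutive samples differ by more than twice the threshold; here the first sample is treated as unchanged.

```python
def changed(samples, threshold=0.0):
    # Sample-by-sample equivalent of abs(HPZ1(source)) > threshold.
    prev = samples[0]
    out = []
    for x in samples:
        out.append(1.0 if abs(0.5 * (x - prev)) > threshold else 0.0)
        prev = x
    return out

print(changed([0.0, 0.0, 1.0, 1.0, 0.5], threshold=0.1))
# [0.0, 0.0, 1.0, 0.0, 1.0] -- one trigger per change of value
```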
self.assertEquals(returnValueBasedOnArgsKwargs(1, 1), 2)\n self.assertEquals(returnValueBasedOnArgsKwargs(1, 2), 3)\n self.assertEquals(returnValueBasedOnArgsKwargs(2, 1), 3)\n self.assertEquals(returnValueBasedOnArgsKwargs(3, 3), 6)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "MemoizeDecoratorTest", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MemoizeDecoratorTest:\n\n def testFunctionExceptionNotMemoized(self):\n \"\"\"Tests that |Memoize| decorator does not cache exception results.\"\"\"\n <|body_0|>\n\n def testFunctionResultMemoized(self):\n \"\"\"Tests that |Memoize| decorator caches results.\"\"\"\n <|body_1|>\n\n def testFunctionMemoizedBasedOnArgs(self):\n \"\"\"Tests that |Memoize| caches results based on args and kwargs.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n class ExceptionType1(Exception):\n pass\n\n class ExceptionType2(Exception):\n pass\n\n @decorators.Memoize\n def raiseExceptions():\n if raiseExceptions.count == 0:\n raiseExceptions.count += 1\n raise ExceptionType1()\n if raiseExceptions.count == 1:\n raise ExceptionType2()\n raiseExceptions.count = 0\n with self.assertRaises(ExceptionType1):\n raiseExceptions()\n with self.assertRaises(ExceptionType2):\n raiseExceptions()\n<|end_body_0|>\n\n<|body_start_1|>\n @decorators.Memoize\n def memoized():\n memoized.count += 1\n return memoized.count\n memoized.count = 0\n\n def notMemoized():\n notMemoized.count += 1\n return notMemoized.count\n notMemoized.count = 0\n self.assertEquals(memoized(), 1)\n self.assertEquals(memoized(), 1)\n self.assertEquals(memoized(), 1)\n self.assertEquals(notMemoized(), 1)\n self.assertEquals(notMemoized(), 2)\n self.assertEquals(notMemoized(), 3)\n<|end_body_1|>\n\n<|body_start_2|>\n @decorators.Memoize\n def returnValueBasedOnArgsKwargs(a, k=0):\n return a + k\n self.assertEquals(returnValueBasedOnArgsKwargs(1, 1), 2)\n self.assertEquals(returnValueBasedOnArgsKwargs(1, 2), 3)\n self.assertEquals(returnValueBasedOnArgsKwargs(2, 1), 3)\n self.assertEquals(returnValueBasedOnArgsKwargs(3, 3), 6)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000459", "length_bytes": 1997, "license_type": "permissive", "methods": [{"docstring": "Tests that |Memoize| decorator does not cache exception results.", "name": "testFunctionExceptionNotMemoized", "signature": "def testFunctionExceptionNotMemoized(self)"}, {"docstring": "Tests that |Memoize| decorator caches results.", "name": "testFunctionResultMemoized", "signature": "def testFunctionResultMemoized(self)"}, {"docstring": "Tests that |Memoize| caches results based on args and kwargs.", "name": "testFunctionMemoizedBasedOnArgs", "signature": "def testFunctionMemoizedBasedOnArgs(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_000387", "prompt": "Implement the Python class `MemoizeDecoratorTest` described below.\n\nClass description:\nImplement the MemoizeDecoratorTest class.\n\nMethod signatures and docstrings:\n- def testFunctionExceptionNotMemoized(self): Tests that |Memoize| decorator does not cache exception results.\n- def testFunctionResultMemoized(self): Tests that |Memoize| decorator caches results.\n- def testFunctionMemoizedBasedOnArgs(self): Tests that |Memoize| caches results based on args and kwargs.", "prompted_full_text": "Implement the Python class `MemoizeDecoratorTest` described below.\n\nClass description:\nImplement the MemoizeDecoratorTest class.\n\nMethod signatures and 
docstrings:\n- def testFunctionExceptionNotMemoized(self): Tests that |Memoize| decorator does not cache exception results.\n- def testFunctionResultMemoized(self): Tests that |Memoize| decorator caches results.\n- def testFunctionMemoizedBasedOnArgs(self): Tests that |Memoize| caches results based on args and kwargs.\n\n<|skeleton|>\nclass MemoizeDecoratorTest:\n\n def testFunctionExceptionNotMemoized(self):\n \"\"\"Tests that |Memoize| decorator does not cache exception results.\"\"\"\n <|body_0|>\n\n def testFunctionResultMemoized(self):\n \"\"\"Tests that |Memoize| decorator caches results.\"\"\"\n <|body_1|>\n\n def testFunctionMemoizedBasedOnArgs(self):\n \"\"\"Tests that |Memoize| caches results based on args and kwargs.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n class ExceptionType1(Exception):\n pass\n\n class ExceptionType2(Exception):\n pass\n\n @decorators.Memoize\n def raiseExceptions():\n if raiseExceptions.count == 0:\n raiseExceptions.count += 1\n raise ExceptionType1()\n if raiseExceptions.count == 1:\n raise ExceptionType2()\n raiseExceptions.count = 0\n with self.assertRaises(ExceptionType1):\n raiseExceptions()\n with self.assertRaises(ExceptionType2):\n raiseExceptions()\n<|end_body_0|>\n\n<|body_start_1|>\n @decorators.Memoize\n def memoized():\n memoized.count += 1\n return memoized.count\n memoized.count = 0\n\n def notMemoized():\n notMemoized.count += 1\n return notMemoized.count\n notMemoized.count = 0\n self.assertEquals(memoized(), 1)\n self.assertEquals(memoized(), 1)\n self.assertEquals(memoized(), 1)\n self.assertEquals(notMemoized(), 1)\n self.assertEquals(notMemoized(), 2)\n self.assertEquals(notMemoized(), 3)\n<|end_body_1|>\n\n<|body_start_2|>\n @decorators.Memoize\n def returnValueBasedOnArgsKwargs(a, k=0):\n return a + k\n self.assertEquals(returnValueBasedOnArgsKwargs(1, 1), 2)\n self.assertEquals(returnValueBasedOnArgsKwargs(1, 2), 3)\n self.assertEquals(returnValueBasedOnArgsKwargs(2, 1), 3)\n self.assertEquals(returnValueBasedOnArgsKwargs(3, 3), 6)\n<|end_body_2|>\n", "revision_id": "53102de187a48ac2cfc241fef54dcbc29c453a8e", "skeleton": "<|skeleton|>\nclass MemoizeDecoratorTest:\n\n def testFunctionExceptionNotMemoized(self):\n \"\"\"Tests that |Memoize| decorator does not cache exception results.\"\"\"\n <|body_0|>\n\n def testFunctionResultMemoized(self):\n \"\"\"Tests that |Memoize| decorator caches results.\"\"\"\n <|body_1|>\n\n def testFunctionMemoizedBasedOnArgs(self):\n \"\"\"Tests that |Memoize| caches results based on args and kwargs.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MemoizeDecoratorTest:\n def testFunctionExceptionNotMemoized(self):\n \"\"\"Tests that |Memoize| decorator does not cache exception results.\"\"\"\n class ExceptionType1(Exception):\n pass\n\n class ExceptionType2(Exception):\n pass\n\n @decorators.Memoize\n def raiseExceptions():\n if raiseExceptions.count == 0:\n raiseExceptions.count += 1\n raise ExceptionType1()\n if raiseExceptions.count == 1:\n raise ExceptionType2()\n raiseExceptions.count = 0\n with self.assertRaises(ExceptionType1):\n raiseExceptions()\n with self.assertRaises(ExceptionType2):\n raiseExceptions()\n\n def testFunctionResultMemoized(self):\n \"\"\"Tests that |Memoize| decorator caches results.\"\"\"\n @decorators.Memoize\n def memoized():\n memoized.count += 1\n return memoized.count\n memoized.count = 0\n\n def notMemoized():\n notMemoized.count += 
1\n return notMemoized.count\n notMemoized.count = 0\n self.assertEquals(memoized(), 1)\n self.assertEquals(memoized(), 1)\n self.assertEquals(memoized(), 1)\n self.assertEquals(notMemoized(), 1)\n self.assertEquals(notMemoized(), 2)\n self.assertEquals(notMemoized(), 3)\n\n def testFunctionMemoizedBasedOnArgs(self):\n \"\"\"Tests that |Memoize| caches results based on args and kwargs.\"\"\"\n @decorators.Memoize\n def returnValueBasedOnArgsKwargs(a, k=0):\n return a + k\n self.assertEquals(returnValueBasedOnArgsKwargs(1, 1), 2)\n self.assertEquals(returnValueBasedOnArgsKwargs(1, 2), 3)\n self.assertEquals(returnValueBasedOnArgsKwargs(2, 1), 3)\n self.assertEquals(returnValueBasedOnArgsKwargs(3, 3), 6)\n", "source": "the_stack_v2_python_sparse", "source_path": "devil/devil/utils/decorators_test.py", "source_repo": "catapult-project/catapult", "split": "val", "star_events_count": 2032} {"blob_id": "bb0c5155111e0c6ad0be0dcc95af68e9fde7e24b", "bodies": ["for s in STATES:\n response = self.client.get(reverse('education:state_detail', args=(s,)))\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.context.get('message'), None)\n self.assertContains(response, 'Error: No data for state {}'.format(s))", "create_null_states()\nfor s in STATES:\n response = self.client.get(reverse('education:state_detail', args=(s,)))\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.context.get('data'), None)\n self.assertNotEqual(response.context.get('json_data'), None)\n self.assertContains(response, 'Students in 15-16 Cohort')\n self.assertNotContains(response, '>Native American')", "create_states()\nfor s in STATES:\n response = self.client.get(reverse('education:state_detail', args=(s,)))\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.context.get('data'), None)\n self.assertNotEqual(response.context.get('json_data'), None)\n self.assertContains(response, 'Students in 15-16 Cohort')\n self.assertContains(response, '>Native American')"], "bodies_text": "<|body_start_0|>\n for s in STATES:\n response = self.client.get(reverse('education:state_detail', args=(s,)))\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.context.get('message'), None)\n self.assertContains(response, 'Error: No data for state {}'.format(s))\n<|end_body_0|>\n\n<|body_start_1|>\n create_null_states()\n for s in STATES:\n response = self.client.get(reverse('education:state_detail', args=(s,)))\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.context.get('data'), None)\n self.assertNotEqual(response.context.get('json_data'), None)\n self.assertContains(response, 'Students in 15-16 Cohort')\n self.assertNotContains(response, '>Native American')\n<|end_body_1|>\n\n<|body_start_2|>\n create_states()\n for s in STATES:\n response = self.client.get(reverse('education:state_detail', args=(s,)))\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.context.get('data'), None)\n self.assertNotEqual(response.context.get('json_data'), None)\n self.assertContains(response, 'Students in 15-16 Cohort')\n self.assertContains(response, '>Native American')\n<|end_body_2|>\n", "class_docstring": "", "class_name": "EducationStateDetailsViewTest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EducationStateDetailsViewTest:\n\n def test_no_data(self):\n \"\"\"Make sure each state page renders if there is no database data.\"\"\"\n 
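A minimal decorator consistent with the behaviour the `MemoizeDecoratorTest` record above asserts: results are cached per call signature, but a raised exception leaves nothing in the cache, so the next call re-executes the function. The real `devil.utils.decorators` implementation may differ in detail.

```python
import functools

def Memoize(f):
    cache = {}
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        key = (args, tuple(sorted(kwargs.items())))
        if key not in cache:
            # If f raises here, nothing is cached and the exception escapes,
            # which is what testFunctionExceptionNotMemoized relies on.
            cache[key] = f(*args, **kwargs)
        return cache[key]
    return wrapper

@Memoize
def add(a, k=0):
    return a + k

assert add(1, 1) == 2 and add(2, 1) == 3 and add(3, 3) == 6
```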
<|body_0|>\n\n def test_with_null_data(self):\n \"\"\"Make sure each state page renders if there is data in the database.\"\"\"\n <|body_1|>\n\n def test_with_data(self):\n \"\"\"Make sure each page renders if there is non-null data in the database\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for s in STATES:\n response = self.client.get(reverse('education:state_detail', args=(s,)))\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.context.get('message'), None)\n self.assertContains(response, 'Error: No data for state {}'.format(s))\n<|end_body_0|>\n\n<|body_start_1|>\n create_null_states()\n for s in STATES:\n response = self.client.get(reverse('education:state_detail', args=(s,)))\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.context.get('data'), None)\n self.assertNotEqual(response.context.get('json_data'), None)\n self.assertContains(response, 'Students in 15-16 Cohort')\n self.assertNotContains(response, '>Native American')\n<|end_body_1|>\n\n<|body_start_2|>\n create_states()\n for s in STATES:\n response = self.client.get(reverse('education:state_detail', args=(s,)))\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.context.get('data'), None)\n self.assertNotEqual(response.context.get('json_data'), None)\n self.assertContains(response, 'Students in 15-16 Cohort')\n self.assertContains(response, '>Native American')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000460", "length_bytes": 9266, "license_type": "no_license", "methods": [{"docstring": "Make sure each state page renders if there is no database data.", "name": "test_no_data", "signature": "def test_no_data(self)"}, {"docstring": "Make sure each state page renders if there is data in the database.", "name": "test_with_null_data", "signature": "def test_with_null_data(self)"}, {"docstring": "Make sure each page renders if there is non-null data in the database", "name": "test_with_data", "signature": "def test_with_data(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_val_000274", "prompt": "Implement the Python class `EducationStateDetailsViewTest` described below.\n\nClass description:\nImplement the EducationStateDetailsViewTest class.\n\nMethod signatures and docstrings:\n- def test_no_data(self): Make sure each state page renders if there is no database data.\n- def test_with_null_data(self): Make sure each state page renders if there is data in the database.\n- def test_with_data(self): Make sure each page renders if there is non-null data in the database", "prompted_full_text": "Implement the Python class `EducationStateDetailsViewTest` described below.\n\nClass description:\nImplement the EducationStateDetailsViewTest class.\n\nMethod signatures and docstrings:\n- def test_no_data(self): Make sure each state page renders if there is no database data.\n- def test_with_null_data(self): Make sure each state page renders if there is data in the database.\n- def test_with_data(self): Make sure each page renders if there is non-null data in the database\n\n<|skeleton|>\nclass EducationStateDetailsViewTest:\n\n def test_no_data(self):\n \"\"\"Make sure each state page renders if there is no database data.\"\"\"\n <|body_0|>\n\n def test_with_null_data(self):\n \"\"\"Make sure each state page renders if there is data in the database.\"\"\"\n <|body_1|>\n\n def test_with_data(self):\n \"\"\"Make sure each page renders if there is non-null data in the database\"\"\"\n
<|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for s in STATES:\n response = self.client.get(reverse('education:state_detail', args=(s,)))\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.context.get('message'), None)\n self.assertContains(response, 'Error: No data for state {}'.format(s))\n<|end_body_0|>\n\n<|body_start_1|>\n create_null_states()\n for s in STATES:\n response = self.client.get(reverse('education:state_detail', args=(s,)))\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.context.get('data'), None)\n self.assertNotEqual(response.context.get('json_data'), None)\n self.assertContains(response, 'Students in 15-16 Cohort')\n self.assertNotContains(response, '>Native American')\n<|end_body_1|>\n\n<|body_start_2|>\n create_states()\n for s in STATES:\n response = self.client.get(reverse('education:state_detail', args=(s,)))\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.context.get('data'), None)\n self.assertNotEqual(response.context.get('json_data'), None)\n self.assertContains(response, 'Students in 15-16 Cohort')\n self.assertContains(response, '>Native American')\n<|end_body_2|>\n", "revision_id": "2a8e2dc4e9b3cb92d4d437b37e61940a9486b81f", "skeleton": "<|skeleton|>\nclass EducationStateDetailsViewTest:\n\n def test_no_data(self):\n \"\"\"Make sure each state page renders if there is no database data.\"\"\"\n <|body_0|>\n\n def test_with_null_data(self):\n \"\"\"Make sure each state page renders if there is data in the database.\"\"\"\n <|body_1|>\n\n def test_with_data(self):\n \"\"\"Make sure each page renders if there is non-null data in the database\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class EducationStateDetailsViewTest:\n def test_no_data(self):\n \"\"\"Make sure each state page renders if there is no database data.\"\"\"\n for s in STATES:\n response = self.client.get(reverse('education:state_detail', args=(s,)))\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.context.get('message'), None)\n self.assertContains(response, 'Error: No data for state {}'.format(s))\n\n def test_with_null_data(self):\n \"\"\"Make sure each state page renders if there is data in the database.\"\"\"\n create_null_states()\n for s in STATES:\n response = self.client.get(reverse('education:state_detail', args=(s,)))\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.context.get('data'), None)\n self.assertNotEqual(response.context.get('json_data'), None)\n self.assertContains(response, 'Students in 15-16 Cohort')\n self.assertNotContains(response, '>Native American')\n\n def test_with_data(self):\n \"\"\"Make sure each page renders if there is non-null data in the database\"\"\"\n create_states()\n for s in STATES:\n response = self.client.get(reverse('education:state_detail', args=(s,)))\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.context.get('data'), None)\n self.assertNotEqual(response.context.get('json_data'), None)\n self.assertContains(response, 'Students in 15-16 Cohort')\n self.assertContains(response, '>Native American')\n", "source": "the_stack_v2_python_sparse", "source_path": "education/tests.py", "source_repo": "smeds1/mysite", "split": "val", "star_events_count": 1} {"blob_id": "c72bc7da77279c44bbce212bca987b6e57de76dc", "bodies":
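The `EducationStateDetailsViewTest` record above leans on Django's test client plus `reverse()` for named URLs. A pared-down sketch of the same pattern; the URL name mirrors the record, while the state code "AL" and a matching `urls.py` are assumptions:

```python
from django.test import TestCase
from django.urls import reverse

class StateDetailSmokeTest(TestCase):
    def test_page_renders_without_data(self):
        # With an empty database the view should still answer 200 and
        # surface its own error message in the rendered template.
        response = self.client.get(
            reverse("education:state_detail", args=("AL",)))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Error: No data for state AL")
```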
["flags.GetRepoArg().AddToParser(parser)\nbase.ASYNC_FLAG.AddToParser(parser)\nparser.add_argument('--source', metavar='SOURCE', required=True, help=' The path of a package to upload.')", "client = apis.GetClientInstance('artifactregistry', self.api_version)\nmessages = client.MESSAGES_MODULE\nclient.additional_http_headers['X-Goog-Upload-Protocol'] = 'multipart'\nrepo_ref = args.CONCEPTS.repository.Parse()\nupload_req = messages.UploadYumArtifactRequest\nupload_request = upload_req()\nrequest = messages.ArtifactregistryProjectsLocationsRepositoriesYumArtifactsUploadRequest(uploadYumArtifactRequest=upload_request, parent=repo_ref.RelativeName())\nupload = transfer.Upload.FromFile(args.source, mime_type='application/x-rpm')\nop_obj = client.projects_locations_repositories_yumArtifacts.Upload(request, upload=upload)\nop = op_obj.operation\nop_ref = resources.REGISTRY.ParseRelativeName(op.name, collection='artifactregistry.projects.locations.operations')\nif args.async_:\n return op_ref\nelse:\n result = waiter.WaitFor(waiter.CloudOperationPollerNoResources(client.projects_locations_operations), op_ref, 'Uploading package')\n return result"], "bodies_text": "<|body_start_0|>\n flags.GetRepoArg().AddToParser(parser)\n base.ASYNC_FLAG.AddToParser(parser)\n parser.add_argument('--source', metavar='SOURCE', required=True, help=' The path of a package to upload.')\n<|end_body_0|>\n\n<|body_start_1|>\n client = apis.GetClientInstance('artifactregistry', self.api_version)\n messages = client.MESSAGES_MODULE\n client.additional_http_headers['X-Goog-Upload-Protocol'] = 'multipart'\n repo_ref = args.CONCEPTS.repository.Parse()\n upload_req = messages.UploadYumArtifactRequest\n upload_request = upload_req()\n request = messages.ArtifactregistryProjectsLocationsRepositoriesYumArtifactsUploadRequest(uploadYumArtifactRequest=upload_request, parent=repo_ref.RelativeName())\n upload = transfer.Upload.FromFile(args.source, mime_type='application/x-rpm')\n op_obj = client.projects_locations_repositories_yumArtifacts.Upload(request, upload=upload)\n op = op_obj.operation\n op_ref = resources.REGISTRY.ParseRelativeName(op.name, collection='artifactregistry.projects.locations.operations')\n if args.async_:\n return op_ref\n else:\n result = waiter.WaitFor(waiter.CloudOperationPollerNoResources(client.projects_locations_operations), op_ref, 'Uploading package')\n return result\n<|end_body_1|>\n", "class_docstring": "Upload an RPM package to an artifact repository.", "class_name": "Upload", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Upload:\n \"\"\"Upload an RPM package to an artifact repository.\"\"\"\n\n def Args(parser):\n \"\"\"Set up arguements for this command. 
Args: parser: An argparse.ArgumentPaser.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"Run package import command.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n flags.GetRepoArg().AddToParser(parser)\n base.ASYNC_FLAG.AddToParser(parser)\n parser.add_argument('--source', metavar='SOURCE', required=True, help=' The path of a package to upload.')\n<|end_body_0|>\n\n<|body_start_1|>\n client = apis.GetClientInstance('artifactregistry', self.api_version)\n messages = client.MESSAGES_MODULE\n client.additional_http_headers['X-Goog-Upload-Protocol'] = 'multipart'\n repo_ref = args.CONCEPTS.repository.Parse()\n upload_req = messages.UploadYumArtifactRequest\n upload_request = upload_req()\n request = messages.ArtifactregistryProjectsLocationsRepositoriesYumArtifactsUploadRequest(uploadYumArtifactRequest=upload_request, parent=repo_ref.RelativeName())\n upload = transfer.Upload.FromFile(args.source, mime_type='application/x-rpm')\n op_obj = client.projects_locations_repositories_yumArtifacts.Upload(request, upload=upload)\n op = op_obj.operation\n op_ref = resources.REGISTRY.ParseRelativeName(op.name, collection='artifactregistry.projects.locations.operations')\n if args.async_:\n return op_ref\n else:\n result = waiter.WaitFor(waiter.CloudOperationPollerNoResources(client.projects_locations_operations), op_ref, 'Uploading package')\n return result\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000461", "length_bytes": 3131, "license_type": "permissive", "methods": [{"docstring": "Set up arguements for this command. Args: parser: An argparse.ArgumentPaser.", "name": "Args", "signature": "def Args(parser)"}, {"docstring": "Run package import command.", "name": "Run", "signature": "def Run(self, args)"}], "n_methods": 2, "prompt": "Implement the Python class `Upload` described below.\n\nClass description:\nUpload an RPM package to an artifact repository.\n\nMethod signatures and docstrings:\n- def Args(parser): Set up arguements for this command. Args: parser: An argparse.ArgumentPaser.\n- def Run(self, args): Run package import command.", "prompted_full_text": "Implement the Python class `Upload` described below.\n\nClass description:\nUpload an RPM package to an artifact repository.\n\nMethod signatures and docstrings:\n- def Args(parser): Set up arguements for this command. Args: parser: An argparse.ArgumentPaser.\n- def Run(self, args): Run package import command.\n\n<|skeleton|>\nclass Upload:\n \"\"\"Upload an RPM package to an artifact repository.\"\"\"\n\n def Args(parser):\n \"\"\"Set up arguements for this command. 
Args: parser: An argparse.ArgumentPaser.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"Run package import command.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n flags.GetRepoArg().AddToParser(parser)\n base.ASYNC_FLAG.AddToParser(parser)\n parser.add_argument('--source', metavar='SOURCE', required=True, help=' The path of a package to upload.')\n<|end_body_0|>\n\n<|body_start_1|>\n client = apis.GetClientInstance('artifactregistry', self.api_version)\n messages = client.MESSAGES_MODULE\n client.additional_http_headers['X-Goog-Upload-Protocol'] = 'multipart'\n repo_ref = args.CONCEPTS.repository.Parse()\n upload_req = messages.UploadYumArtifactRequest\n upload_request = upload_req()\n request = messages.ArtifactregistryProjectsLocationsRepositoriesYumArtifactsUploadRequest(uploadYumArtifactRequest=upload_request, parent=repo_ref.RelativeName())\n upload = transfer.Upload.FromFile(args.source, mime_type='application/x-rpm')\n op_obj = client.projects_locations_repositories_yumArtifacts.Upload(request, upload=upload)\n op = op_obj.operation\n op_ref = resources.REGISTRY.ParseRelativeName(op.name, collection='artifactregistry.projects.locations.operations')\n if args.async_:\n return op_ref\n else:\n result = waiter.WaitFor(waiter.CloudOperationPollerNoResources(client.projects_locations_operations), op_ref, 'Uploading package')\n return result\n<|end_body_1|>\n", "revision_id": "392abf004b16203030e6efd2f0af24db7c8d669e", "skeleton": "<|skeleton|>\nclass Upload:\n \"\"\"Upload an RPM package to an artifact repository.\"\"\"\n\n def Args(parser):\n \"\"\"Set up arguements for this command. Args: parser: An argparse.ArgumentPaser.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"Run package import command.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Upload:\n \"\"\"Upload an RPM package to an artifact repository.\"\"\"\n\n def Args(parser):\n \"\"\"Set up arguements for this command. 
Args: parser: An argparse.ArgumentPaser.\"\"\"\n flags.GetRepoArg().AddToParser(parser)\n base.ASYNC_FLAG.AddToParser(parser)\n parser.add_argument('--source', metavar='SOURCE', required=True, help=' The path of a package to upload.')\n\n def Run(self, args):\n \"\"\"Run package import command.\"\"\"\n client = apis.GetClientInstance('artifactregistry', self.api_version)\n messages = client.MESSAGES_MODULE\n client.additional_http_headers['X-Goog-Upload-Protocol'] = 'multipart'\n repo_ref = args.CONCEPTS.repository.Parse()\n upload_req = messages.UploadYumArtifactRequest\n upload_request = upload_req()\n request = messages.ArtifactregistryProjectsLocationsRepositoriesYumArtifactsUploadRequest(uploadYumArtifactRequest=upload_request, parent=repo_ref.RelativeName())\n upload = transfer.Upload.FromFile(args.source, mime_type='application/x-rpm')\n op_obj = client.projects_locations_repositories_yumArtifacts.Upload(request, upload=upload)\n op = op_obj.operation\n op_ref = resources.REGISTRY.ParseRelativeName(op.name, collection='artifactregistry.projects.locations.operations')\n if args.async_:\n return op_ref\n else:\n result = waiter.WaitFor(waiter.CloudOperationPollerNoResources(client.projects_locations_operations), op_ref, 'Uploading package')\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "lib/surface/artifacts/yum/upload.py", "source_repo": "google-cloud-sdk-unofficial/google-cloud-sdk", "split": "val", "star_events_count": 9} {"blob_id": "f9baafe5a1a83b9e04a5b3b64a78729a345e5ed8", "bodies": ["self.name = name\nself.space = space\nself.input_ = input_\nself.output_ = output_\nself.commands = commands", "print('-----------------------------------------------------------')\nprint(' Current Command List ')\nprint('-----------------------------------------------------------')\nfor cmd in self.commands:\n print('\\x1b[91m' + 'Command:'.ljust(16) + cmd.name + '\\x1b[0m')\n if cmd.name == 'E':\n print('which_qubits:'.ljust(15), cmd.which_qubits)\n elif cmd.name == 'M':\n print('which_qubit:'.ljust(15), cmd.which_qubit)\n print('plane:'.ljust(15), cmd.plane)\n print('angle:'.ljust(15), cmd.angle)\n print('domain_s:'.ljust(15), cmd.domain_s)\n print('domain_t:'.ljust(15), cmd.domain_t)\n else:\n print('which_qubit:'.ljust(15), cmd.which_qubit)\n print('domain:'.ljust(15), cmd.domain)\n print('-----------------------------------------------------------')"], "bodies_text": "<|body_start_0|>\n self.name = name\n self.space = space\n self.input_ = input_\n self.output_ = output_\n self.commands = commands\n<|end_body_0|>\n\n<|body_start_1|>\n print('-----------------------------------------------------------')\n print(' Current Command List ')\n print('-----------------------------------------------------------')\n for cmd in self.commands:\n print('\\x1b[91m' + 'Command:'.ljust(16) + cmd.name + '\\x1b[0m')\n if cmd.name == 'E':\n print('which_qubits:'.ljust(15), cmd.which_qubits)\n elif cmd.name == 'M':\n print('which_qubit:'.ljust(15), cmd.which_qubit)\n print('plane:'.ljust(15), cmd.plane)\n print('angle:'.ljust(15), cmd.angle)\n print('domain_s:'.ljust(15), cmd.domain_s)\n print('domain_t:'.ljust(15), cmd.domain_t)\n else:\n print('which_qubit:'.ljust(15), cmd.which_qubit)\n print('domain:'.ljust(15), cmd.domain)\n print('-----------------------------------------------------------')\n<|end_body_1|>\n", "class_docstring": "Class for creating a measurement pattern. This class represents the measurement pattern in the MBQC model. 
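`Run()` in the `Upload` record above follows a common gcloud pattern: start a long-running operation, then either hand the operation reference straight back (`--async`) or block on a poller. The same control flow stripped of the Artifact Registry specifics; `start_upload` and `wait_for` are stand-in callables, not real SDK calls:

```python
def run_upload(source, async_=False, *, start_upload, wait_for):
    operation = start_upload(source)   # kicks off the server-side operation
    if async_:
        return operation               # caller can poll for completion later
    return wait_for(operation, message="Uploading package")

# Stub demonstration of both paths:
start = lambda src: {"name": "operations/123", "source": src}
wait = lambda op, message: {"done": True, "operation": op["name"]}
assert run_upload("pkg.rpm", async_=True, start_upload=start, wait_for=wait)["name"] == "operations/123"
assert run_upload("pkg.rpm", start_upload=start, wait_for=wait)["done"] is True
```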
Please refer to [The measurement calculus, arXiv: 0704.1263] for technical details. Attributes: name (str): pattern name space (list): space vertices input_ (list): input vertices output_ (list): output vertices commands (list): command list", "class_name": "Pattern", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Pattern:\n \"\"\"Class for creating a measurement pattern. This class represents the measurement pattern in the MBQC model. Please refer to [The measurement calculus, arXiv: 0704.1263] for technical details. Attributes: name (str): pattern name space (list): space vertices input_ (list): input vertices output_ (list): output vertices commands (list): command list\"\"\"\n\n def __init__(self, name: str, space: list, input_: list, output_: list, commands: list):\n \"\"\"Constructor for Pattern class. Args: name (str): pattern name space (list): space vertices input_ (list): input vertices output_ (list): output vertices commands (list): command list\"\"\"\n <|body_0|>\n\n def print(self) -> None:\n \"\"\"Print all commands in the command list.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = name\n self.space = space\n self.input_ = input_\n self.output_ = output_\n self.commands = commands\n<|end_body_0|>\n\n<|body_start_1|>\n print('-----------------------------------------------------------')\n print(' Current Command List ')\n print('-----------------------------------------------------------')\n for cmd in self.commands:\n print('\\x1b[91m' + 'Command:'.ljust(16) + cmd.name + '\\x1b[0m')\n if cmd.name == 'E':\n print('which_qubits:'.ljust(15), cmd.which_qubits)\n elif cmd.name == 'M':\n print('which_qubit:'.ljust(15), cmd.which_qubit)\n print('plane:'.ljust(15), cmd.plane)\n print('angle:'.ljust(15), cmd.angle)\n print('domain_s:'.ljust(15), cmd.domain_s)\n print('domain_t:'.ljust(15), cmd.domain_t)\n else:\n print('which_qubit:'.ljust(15), cmd.which_qubit)\n print('domain:'.ljust(15), cmd.domain)\n print('-----------------------------------------------------------')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000462", "length_bytes": 7995, "license_type": "permissive", "methods": [{"docstring": "Constructor for Pattern class. Args: name (str): pattern name space (list): space vertices input_ (list): input vertices output_ (list): output vertices commands (list): command list", "name": "__init__", "signature": "def __init__(self, name: str, space: list, input_: list, output_: list, commands: list)"}, {"docstring": "Print all commands in the command list.", "name": "print", "signature": "def print(self) -> None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000379", "prompt": "Implement the Python class `Pattern` described below.\n\nClass description:\nClass for creating a measurement pattern. This class represents the measurement pattern in the MBQC model. Please refer to [The measurement calculus, arXiv: 0704.1263] for technical details. Attributes: name (str): pattern name space (list): space vertices input_ (list): input vertices output_ (list): output vertices commands (list): command list\n\nMethod signatures and docstrings:\n- def __init__(self, name: str, space: list, input_: list, output_: list, commands: list): Constructor for Pattern class. 
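A usage sketch for the `Pattern` record above. The E/M/X/Z command objects its `print` method inspects are not shown in this record, so the sketch sticks to an empty command list; the argument values are illustrative only:

```python
# Assumes the Pattern class from the record above is importable.
pattern = Pattern(name="teleport", space=[1, 2, 3], input_=[1], output_=[3], commands=[])
pattern.print()  # prints an empty command list between ruler lines
```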
Args: name (str): pattern name space (list): space vertices input_ (list): input vertices output_ (list): output vertices commands (list): command list\n- def print(self) -> None: Print all commands in the command list.", "prompted_full_text": "Implement the Python class `Pattern` described below.\n\nClass description:\nClass for creating a measurement pattern. This class represents the measurement pattern in the MBQC model. Please refer to [The measurement calculus, arXiv: 0704.1263] for technical details. Attributes: name (str): pattern name space (list): space vertices input_ (list): input vertices output_ (list): output vertices commands (list): command list\n\nMethod signatures and docstrings:\n- def __init__(self, name: str, space: list, input_: list, output_: list, commands: list): Constructor for Pattern class. Args: name (str): pattern name space (list): space vertices input_ (list): input vertices output_ (list): output vertices commands (list): command list\n- def print(self) -> None: Print all commands in the command list.\n\n<|skeleton|>\nclass Pattern:\n \"\"\"Class for creating a measurement pattern. This class represents the measurement pattern in the MBQC model. Please refer to [The measurement calculus, arXiv: 0704.1263] for technical details. Attributes: name (str): pattern name space (list): space vertices input_ (list): input vertices output_ (list): output vertices commands (list): command list\"\"\"\n\n def __init__(self, name: str, space: list, input_: list, output_: list, commands: list):\n \"\"\"Constructor for Pattern class. Args: name (str): pattern name space (list): space vertices input_ (list): input vertices output_ (list): output vertices commands (list): command list\"\"\"\n <|body_0|>\n\n def print(self) -> None:\n \"\"\"Print all commands in the command list.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = name\n self.space = space\n self.input_ = input_\n self.output_ = output_\n self.commands = commands\n<|end_body_0|>\n\n<|body_start_1|>\n print('-----------------------------------------------------------')\n print(' Current Command List ')\n print('-----------------------------------------------------------')\n for cmd in self.commands:\n print('\\x1b[91m' + 'Command:'.ljust(16) + cmd.name + '\\x1b[0m')\n if cmd.name == 'E':\n print('which_qubits:'.ljust(15), cmd.which_qubits)\n elif cmd.name == 'M':\n print('which_qubit:'.ljust(15), cmd.which_qubit)\n print('plane:'.ljust(15), cmd.plane)\n print('angle:'.ljust(15), cmd.angle)\n print('domain_s:'.ljust(15), cmd.domain_s)\n print('domain_t:'.ljust(15), cmd.domain_t)\n else:\n print('which_qubit:'.ljust(15), cmd.which_qubit)\n print('domain:'.ljust(15), cmd.domain)\n print('-----------------------------------------------------------')\n<|end_body_1|>\n", "revision_id": "8bc3c7238b5b6825eb63ded8d65afb08b389941f", "skeleton": "<|skeleton|>\nclass Pattern:\n \"\"\"Class for creating a measurement pattern. This class represents the measurement pattern in the MBQC model. Please refer to [The measurement calculus, arXiv: 0704.1263] for technical details. Attributes: name (str): pattern name space (list): space vertices input_ (list): input vertices output_ (list): output vertices commands (list): command list\"\"\"\n\n def __init__(self, name: str, space: list, input_: list, output_: list, commands: list):\n \"\"\"Constructor for Pattern class. 
Args: name (str): pattern name space (list): space vertices input_ (list): input vertices output_ (list): output vertices commands (list): command list\"\"\"\n <|body_0|>\n\n def print(self) -> None:\n \"\"\"Print all commands in the command list.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Pattern:\n \"\"\"Class for creating a measurement pattern. This class represents the measurement pattern in the MBQC model. Please refer to [The measurement calculus, arXiv: 0704.1263] for technical details. Attributes: name (str): pattern name space (list): space vertices input_ (list): input vertices output_ (list): output vertices commands (list): command list\"\"\"\n\n def __init__(self, name: str, space: list, input_: list, output_: list, commands: list):\n \"\"\"Constructor for Pattern class. Args: name (str): pattern name space (list): space vertices input_ (list): input vertices output_ (list): output vertices commands (list): command list\"\"\"\n self.name = name\n self.space = space\n self.input_ = input_\n self.output_ = output_\n self.commands = commands\n\n def print(self) -> None:\n \"\"\"Print all commands in the command list.\"\"\"\n print('-----------------------------------------------------------')\n print(' Current Command List ')\n print('-----------------------------------------------------------')\n for cmd in self.commands:\n print('\\x1b[91m' + 'Command:'.ljust(16) + cmd.name + '\\x1b[0m')\n if cmd.name == 'E':\n print('which_qubits:'.ljust(15), cmd.which_qubits)\n elif cmd.name == 'M':\n print('which_qubit:'.ljust(15), cmd.which_qubit)\n print('plane:'.ljust(15), cmd.plane)\n print('angle:'.ljust(15), cmd.angle)\n print('domain_s:'.ljust(15), cmd.domain_s)\n print('domain_t:'.ljust(15), cmd.domain_t)\n else:\n print('which_qubit:'.ljust(15), cmd.which_qubit)\n print('domain:'.ljust(15), cmd.domain)\n print('-----------------------------------------------------------')\n", "source": "the_stack_v2_python_sparse", "source_path": "Extensions/QuantumNetwork/qcompute_qnet/quantum/pattern.py", "source_repo": "baidu/QCompute", "split": "val", "star_events_count": 86} {"blob_id": "256cdebf8b50468f9ff1f9269b87676f543600eb", "bodies": ["if not root:\n return None\nP = self.ancestor(root, p)\nQ = self.ancestor(root, q)\nh = min(len(P), len(Q))\nfor i in xrange(h):\n if P[i] is Q[i]:\n result = P[i]\n else:\n break\nreturn result", "if not root:\n return []\nif root is dis:\n return [dis]\nl = self.ancestor(root.left, dis)\nr = self.ancestor(root.right, dis)\nif not l + r:\n return []\nreturn [root] + l + r"], "bodies_text": "<|body_start_0|>\n if not root:\n return None\n P = self.ancestor(root, p)\n Q = self.ancestor(root, q)\n h = min(len(P), len(Q))\n for i in xrange(h):\n if P[i] is Q[i]:\n result = P[i]\n else:\n break\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return []\n if root is dis:\n return [dis]\n l = self.ancestor(root.left, dis)\n r = self.ancestor(root.right, dis)\n if not l + r:\n return []\n return [root] + l + r\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def lowestCommonAncestor(self, root, p, q):\n \"\"\":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def ancestor(self, root, dis):\n \"\"\"Find the target node through its ancestors\"\"\"\n 
<|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return None\n P = self.ancestor(root, p)\n Q = self.ancestor(root, q)\n h = min(len(P), len(Q))\n for i in xrange(h):\n if P[i] is Q[i]:\n result = P[i]\n else:\n break\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return []\n if root is dis:\n return [dis]\n l = self.ancestor(root.left, dis)\n r = self.ancestor(root.right, dis)\n if not l + r:\n return []\n return [root] + l + r\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000463", "length_bytes": 1738, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode", "name": "lowestCommonAncestor", "signature": "def lowestCommonAncestor(self, root, p, q)"}, {"docstring": "Find the target node through its ancestors", "name": "ancestor", "signature": "def ancestor(self, root, dis)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002610", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def lowestCommonAncestor(self, root, p, q): :type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\n- def ancestor(self, root, dis): Find the target node through its ancestors", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def lowestCommonAncestor(self, root, p, q): :type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\n- def ancestor(self, root, dis): Find the target node through its ancestors\n\n<|skeleton|>\nclass Solution:\n\n def lowestCommonAncestor(self, root, p, q):\n \"\"\":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def ancestor(self, root, dis):\n \"\"\"Find the target node through its ancestors\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return None\n P = self.ancestor(root, p)\n Q = self.ancestor(root, q)\n h = min(len(P), len(Q))\n for i in xrange(h):\n if P[i] is Q[i]:\n result = P[i]\n else:\n break\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return []\n if root is dis:\n return [dis]\n l = self.ancestor(root.left, dis)\n r = self.ancestor(root.right, dis)\n if not l + r:\n return []\n return [root] + l + r\n<|end_body_1|>\n", "revision_id": "9687f8e743a8b6396fff192f22b5256d1025f86b", "skeleton": "<|skeleton|>\nclass Solution:\n\n def lowestCommonAncestor(self, root, p, q):\n \"\"\":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def ancestor(self, root, dis):\n \"\"\"Find the target node through its ancestors\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def lowestCommonAncestor(self, root, p, q):\n \"\"\":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\"\"\"\n if not root:\n return None\n P = self.ancestor(root, p)\n Q = self.ancestor(root, q)\n h = min(len(P), len(Q))\n for i in xrange(h):\n if P[i] is Q[i]:\n result = P[i]\n else:\n break\n return result\n\n def ancestor(self, root, dis):\n \"\"\"Find the target node through its ancestors\"\"\"\n if not root:\n return []\n if root is dis:\n return [dis]\n l = self.ancestor(root.left, dis)\n r = self.ancestor(root.right, dis)\n if not l + r:\n return []\n return [root] + l + r\n", "source": "the_stack_v2_python_sparse", "source_path": "2017/tree/Lowest_Common_Ancestor_of_a_Binary_Tree.py", "source_repo": 
"buhuipao/LeetCode", "split": "val", "star_events_count": 5} {"blob_id": "df31b4139442de6803187c49752cc1f09bca6b39", "bodies": ["inputs = [x.strip('[]\"\\n') for x in sys_stdin]\na = [self.cast(x) for x in inputs[0].split(',')]\no = TreeNode().convert(a)\nx = int(inputs[1])\nreturn (o, x)", "if x.lower() == 'null':\n return None\nelse:\n return int(x)"], "bodies_text": "<|body_start_0|>\n inputs = [x.strip('[]\"\\n') for x in sys_stdin]\n a = [self.cast(x) for x in inputs[0].split(',')]\n o = TreeNode().convert(a)\n x = int(inputs[1])\n return (o, x)\n<|end_body_0|>\n\n<|body_start_1|>\n if x.lower() == 'null':\n return None\n else:\n return int(x)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Input", "detected_licenses": ["Unlicense"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Input:\n\n def stdin(self, sys_stdin):\n \"\"\"Imports standard input. :param _io.TextIOWrapper sys_stdin: standard input :return: root node of binary tree :rtype: TreeNode object\"\"\"\n <|body_0|>\n\n def cast(self, x):\n \"\"\"Converts string values to integer or None values. :param str x: string input parameter :return: converted integer or None value :rtype: int or None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n inputs = [x.strip('[]\"\\n') for x in sys_stdin]\n a = [self.cast(x) for x in inputs[0].split(',')]\n o = TreeNode().convert(a)\n x = int(inputs[1])\n return (o, x)\n<|end_body_0|>\n\n<|body_start_1|>\n if x.lower() == 'null':\n return None\n else:\n return int(x)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000464", "length_bytes": 2708, "license_type": "permissive", "methods": [{"docstring": "Imports standard input. :param _io.TextIOWrapper sys_stdin: standard input :return: root node of binary tree :rtype: TreeNode object", "name": "stdin", "signature": "def stdin(self, sys_stdin)"}, {"docstring": "Converts string values to integer or None values. :param str x: string input parameter :return: converted integer or None value :rtype: int or None", "name": "cast", "signature": "def cast(self, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003110", "prompt": "Implement the Python class `Input` described below.\n\nClass description:\nImplement the Input class.\n\nMethod signatures and docstrings:\n- def stdin(self, sys_stdin): Imports standard input. :param _io.TextIOWrapper sys_stdin: standard input :return: root node of binary tree :rtype: TreeNode object\n- def cast(self, x): Converts string values to integer or None values. :param str x: string input parameter :return: converted integer or None value :rtype: int or None", "prompted_full_text": "Implement the Python class `Input` described below.\n\nClass description:\nImplement the Input class.\n\nMethod signatures and docstrings:\n- def stdin(self, sys_stdin): Imports standard input. :param _io.TextIOWrapper sys_stdin: standard input :return: root node of binary tree :rtype: TreeNode object\n- def cast(self, x): Converts string values to integer or None values. :param str x: string input parameter :return: converted integer or None value :rtype: int or None\n\n<|skeleton|>\nclass Input:\n\n def stdin(self, sys_stdin):\n \"\"\"Imports standard input. :param _io.TextIOWrapper sys_stdin: standard input :return: root node of binary tree :rtype: TreeNode object\"\"\"\n <|body_0|>\n\n def cast(self, x):\n \"\"\"Converts string values to integer or None values. 
:param str x: string input parameter :return: converted integer or None value :rtype: int or None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n inputs = [x.strip('[]\"\\n') for x in sys_stdin]\n a = [self.cast(x) for x in inputs[0].split(',')]\n o = TreeNode().convert(a)\n x = int(inputs[1])\n return (o, x)\n<|end_body_0|>\n\n<|body_start_1|>\n if x.lower() == 'null':\n return None\n else:\n return int(x)\n<|end_body_1|>\n", "revision_id": "69f90877c5466927e8b081c4268cbcda074813ec", "skeleton": "<|skeleton|>\nclass Input:\n\n def stdin(self, sys_stdin):\n \"\"\"Imports standard input. :param _io.TextIOWrapper sys_stdin: standard input :return: root node of binary tree :rtype: TreeNode object\"\"\"\n <|body_0|>\n\n def cast(self, x):\n \"\"\"Converts string values to integer or None values. :param str x: string input parameter :return: converted integer or None value :rtype: int or None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Input:\n def stdin(self, sys_stdin):\n \"\"\"Imports standard input. :param _io.TextIOWrapper sys_stdin: standard input :return: root node of binary tree :rtype: TreeNode object\"\"\"\n inputs = [x.strip('[]\"\\n') for x in sys_stdin]\n a = [self.cast(x) for x in inputs[0].split(',')]\n o = TreeNode().convert(a)\n x = int(inputs[1])\n return (o, x)\n\n def cast(self, x):\n \"\"\"Converts string values to integer or None values. :param str x: string input parameter :return: converted integer or None value :rtype: int or None\"\"\"\n if x.lower() == 'null':\n return None\n else:\n return int(x)\n", "source": "the_stack_v2_python_sparse", "source_path": "0701_insert_into_binary_search_tree/python_source.py", "source_repo": "arthurdysart/LeetCode", "split": "val", "star_events_count": 0} {"blob_id": "624ab166f79ebe69454a10e5cfb45de3f12f815c", "bodies": ["if not lists:\n return None\nif len(lists) == 1:\n return lists[0]\nif len(lists) == 2:\n return self.mergeTwoLists(lists[0], lists[1])\nreturn self.mergeTwoLists(self.mergeKLists(lists[:len(lists) / 2]), self.mergeKLists(lists[len(lists) / 2:]))", "res = ListNode(0)\nlast = res\nwhile l1 or l2:\n if l1 == None:\n last.next = l2\n return res.next\n if l2 == None:\n last.next = l1\n return res.next\n if l1.val < l2.val:\n last.next = l1\n last = last.next\n l1 = l1.next\n else:\n last.next = l2\n last = last.next\n l2 = l2.next\nreturn res.next"], "bodies_text": "<|body_start_0|>\n if not lists:\n return None\n if len(lists) == 1:\n return lists[0]\n if len(lists) == 2:\n return self.mergeTwoLists(lists[0], lists[1])\n return self.mergeTwoLists(self.mergeKLists(lists[:len(lists) / 2]), self.mergeKLists(lists[len(lists) / 2:]))\n<|end_body_0|>\n\n<|body_start_1|>\n res = ListNode(0)\n last = res\n while l1 or l2:\n if l1 == None:\n last.next = l2\n return res.next\n if l2 == None:\n last.next = l1\n return res.next\n if l1.val < l2.val:\n last.next = l1\n last = last.next\n l1 = l1.next\n else:\n last.next = l2\n last = last.next\n l2 = l2.next\n return res.next\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def mergeKLists(self, lists):\n \"\"\":type lists: List[ListNode] :rtype: ListNode\"\"\"\n <|body_0|>\n\n def mergeTwoLists(self, l1, l2):\n \"\"\":type l1: ListNode :type l2: ListNode :rtype: ListNode\"\"\"\n 
<|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not lists:\n return None\n if len(lists) == 1:\n return lists[0]\n if len(lists) == 2:\n return self.mergeTwoLists(lists[0], lists[1])\n return self.mergeTwoLists(self.mergeKLists(lists[:len(lists) / 2]), self.mergeKLists(lists[len(lists) / 2:]))\n<|end_body_0|>\n\n<|body_start_1|>\n res = ListNode(0)\n last = res\n while l1 or l2:\n if l1 == None:\n last.next = l2\n return res.next\n if l2 == None:\n last.next = l1\n return res.next\n if l1.val < l2.val:\n last.next = l1\n last = last.next\n l1 = l1.next\n else:\n last.next = l2\n last = last.next\n l2 = l2.next\n return res.next\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000465", "length_bytes": 1455, "license_type": "permissive", "methods": [{"docstring": ":type lists: List[ListNode] :rtype: ListNode", "name": "mergeKLists", "signature": "def mergeKLists(self, lists)"}, {"docstring": ":type l1: ListNode :type l2: ListNode :rtype: ListNode", "name": "mergeTwoLists", "signature": "def mergeTwoLists(self, l1, l2)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005123", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def mergeKLists(self, lists): :type lists: List[ListNode] :rtype: ListNode\n- def mergeTwoLists(self, l1, l2): :type l1: ListNode :type l2: ListNode :rtype: ListNode", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def mergeKLists(self, lists): :type lists: List[ListNode] :rtype: ListNode\n- def mergeTwoLists(self, l1, l2): :type l1: ListNode :type l2: ListNode :rtype: ListNode\n\n<|skeleton|>\nclass Solution:\n\n def mergeKLists(self, lists):\n \"\"\":type lists: List[ListNode] :rtype: ListNode\"\"\"\n <|body_0|>\n\n def mergeTwoLists(self, l1, l2):\n \"\"\":type l1: ListNode :type l2: ListNode :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not lists:\n return None\n if len(lists) == 1:\n return lists[0]\n if len(lists) == 2:\n return self.mergeTwoLists(lists[0], lists[1])\n return self.mergeTwoLists(self.mergeKLists(lists[:len(lists) / 2]), self.mergeKLists(lists[len(lists) / 2:]))\n<|end_body_0|>\n\n<|body_start_1|>\n res = ListNode(0)\n last = res\n while l1 or l2:\n if l1 == None:\n last.next = l2\n return res.next\n if l2 == None:\n last.next = l1\n return res.next\n if l1.val < l2.val:\n last.next = l1\n last = last.next\n l1 = l1.next\n else:\n last.next = l2\n last = last.next\n l2 = l2.next\n return res.next\n<|end_body_1|>\n", "revision_id": "64747eb172c2ecb3c889830246f3282669516e10", "skeleton": "<|skeleton|>\nclass Solution:\n\n def mergeKLists(self, lists):\n \"\"\":type lists: List[ListNode] :rtype: ListNode\"\"\"\n <|body_0|>\n\n def mergeTwoLists(self, l1, l2):\n \"\"\":type l1: ListNode :type l2: ListNode :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def mergeKLists(self, lists):\n \"\"\":type lists: List[ListNode] :rtype: ListNode\"\"\"\n if not lists:\n return None\n if len(lists) == 1:\n return lists[0]\n if len(lists) == 2:\n return self.mergeTwoLists(lists[0], lists[1])\n return self.mergeTwoLists(self.mergeKLists(lists[:len(lists) / 2]), self.mergeKLists(lists[len(lists) / 2:]))\n\n def 
mergeTwoLists(self, l1, l2):\n \"\"\":type l1: ListNode :type l2: ListNode :rtype: ListNode\"\"\"\n res = ListNode(0)\n last = res\n while l1 or l2:\n if l1 == None:\n last.next = l2\n return res.next\n if l2 == None:\n last.next = l1\n return res.next\n if l1.val < l2.val:\n last.next = l1\n last = last.next\n l1 = l1.next\n else:\n last.next = l2\n last = last.next\n l2 = l2.next\n return res.next\n", "source": "the_stack_v2_python_sparse", "source_path": "LC/23.py", "source_repo": "szhu3210/LeetCode_Solutions", "split": "val", "star_events_count": 2} {"blob_id": "2a6b58759dd45acb94396a2aaee090e46a38aab6", "bodies": ["cr, uid, context = self.env.args\ncontext = dict(context)\nfor rec in self:\n rec.write({'date_from': time.strftime(DEFAULT_SERVER_DATE_FORMAT)})\n emp_rec = self.env['hr.employee'].search([('id', '=', rec.emp_id.id)])\n if emp_rec:\n emp_rec.write({'away': True})\nreturn True", "cr, uid, context = self.env.args\ncontext = dict(context)\nfor rec in self:\n rec.write({'date_to': time.strftime(DEFAULT_SERVER_DATE_FORMAT)})\n emp_rec = self.env['hr.employee'].search([('id', '=', rec.emp_id.id)])\n if emp_rec:\n emp_rec.write({'away': False})\nreturn True"], "bodies_text": "<|body_start_0|>\n cr, uid, context = self.env.args\n context = dict(context)\n for rec in self:\n rec.write({'date_from': time.strftime(DEFAULT_SERVER_DATE_FORMAT)})\n emp_rec = self.env['hr.employee'].search([('id', '=', rec.emp_id.id)])\n if emp_rec:\n emp_rec.write({'away': True})\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n cr, uid, context = self.env.args\n context = dict(context)\n for rec in self:\n rec.write({'date_to': time.strftime(DEFAULT_SERVER_DATE_FORMAT)})\n emp_rec = self.env['hr.employee'].search([('id', '=', rec.emp_id.id)])\n if emp_rec:\n emp_rec.write({'away': False})\n return True\n<|end_body_1|>\n", "class_docstring": "", "class_name": "employee_away_history", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass employee_away_history:\n\n def set_date_from(self):\n \"\"\"This method is used to set the Date From field based on Leave Button ------------------------------------------------\"\"\"\n <|body_0|>\n\n def set_date_to(self):\n \"\"\"This method is used to set the Date To field based on Return Button ------------------------------------------------\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cr, uid, context = self.env.args\n context = dict(context)\n for rec in self:\n rec.write({'date_from': time.strftime(DEFAULT_SERVER_DATE_FORMAT)})\n emp_rec = self.env['hr.employee'].search([('id', '=', rec.emp_id.id)])\n if emp_rec:\n emp_rec.write({'away': True})\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n cr, uid, context = self.env.args\n context = dict(context)\n for rec in self:\n rec.write({'date_to': time.strftime(DEFAULT_SERVER_DATE_FORMAT)})\n emp_rec = self.env['hr.employee'].search([('id', '=', rec.emp_id.id)])\n if emp_rec:\n emp_rec.write({'away': False})\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000466", "length_bytes": 2760, "license_type": "no_license", "methods": [{"docstring": "This method is used to set the Date From field based on Leave Button ------------------------------------------------", "name": "set_date_from", "signature": "def set_date_from(self)"}, {"docstring": "This method is used to set the Date To field based on Return Button ------------------------------------------------", "name": "set_date_to", "signature": "def 
set_date_to(self)"}], "n_methods": 2, "prompt": "Implement the Python class `employee_away_history` described below.\n\nClass description:\nImplement the employee_away_history class.\n\nMethod signatures and docstrings:\n- def set_date_from(self): This method is used to set the Date From field based on Leave Button ------------------------------------------------\n- def set_date_to(self): This method is used to set the Date To field based on Return Button ------------------------------------------------", "prompted_full_text": "Implement the Python class `employee_away_history` described below.\n\nClass description:\nImplement the employee_away_history class.\n\nMethod signatures and docstrings:\n- def set_date_from(self): This method is used to set the Date From field based on Leave Button ------------------------------------------------\n- def set_date_to(self): This method is used to set the Date To field based on Return Button ------------------------------------------------\n\n<|skeleton|>\nclass employee_away_history:\n\n def set_date_from(self):\n \"\"\"This method is used to set the Date From field based on Leave Button ------------------------------------------------\"\"\"\n <|body_0|>\n\n def set_date_to(self):\n \"\"\"This method is used to set the Date To field based on Return Button ------------------------------------------------\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cr, uid, context = self.env.args\n context = dict(context)\n for rec in self:\n rec.write({'date_from': time.strftime(DEFAULT_SERVER_DATE_FORMAT)})\n emp_rec = self.env['hr.employee'].search([('id', '=', rec.emp_id.id)])\n if emp_rec:\n emp_rec.write({'away': True})\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n cr, uid, context = self.env.args\n context = dict(context)\n for rec in self:\n rec.write({'date_to': time.strftime(DEFAULT_SERVER_DATE_FORMAT)})\n emp_rec = self.env['hr.employee'].search([('id', '=', rec.emp_id.id)])\n if emp_rec:\n emp_rec.write({'away': False})\n return True\n<|end_body_1|>\n", "revision_id": "46e15330b5d642053da61754247f3fbf9d02717e", "skeleton": "<|skeleton|>\nclass employee_away_history:\n\n def set_date_from(self):\n \"\"\"This method is used to set the Date From field based on Leave Button ------------------------------------------------\"\"\"\n <|body_0|>\n\n def set_date_to(self):\n \"\"\"This method is used to set the Date To field based on Return Button ------------------------------------------------\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class employee_away_history:\n def set_date_from(self):\n \"\"\"This method is used to set the Date From field based on Leave Button ------------------------------------------------\"\"\"\n cr, uid, context = self.env.args\n context = dict(context)\n for rec in self:\n rec.write({'date_from': time.strftime(DEFAULT_SERVER_DATE_FORMAT)})\n emp_rec = self.env['hr.employee'].search([('id', '=', rec.emp_id.id)])\n if emp_rec:\n emp_rec.write({'away': True})\n return True\n\n def set_date_to(self):\n \"\"\"This method is used to set the Date To field based on Return Button ------------------------------------------------\"\"\"\n cr, uid, context = self.env.args\n context = dict(context)\n for rec in self:\n rec.write({'date_to': time.strftime(DEFAULT_SERVER_DATE_FORMAT)})\n emp_rec = self.env['hr.employee'].search([('id', '=', rec.emp_id.id)])\n if emp_rec:\n emp_rec.write({'away': False})\n return 
True\n", "source": "the_stack_v2_python_sparse", "source_path": "core/sg_accommodation/models/employee_away_history.py", "source_repo": "Muhammad-SF/Test", "split": "val", "star_events_count": 0} {"blob_id": "5ba2fe8332f07253dd137918426f1b1a92201894", "bodies": ["self.train_x = train_x\nself.train_class = train_class\nreturn", "pred_class = np.zeros(x_data.shape[0])\nfor i, x in enumerate(x_data):\n distancesq = ((x - self.train_x) ** 2).sum(axis=1)\n nn_ind = np.argsort(distancesq)[:k_neighbors]\n nn_class, counts = np.unique(self.train_class[nn_ind], return_counts=True)\n pred_class[i] += nn_class[np.argmax(counts)]\nreturn pred_class"], "bodies_text": "<|body_start_0|>\n self.train_x = train_x\n self.train_class = train_class\n return\n<|end_body_0|>\n\n<|body_start_1|>\n pred_class = np.zeros(x_data.shape[0])\n for i, x in enumerate(x_data):\n distancesq = ((x - self.train_x) ** 2).sum(axis=1)\n nn_ind = np.argsort(distancesq)[:k_neighbors]\n nn_class, counts = np.unique(self.train_class[nn_ind], return_counts=True)\n pred_class[i] += nn_class[np.argmax(counts)]\n return pred_class\n<|end_body_1|>\n", "class_docstring": "", "class_name": "KNN", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass KNN:\n\n def __init__(self, train_x, train_class):\n \"\"\"Initialize the data set that classification will be based on.\"\"\"\n <|body_0|>\n\n def predict(self, x_data, k_neighbors):\n \"\"\"Find the nearest k-neighbors and classify the new set of data points.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.train_x = train_x\n self.train_class = train_class\n return\n<|end_body_0|>\n\n<|body_start_1|>\n pred_class = np.zeros(x_data.shape[0])\n for i, x in enumerate(x_data):\n distancesq = ((x - self.train_x) ** 2).sum(axis=1)\n nn_ind = np.argsort(distancesq)[:k_neighbors]\n nn_class, counts = np.unique(self.train_class[nn_ind], return_counts=True)\n pred_class[i] += nn_class[np.argmax(counts)]\n return pred_class\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000467", "length_bytes": 925, "license_type": "no_license", "methods": [{"docstring": "Initialize the data set that classification will be based on.", "name": "__init__", "signature": "def __init__(self, train_x, train_class)"}, {"docstring": "Find the nearest k-neighbors and classify the new set of data points.", "name": "predict", "signature": "def predict(self, x_data, k_neighbors)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005364", "prompt": "Implement the Python class `KNN` described below.\n\nClass description:\nImplement the KNN class.\n\nMethod signatures and docstrings:\n- def __init__(self, train_x, train_class): Initialize the data set that classification will be based on.\n- def predict(self, x_data, k_neighbors): Find the nearest k-neighbors and classify the new set of data points.", "prompted_full_text": "Implement the Python class `KNN` described below.\n\nClass description:\nImplement the KNN class.\n\nMethod signatures and docstrings:\n- def __init__(self, train_x, train_class): Initialize the data set that classification will be based on.\n- def predict(self, x_data, k_neighbors): Find the nearest k-neighbors and classify the new set of data points.\n\n<|skeleton|>\nclass KNN:\n\n def __init__(self, train_x, train_class):\n \"\"\"Initialize the data set that classification will be based on.\"\"\"\n <|body_0|>\n\n def predict(self, x_data, k_neighbors):\n \"\"\"Find the nearest k-neighbors and classify 
the new set of data points.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.train_x = train_x\n self.train_class = train_class\n return\n<|end_body_0|>\n\n<|body_start_1|>\n pred_class = np.zeros(x_data.shape[0])\n for i, x in enumerate(x_data):\n distancesq = ((x - self.train_x) ** 2).sum(axis=1)\n nn_ind = np.argsort(distancesq)[:k_neighbors]\n nn_class, counts = np.unique(self.train_class[nn_ind], return_counts=True)\n pred_class[i] += nn_class[np.argmax(counts)]\n return pred_class\n<|end_body_1|>\n", "revision_id": "6cd204abbc074734fb7e8ca0e693a15e1cbe4ede", "skeleton": "<|skeleton|>\nclass KNN:\n\n def __init__(self, train_x, train_class):\n \"\"\"Initialize the data set that classification will be based on.\"\"\"\n <|body_0|>\n\n def predict(self, x_data, k_neighbors):\n \"\"\"Find the nearest k-neighbors and classify the new set of data points.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class KNN:\n def __init__(self, train_x, train_class):\n \"\"\"Initialize the data set that classification will be based on.\"\"\"\n self.train_x = train_x\n self.train_class = train_class\n return\n\n def predict(self, x_data, k_neighbors):\n \"\"\"Find the nearest k-neighbors and classify the new set of data points.\"\"\"\n pred_class = np.zeros(x_data.shape[0])\n for i, x in enumerate(x_data):\n distancesq = ((x - self.train_x) ** 2).sum(axis=1)\n nn_ind = np.argsort(distancesq)[:k_neighbors]\n nn_class, counts = np.unique(self.train_class[nn_ind], return_counts=True)\n pred_class[i] += nn_class[np.argmax(counts)]\n return pred_class\n", "source": "the_stack_v2_python_sparse", "source_path": "EE565/Project1_GarciaJ/codes/requiredFunctions/kNearestNeighbors.py", "source_repo": "JorgeAGR/nmsu-course-work", "split": "val", "star_events_count": 0} {"blob_id": "6466bc8b6b9c9df8277539a269711649b5410501", "bodies": ["response = self.client.get(reverse('index'))\nself.assertEqual(response.status_code, 200)\nself.assertQuerysetEqual(response.context['listings'], [])", "listing = Listing.objects.create()\nresponse = self.client.get(reverse('index'))\nself.assertQuerysetEqual(response.context['listings'], [listing])"], "bodies_text": "<|body_start_0|>\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['listings'], [])\n<|end_body_0|>\n\n<|body_start_1|>\n listing = Listing.objects.create()\n response = self.client.get(reverse('index'))\n self.assertQuerysetEqual(response.context['listings'], [listing])\n<|end_body_1|>\n", "class_docstring": "", "class_name": "IndexViewTests", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IndexViewTests:\n\n def test_no_listings(self):\n \"\"\"If no listings exist, none are displayed on the index page.\"\"\"\n <|body_0|>\n\n def test_display_listings(self):\n \"\"\"Index view displays all created listings.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['listings'], [])\n<|end_body_0|>\n\n<|body_start_1|>\n listing = Listing.objects.create()\n response = self.client.get(reverse('index'))\n self.assertQuerysetEqual(response.context['listings'], [listing])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000468", "length_bytes": 1005, 
"license_type": "no_license", "methods": [{"docstring": "If no listings exist, none are displayed on the index page.", "name": "test_no_listings", "signature": "def test_no_listings(self)"}, {"docstring": "Index view displays all created listings.", "name": "test_display_listings", "signature": "def test_display_listings(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002616", "prompt": "Implement the Python class `IndexViewTests` described below.\n\nClass description:\nImplement the IndexViewTests class.\n\nMethod signatures and docstrings:\n- def test_no_listings(self): If no listings exist, none are displayed on the index page.\n- def test_display_listings(self): Index view displays all created listings.", "prompted_full_text": "Implement the Python class `IndexViewTests` described below.\n\nClass description:\nImplement the IndexViewTests class.\n\nMethod signatures and docstrings:\n- def test_no_listings(self): If no listings exist, none are displayed on the index page.\n- def test_display_listings(self): Index view displays all created listings.\n\n<|skeleton|>\nclass IndexViewTests:\n\n def test_no_listings(self):\n \"\"\"If no listings exist, none are displayed on the index page.\"\"\"\n <|body_0|>\n\n def test_display_listings(self):\n \"\"\"Index view displays all created listings.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['listings'], [])\n<|end_body_0|>\n\n<|body_start_1|>\n listing = Listing.objects.create()\n response = self.client.get(reverse('index'))\n self.assertQuerysetEqual(response.context['listings'], [listing])\n<|end_body_1|>\n", "revision_id": "3e5394d4c54188aa371c46a112be7791d829dc7e", "skeleton": "<|skeleton|>\nclass IndexViewTests:\n\n def test_no_listings(self):\n \"\"\"If no listings exist, none are displayed on the index page.\"\"\"\n <|body_0|>\n\n def test_display_listings(self):\n \"\"\"Index view displays all created listings.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class IndexViewTests:\n def test_no_listings(self):\n \"\"\"If no listings exist, none are displayed on the index page.\"\"\"\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['listings'], [])\n\n def test_display_listings(self):\n \"\"\"Index view displays all created listings.\"\"\"\n listing = Listing.objects.create()\n response = self.client.get(reverse('index'))\n self.assertQuerysetEqual(response.context['listings'], [listing])\n", "source": "the_stack_v2_python_sparse", "source_path": "auctions/tests.py", "source_repo": "andkerr/mytoyauctionsite", "split": "val", "star_events_count": 0} {"blob_id": "4e4b1a7ff5ce756e7f2fee2ea11ec8f8bd3d1408", "bodies": ["self.params = params\nself.task_labels = task_labels\nself.cache_dir = tempfile.gettempdir()", "tf.compat.v1.reset_default_graph()\nconfig = transformers.BertConfig.from_pretrained(os.path.join(self.params.bert_path, 'config.json'), cache_dir=self.cache_dir)\nmodel = transformers.TFBertModel.from_pretrained(os.path.join(self.params.bert_path, 'tf_model.h5'), config=config, cache_dir=self.cache_dir)\ninputs = tf.keras.layers.Input(shape=(int(self.params.max_l),), dtype=tf.int64, name='inputs')\natten = tf.keras.layers.Input(shape=(int(self.params.max_l),), 
dtype=tf.int64, name='atten')\nhidden = model([inputs, atten])[1]\ndrop_hidden = tf.keras.layers.Dropout(0.1)(hidden, training=self.params.mc_dropout)\nlogits = dict()\nfor task_label in self.task_labels:\n logits[task_label] = tf.keras.layers.Dense(labels, activation='sigmoid', name=task_label)(drop_hidden)\nself.tf_model = tf.keras.Model({'inputs': inputs, 'atten': atten}, logits)", "cb_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=self.params.early_stopping_check, min_delta=self.params.min_epoch_change)\ncb_scheduler = tf.keras.callbacks.LearningRateScheduler(utils.reduce_learning_rate)\nself.tf_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self.params.lr), loss=loss_function)\ny = {task_label: train_batches[task_label].replace(np.nan, -1) for task_label in self.task_labels}\nval_y = {task_label: val_batches[task_label].replace(np.nan, -1) for task_label in self.task_labels}\nhistory = self.tf_model.fit(x={'inputs': train_batches['inputs'], 'atten': train_batches['attentions']}, y=y, epochs=self.params.n_epoch, callbacks=[cb_early, cb_scheduler], validation_data=({'inputs': val_batches['inputs'], 'atten': val_batches['attentions']}, val_y), class_weight=weights)\nreturn len(history.history['loss'])", "results = collections.defaultdict(list)\nif 'majority' in batches.keys():\n results['majority'] = batches['majority']\nif 'text_id' in batches.keys():\n results['text_id'] = batches['text_id']\nlogits = self.tf_model.predict(x={'inputs': batches['inputs'], 'atten': batches['attentions']})\nfor i, task_label in enumerate(self.task_labels):\n predictions = utils.to_binary(logits[i] if len(self.task_labels) > 1 else logits)\n results[task_label + '_pred'] = predictions\n if task_label in batches.keys():\n results[task_label + '_label'] = batches[task_label]\n if len(self.task_labels) == 1:\n results[task_label + '_logit'] = logits.flatten()\nreturn pd.DataFrame(results)", "results = collections.defaultdict(list)\ndropout_predictions = np.empty((0, batches['inputs'].shape[0], 1))\nfor i, task_label in enumerate(self.task_labels):\n for _ in range(self.params.mc_passes):\n logits = self.tf_model.predict(x={'inputs': batches['inputs'], 'atten': batches['attentions']})\n mc_predictions = utils.to_binary(logits[i] if len(self.task_labels) > 1 else logits)\n dropout_predictions = np.vstack((dropout_predictions, mc_predictions[np.newaxis, :, np.newaxis]))\n results[task_label + '_mean'] = list(np.squeeze(np.mean(dropout_predictions, axis=0)))\n results[task_label + '_variance'] = list(np.squeeze(np.var(dropout_predictions, axis=0)))\nreturn pd.DataFrame(results)"], "bodies_text": "<|body_start_0|>\n self.params = params\n self.task_labels = task_labels\n self.cache_dir = tempfile.gettempdir()\n<|end_body_0|>\n\n<|body_start_1|>\n tf.compat.v1.reset_default_graph()\n config = transformers.BertConfig.from_pretrained(os.path.join(self.params.bert_path, 'config.json'), cache_dir=self.cache_dir)\n model = transformers.TFBertModel.from_pretrained(os.path.join(self.params.bert_path, 'tf_model.h5'), config=config, cache_dir=self.cache_dir)\n inputs = tf.keras.layers.Input(shape=(int(self.params.max_l),), dtype=tf.int64, name='inputs')\n atten = tf.keras.layers.Input(shape=(int(self.params.max_l),), dtype=tf.int64, name='atten')\n hidden = model([inputs, atten])[1]\n drop_hidden = tf.keras.layers.Dropout(0.1)(hidden, training=self.params.mc_dropout)\n logits = dict()\n for task_label in self.task_labels:\n logits[task_label] = tf.keras.layers.Dense(labels, 
activation='sigmoid', name=task_label)(drop_hidden)\n self.tf_model = tf.keras.Model({'inputs': inputs, 'atten': atten}, logits)\n<|end_body_1|>\n\n<|body_start_2|>\n cb_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=self.params.early_stopping_check, min_delta=self.params.min_epoch_change)\n cb_scheduler = tf.keras.callbacks.LearningRateScheduler(utils.reduce_learning_rate)\n self.tf_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self.params.lr), loss=loss_function)\n y = {task_label: train_batches[task_label].replace(np.nan, -1) for task_label in self.task_labels}\n val_y = {task_label: val_batches[task_label].replace(np.nan, -1) for task_label in self.task_labels}\n history = self.tf_model.fit(x={'inputs': train_batches['inputs'], 'atten': train_batches['attentions']}, y=y, epochs=self.params.n_epoch, callbacks=[cb_early, cb_scheduler], validation_data=({'inputs': val_batches['inputs'], 'atten': val_batches['attentions']}, val_y), class_weight=weights)\n return len(history.history['loss'])\n<|end_body_2|>\n\n<|body_start_3|>\n results = collections.defaultdict(list)\n if 'majority' in batches.keys():\n results['majority'] = batches['majority']\n if 'text_id' in batches.keys():\n results['text_id'] = batches['text_id']\n logits = self.tf_model.predict(x={'inputs': batches['inputs'], 'atten': batches['attentions']})\n for i, task_label in enumerate(self.task_labels):\n predictions = utils.to_binary(logits[i] if len(self.task_labels) > 1 else logits)\n results[task_label + '_pred'] = predictions\n if task_label in batches.keys():\n results[task_label + '_label'] = batches[task_label]\n if len(self.task_labels) == 1:\n results[task_label + '_logit'] = logits.flatten()\n return pd.DataFrame(results)\n<|end_body_3|>\n\n<|body_start_4|>\n results = collections.defaultdict(list)\n dropout_predictions = np.empty((0, batches['inputs'].shape[0], 1))\n for i, task_label in enumerate(self.task_labels):\n for _ in range(self.params.mc_passes):\n logits = self.tf_model.predict(x={'inputs': batches['inputs'], 'atten': batches['attentions']})\n mc_predictions = utils.to_binary(logits[i] if len(self.task_labels) > 1 else logits)\n dropout_predictions = np.vstack((dropout_predictions, mc_predictions[np.newaxis, :, np.newaxis]))\n results[task_label + '_mean'] = list(np.squeeze(np.mean(dropout_predictions, axis=0)))\n results[task_label + '_variance'] = list(np.squeeze(np.var(dropout_predictions, axis=0)))\n return pd.DataFrame(results)\n<|end_body_4|>\n", "class_docstring": "Classifier can be single-task, or multi-task.", "class_name": "Classifier", "detected_licenses": ["Apache-2.0", "CC-BY-4.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Classifier:\n \"\"\"Classifier can be single-task, or multi-task.\"\"\"\n\n def __init__(self, params, task_labels='majority'):\n \"\"\"Creates a Classifier instance for predicting task_labels. Args: params: a Params instance which includes the hyperparameters of the model task_labels: list of label names to be predicted from text.\"\"\"\n <|body_0|>\n\n def create(self, labels=1):\n \"\"\"Creates a single-task, multi-task or multi-label classifier. Args: labels: shows the number of labels for the output. If more than 1, the model is multi-label. 
The length of self.task_labels shows whether the model is a single-task or multi-task.\"\"\"\n <|body_1|>\n\n def train_model(self, train_batches, val_batches, loss_function, weights):\n \"\"\"Trains and validates a classifier on the input batches. Args: train_batches: a dictionary of inputs, attnetions and labels created by self.get_batches() val_batches: a dictionary of inputs, attnetions and labels created by self.get_batches() loss_function: a function for calculating the loss value during training weights: a dictionary of weights for each task. Returns: the number of training epochs before early stopping.\"\"\"\n <|body_2|>\n\n def predict(self, batches):\n \"\"\"Predicts the outputs for each task_label. Args: batches: the input batches created through self.get_batches() Returns: A dataframe that includes predictions and labels for each task_label\"\"\"\n <|body_3|>\n\n def mc_predict(self, batches):\n \"\"\"Uses the trained models for mc_pass iterations to calculate uncertainty. Each iteration is performed with dropouts, so the predictions vary. Based on Gal and Ghahramani, 2016. Args: batches: the input batches created through self.get_batches() Returns: A dataframe that includes predictions and labels for each task_label\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.params = params\n self.task_labels = task_labels\n self.cache_dir = tempfile.gettempdir()\n<|end_body_0|>\n\n<|body_start_1|>\n tf.compat.v1.reset_default_graph()\n config = transformers.BertConfig.from_pretrained(os.path.join(self.params.bert_path, 'config.json'), cache_dir=self.cache_dir)\n model = transformers.TFBertModel.from_pretrained(os.path.join(self.params.bert_path, 'tf_model.h5'), config=config, cache_dir=self.cache_dir)\n inputs = tf.keras.layers.Input(shape=(int(self.params.max_l),), dtype=tf.int64, name='inputs')\n atten = tf.keras.layers.Input(shape=(int(self.params.max_l),), dtype=tf.int64, name='atten')\n hidden = model([inputs, atten])[1]\n drop_hidden = tf.keras.layers.Dropout(0.1)(hidden, training=self.params.mc_dropout)\n logits = dict()\n for task_label in self.task_labels:\n logits[task_label] = tf.keras.layers.Dense(labels, activation='sigmoid', name=task_label)(drop_hidden)\n self.tf_model = tf.keras.Model({'inputs': inputs, 'atten': atten}, logits)\n<|end_body_1|>\n\n<|body_start_2|>\n cb_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=self.params.early_stopping_check, min_delta=self.params.min_epoch_change)\n cb_scheduler = tf.keras.callbacks.LearningRateScheduler(utils.reduce_learning_rate)\n self.tf_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self.params.lr), loss=loss_function)\n y = {task_label: train_batches[task_label].replace(np.nan, -1) for task_label in self.task_labels}\n val_y = {task_label: val_batches[task_label].replace(np.nan, -1) for task_label in self.task_labels}\n history = self.tf_model.fit(x={'inputs': train_batches['inputs'], 'atten': train_batches['attentions']}, y=y, epochs=self.params.n_epoch, callbacks=[cb_early, cb_scheduler], validation_data=({'inputs': val_batches['inputs'], 'atten': val_batches['attentions']}, val_y), class_weight=weights)\n return len(history.history['loss'])\n<|end_body_2|>\n\n<|body_start_3|>\n results = collections.defaultdict(list)\n if 'majority' in batches.keys():\n results['majority'] = batches['majority']\n if 'text_id' in batches.keys():\n results['text_id'] = batches['text_id']\n logits = self.tf_model.predict(x={'inputs': batches['inputs'], 'atten': batches['attentions']})\n 
for i, task_label in enumerate(self.task_labels):\n predictions = utils.to_binary(logits[i] if len(self.task_labels) > 1 else logits)\n results[task_label + '_pred'] = predictions\n if task_label in batches.keys():\n results[task_label + '_label'] = batches[task_label]\n if len(self.task_labels) == 1:\n results[task_label + '_logit'] = logits.flatten()\n return pd.DataFrame(results)\n<|end_body_3|>\n\n<|body_start_4|>\n results = collections.defaultdict(list)\n dropout_predictions = np.empty((0, batches['inputs'].shape[0], 1))\n for i, task_label in enumerate(self.task_labels):\n for _ in range(self.params.mc_passes):\n logits = self.tf_model.predict(x={'inputs': batches['inputs'], 'atten': batches['attentions']})\n mc_predictions = utils.to_binary(logits[i] if len(self.task_labels) > 1 else logits)\n dropout_predictions = np.vstack((dropout_predictions, mc_predictions[np.newaxis, :, np.newaxis]))\n results[task_label + '_mean'] = list(np.squeeze(np.mean(dropout_predictions, axis=0)))\n results[task_label + '_variance'] = list(np.squeeze(np.var(dropout_predictions, axis=0)))\n return pd.DataFrame(results)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000469", "length_bytes": 7260, "license_type": "permissive", "methods": [{"docstring": "Creates a Classifier instance for predicting task_labels. Args: params: a Params instance which includes the hyperparameters of the model task_labels: list of label names to be predicted from text.", "name": "__init__", "signature": "def __init__(self, params, task_labels='majority')"}, {"docstring": "Creates a single-task, multi-task or multi-label classifier. Args: labels: shows the number of labels for the output. If more than 1, the model is multi-label. The length of self.task_labels shows whether the model is a single-task or multi-task.", "name": "create", "signature": "def create(self, labels=1)"}, {"docstring": "Trains and validates a classifier on the input batches. Args: train_batches: a dictionary of inputs, attnetions and labels created by self.get_batches() val_batches: a dictionary of inputs, attnetions and labels created by self.get_batches() loss_function: a function for calculating the loss value during training weights: a dictionary of weights for each task. Returns: the number of training epochs before early stopping.", "name": "train_model", "signature": "def train_model(self, train_batches, val_batches, loss_function, weights)"}, {"docstring": "Predicts the outputs for each task_label. Args: batches: the input batches created through self.get_batches() Returns: A dataframe that includes predictions and labels for each task_label", "name": "predict", "signature": "def predict(self, batches)"}, {"docstring": "Uses the trained models for mc_pass iterations to calculate uncertainty. Each iteration is performed with dropouts, so the predictions vary. Based on Gal and Ghahramani, 2016. Args: batches: the input batches created through self.get_batches() Returns: A dataframe that includes predictions and labels for each task_label", "name": "mc_predict", "signature": "def mc_predict(self, batches)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_004067", "prompt": "Implement the Python class `Classifier` described below.\n\nClass description:\nClassifier can be single-task, or multi-task.\n\nMethod signatures and docstrings:\n- def __init__(self, params, task_labels='majority'): Creates a Classifier instance for predicting task_labels. 
Args: params: a Params instance which includes the hyperparameters of the model task_labels: list of label names to be predicted from text.\n- def create(self, labels=1): Creates a single-task, multi-task or multi-label classifier. Args: labels: shows the number of labels for the output. If more than 1, the model is multi-label. The length of self.task_labels shows whether the model is a single-task or multi-task.\n- def train_model(self, train_batches, val_batches, loss_function, weights): Trains and validates a classifier on the input batches. Args: train_batches: a dictionary of inputs, attnetions and labels created by self.get_batches() val_batches: a dictionary of inputs, attnetions and labels created by self.get_batches() loss_function: a function for calculating the loss value during training weights: a dictionary of weights for each task. Returns: the number of training epochs before early stopping.\n- def predict(self, batches): Predicts the outputs for each task_label. Args: batches: the input batches created through self.get_batches() Returns: A dataframe that includes predictions and labels for each task_label\n- def mc_predict(self, batches): Uses the trained models for mc_pass iterations to calculate uncertainty. Each iteration is performed with dropouts, so the predictions vary. Based on Gal and Ghahramani, 2016. Args: batches: the input batches created through self.get_batches() Returns: A dataframe that includes predictions and labels for each task_label", "prompted_full_text": "Implement the Python class `Classifier` described below.\n\nClass description:\nClassifier can be single-task, or multi-task.\n\nMethod signatures and docstrings:\n- def __init__(self, params, task_labels='majority'): Creates a Classifier instance for predicting task_labels. Args: params: a Params instance which includes the hyperparameters of the model task_labels: list of label names to be predicted from text.\n- def create(self, labels=1): Creates a single-task, multi-task or multi-label classifier. Args: labels: shows the number of labels for the output. If more than 1, the model is multi-label. The length of self.task_labels shows whether the model is a single-task or multi-task.\n- def train_model(self, train_batches, val_batches, loss_function, weights): Trains and validates a classifier on the input batches. Args: train_batches: a dictionary of inputs, attnetions and labels created by self.get_batches() val_batches: a dictionary of inputs, attnetions and labels created by self.get_batches() loss_function: a function for calculating the loss value during training weights: a dictionary of weights for each task. Returns: the number of training epochs before early stopping.\n- def predict(self, batches): Predicts the outputs for each task_label. Args: batches: the input batches created through self.get_batches() Returns: A dataframe that includes predictions and labels for each task_label\n- def mc_predict(self, batches): Uses the trained models for mc_pass iterations to calculate uncertainty. Each iteration is performed with dropouts, so the predictions vary. Based on Gal and Ghahramani, 2016. Args: batches: the input batches created through self.get_batches() Returns: A dataframe that includes predictions and labels for each task_label\n\n<|skeleton|>\nclass Classifier:\n \"\"\"Classifier can be single-task, or multi-task.\"\"\"\n\n def __init__(self, params, task_labels='majority'):\n \"\"\"Creates a Classifier instance for predicting task_labels. 
Args: params: a Params instance which includes the hyperparameters of the model task_labels: list of label names to be predicted from text.\"\"\"\n <|body_0|>\n\n def create(self, labels=1):\n \"\"\"Creates a single-task, multi-task or multi-label classifier. Args: labels: shows the number of labels for the output. If more than 1, the model is multi-label. The length of self.task_labels shows whether the model is a single-task or multi-task.\"\"\"\n <|body_1|>\n\n def train_model(self, train_batches, val_batches, loss_function, weights):\n \"\"\"Trains and validates a classifier on the input batches. Args: train_batches: a dictionary of inputs, attnetions and labels created by self.get_batches() val_batches: a dictionary of inputs, attnetions and labels created by self.get_batches() loss_function: a function for calculating the loss value during training weights: a dictionary of weights for each task. Returns: the number of training epochs before early stopping.\"\"\"\n <|body_2|>\n\n def predict(self, batches):\n \"\"\"Predicts the outputs for each task_label. Args: batches: the input batches created through self.get_batches() Returns: A dataframe that includes predictions and labels for each task_label\"\"\"\n <|body_3|>\n\n def mc_predict(self, batches):\n \"\"\"Uses the trained models for mc_pass iterations to calculate uncertainty. Each iteration is performed with dropouts, so the predictions vary. Based on Gal and Ghahramani, 2016. Args: batches: the input batches created through self.get_batches() Returns: A dataframe that includes predictions and labels for each task_label\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.params = params\n self.task_labels = task_labels\n self.cache_dir = tempfile.gettempdir()\n<|end_body_0|>\n\n<|body_start_1|>\n tf.compat.v1.reset_default_graph()\n config = transformers.BertConfig.from_pretrained(os.path.join(self.params.bert_path, 'config.json'), cache_dir=self.cache_dir)\n model = transformers.TFBertModel.from_pretrained(os.path.join(self.params.bert_path, 'tf_model.h5'), config=config, cache_dir=self.cache_dir)\n inputs = tf.keras.layers.Input(shape=(int(self.params.max_l),), dtype=tf.int64, name='inputs')\n atten = tf.keras.layers.Input(shape=(int(self.params.max_l),), dtype=tf.int64, name='atten')\n hidden = model([inputs, atten])[1]\n drop_hidden = tf.keras.layers.Dropout(0.1)(hidden, training=self.params.mc_dropout)\n logits = dict()\n for task_label in self.task_labels:\n logits[task_label] = tf.keras.layers.Dense(labels, activation='sigmoid', name=task_label)(drop_hidden)\n self.tf_model = tf.keras.Model({'inputs': inputs, 'atten': atten}, logits)\n<|end_body_1|>\n\n<|body_start_2|>\n cb_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=self.params.early_stopping_check, min_delta=self.params.min_epoch_change)\n cb_scheduler = tf.keras.callbacks.LearningRateScheduler(utils.reduce_learning_rate)\n self.tf_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self.params.lr), loss=loss_function)\n y = {task_label: train_batches[task_label].replace(np.nan, -1) for task_label in self.task_labels}\n val_y = {task_label: val_batches[task_label].replace(np.nan, -1) for task_label in self.task_labels}\n history = self.tf_model.fit(x={'inputs': train_batches['inputs'], 'atten': train_batches['attentions']}, y=y, epochs=self.params.n_epoch, callbacks=[cb_early, cb_scheduler], validation_data=({'inputs': val_batches['inputs'], 'atten': val_batches['attentions']}, val_y), class_weight=weights)\n return 
len(history.history['loss'])\n<|end_body_2|>\n\n<|body_start_3|>\n results = collections.defaultdict(list)\n if 'majority' in batches.keys():\n results['majority'] = batches['majority']\n if 'text_id' in batches.keys():\n results['text_id'] = batches['text_id']\n logits = self.tf_model.predict(x={'inputs': batches['inputs'], 'atten': batches['attentions']})\n for i, task_label in enumerate(self.task_labels):\n predictions = utils.to_binary(logits[i] if len(self.task_labels) > 1 else logits)\n results[task_label + '_pred'] = predictions\n if task_label in batches.keys():\n results[task_label + '_label'] = batches[task_label]\n if len(self.task_labels) == 1:\n results[task_label + '_logit'] = logits.flatten()\n return pd.DataFrame(results)\n<|end_body_3|>\n\n<|body_start_4|>\n results = collections.defaultdict(list)\n dropout_predictions = np.empty((0, batches['inputs'].shape[0], 1))\n for i, task_label in enumerate(self.task_labels):\n for _ in range(self.params.mc_passes):\n logits = self.tf_model.predict(x={'inputs': batches['inputs'], 'atten': batches['attentions']})\n mc_predictions = utils.to_binary(logits[i] if len(self.task_labels) > 1 else logits)\n dropout_predictions = np.vstack((dropout_predictions, mc_predictions[np.newaxis, :, np.newaxis]))\n results[task_label + '_mean'] = list(np.squeeze(np.mean(dropout_predictions, axis=0)))\n results[task_label + '_variance'] = list(np.squeeze(np.var(dropout_predictions, axis=0)))\n return pd.DataFrame(results)\n<|end_body_4|>\n", "revision_id": "5573d9c5822f4e866b6692769963ae819cb3f10d", "skeleton": "<|skeleton|>\nclass Classifier:\n \"\"\"Classifier can be single-task, or multi-task.\"\"\"\n\n def __init__(self, params, task_labels='majority'):\n \"\"\"Creates a Classifier instance for predicting task_labels. Args: params: a Params instance which includes the hyperparameters of the model task_labels: list of label names to be predicted from text.\"\"\"\n <|body_0|>\n\n def create(self, labels=1):\n \"\"\"Creates a single-task, multi-task or multi-label classifier. Args: labels: shows the number of labels for the output. If more than 1, the model is multi-label. The length of self.task_labels shows whether the model is a single-task or multi-task.\"\"\"\n <|body_1|>\n\n def train_model(self, train_batches, val_batches, loss_function, weights):\n \"\"\"Trains and validates a classifier on the input batches. Args: train_batches: a dictionary of inputs, attentions and labels created by self.get_batches() val_batches: a dictionary of inputs, attentions and labels created by self.get_batches() loss_function: a function for calculating the loss value during training weights: a dictionary of weights for each task. Returns: the number of training epochs before early stopping.\"\"\"\n <|body_2|>\n\n def predict(self, batches):\n \"\"\"Predicts the outputs for each task_label. Args: batches: the input batches created through self.get_batches() Returns: A dataframe that includes predictions and labels for each task_label\"\"\"\n <|body_3|>\n\n def mc_predict(self, batches):\n \"\"\"Uses the trained models for mc_pass iterations to calculate uncertainty. Each iteration is performed with dropouts, so the predictions vary. Based on Gal and Ghahramani, 2016. 
Args: batches: the input batches created through self.get_batches() Returns: A dataframe that includes predictions and labels for each task_label\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Classifier:\n \"\"\"Classifier can be single-task, or multi-task.\"\"\"\n\n def __init__(self, params, task_labels='majority'):\n \"\"\"Creates a Classifier instance for predicting task_labels. Args: params: a Params instance which includes the hyperparameters of the model task_labels: list of label names to be predicted from text.\"\"\"\n self.params = params\n self.task_labels = task_labels\n self.cache_dir = tempfile.gettempdir()\n\n def create(self, labels=1):\n \"\"\"Creates a single-task, multi-task or multi-label classifier. Args: labels: shows the number of labels for the output. If more than 1, the model is multi-label. The length of self.task_labels shows whether the model is a single-task or multi-task.\"\"\"\n tf.compat.v1.reset_default_graph()\n config = transformers.BertConfig.from_pretrained(os.path.join(self.params.bert_path, 'config.json'), cache_dir=self.cache_dir)\n model = transformers.TFBertModel.from_pretrained(os.path.join(self.params.bert_path, 'tf_model.h5'), config=config, cache_dir=self.cache_dir)\n inputs = tf.keras.layers.Input(shape=(int(self.params.max_l),), dtype=tf.int64, name='inputs')\n atten = tf.keras.layers.Input(shape=(int(self.params.max_l),), dtype=tf.int64, name='atten')\n hidden = model([inputs, atten])[1]\n drop_hidden = tf.keras.layers.Dropout(0.1)(hidden, training=self.params.mc_dropout)\n logits = dict()\n for task_label in self.task_labels:\n logits[task_label] = tf.keras.layers.Dense(labels, activation='sigmoid', name=task_label)(drop_hidden)\n self.tf_model = tf.keras.Model({'inputs': inputs, 'atten': atten}, logits)\n\n def train_model(self, train_batches, val_batches, loss_function, weights):\n \"\"\"Trains and validates a classifier on the input batches. Args: train_batches: a dictionary of inputs, attentions and labels created by self.get_batches() val_batches: a dictionary of inputs, attentions and labels created by self.get_batches() loss_function: a function for calculating the loss value during training weights: a dictionary of weights for each task. Returns: the number of training epochs before early stopping.\"\"\"\n cb_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=self.params.early_stopping_check, min_delta=self.params.min_epoch_change)\n cb_scheduler = tf.keras.callbacks.LearningRateScheduler(utils.reduce_learning_rate)\n self.tf_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self.params.lr), loss=loss_function)\n y = {task_label: train_batches[task_label].replace(np.nan, -1) for task_label in self.task_labels}\n val_y = {task_label: val_batches[task_label].replace(np.nan, -1) for task_label in self.task_labels}\n history = self.tf_model.fit(x={'inputs': train_batches['inputs'], 'atten': train_batches['attentions']}, y=y, epochs=self.params.n_epoch, callbacks=[cb_early, cb_scheduler], validation_data=({'inputs': val_batches['inputs'], 'atten': val_batches['attentions']}, val_y), class_weight=weights)\n return len(history.history['loss'])\n\n def predict(self, batches):\n \"\"\"Predicts the outputs for each task_label. 
Args: batches: the input batches created through self.get_batches() Returns: A dataframe that includes predictions and labels for each task_label\"\"\"\n results = collections.defaultdict(list)\n if 'majority' in batches.keys():\n results['majority'] = batches['majority']\n if 'text_id' in batches.keys():\n results['text_id'] = batches['text_id']\n logits = self.tf_model.predict(x={'inputs': batches['inputs'], 'atten': batches['attentions']})\n for i, task_label in enumerate(self.task_labels):\n predictions = utils.to_binary(logits[i] if len(self.task_labels) > 1 else logits)\n results[task_label + '_pred'] = predictions\n if task_label in batches.keys():\n results[task_label + '_label'] = batches[task_label]\n if len(self.task_labels) == 1:\n results[task_label + '_logit'] = logits.flatten()\n return pd.DataFrame(results)\n\n def mc_predict(self, batches):\n \"\"\"Uses the trained models for mc_pass iterations to calculate uncertainty. Each iteration is performed with dropouts, so the predictions vary. Based on Gal and Ghahramani, 2016. Args: batches: the input batches created through self.get_batches() Returns: A dataframe that includes predictions and labels for each task_label\"\"\"\n results = collections.defaultdict(list)\n dropout_predictions = np.empty((0, batches['inputs'].shape[0], 1))\n for i, task_label in enumerate(self.task_labels):\n for _ in range(self.params.mc_passes):\n logits = self.tf_model.predict(x={'inputs': batches['inputs'], 'atten': batches['attentions']})\n mc_predictions = utils.to_binary(logits[i] if len(self.task_labels) > 1 else logits)\n dropout_predictions = np.vstack((dropout_predictions, mc_predictions[np.newaxis, :, np.newaxis]))\n results[task_label + '_mean'] = list(np.squeeze(np.mean(dropout_predictions, axis=0)))\n results[task_label + '_variance'] = list(np.squeeze(np.var(dropout_predictions, axis=0)))\n return pd.DataFrame(results)\n", "source": "the_stack_v2_python_sparse", "source_path": "multi_annotator/classifier.py", "source_repo": "Jimmy-INL/google-research", "split": "val", "star_events_count": 1} {"blob_id": "14aa790df38d91f32b18fde02c3e38fab109e0c9", "bodies": ["if operation == 'update' and self.request.authenticated_role != self.context.author:\n self.request.errors.add('url', 'role', 'Can update document only author')\n self.request.errors.status = 403\n raise error_handler(self.request.errors)\nif self.request.validated['tender_status'] not in ['active.qualification', 'active.awarded']:\n raise_operation_error(self.request, \"Can't {} document in current ({}) tender status\".format(operation, self.request.validated['tender_status']))\nif any([i.status != 'active' for i in self.request.validated['tender'].lots if i.id == self.request.validated['award'].lotID]):\n raise_operation_error(self.request, 'Can {} document only in active lot status'.format(operation))\nif self.request.validated['complaint'].status not in STATUS4ROLE.get(self.request.authenticated_role, []):\n raise_operation_error(self.request, \"Can't {} document in current ({}) complaint status\".format(operation, self.request.validated['complaint'].status))\nreturn True", "if not self.validate_complaint_document('add'):\n return\ndocument = upload_file(self.request)\ndocument.author = self.request.authenticated_role\nself.context.documents.append(document)\nif save_tender(self.request):\n self.LOGGER.info('Created tender award complaint document {}'.format(document.id), extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_create'}, 
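The `mc_predict` body in the `Classifier` record above aggregates repeated stochastic forward passes into a per-example mean and variance. A minimal NumPy sketch of just that aggregation step, assuming only a hypothetical `predict_fn` whose outputs vary between calls because dropout stays active at inference time (per Gal and Ghahramani, 2016):

```python
# Sketch of the Monte Carlo dropout aggregation used by mc_predict above.
# predict_fn is a hypothetical stand-in for a dropout-enabled model call.
import numpy as np

def mc_uncertainty(predict_fn, n_passes=10):
    """Run predict_fn n_passes times and summarize the stochastic outputs."""
    passes = np.stack([predict_fn() for _ in range(n_passes)], axis=0)
    # Mean over passes approximates the predictive mean; the variance across
    # passes is a simple per-example proxy for model uncertainty.
    return passes.mean(axis=0), passes.var(axis=0)

# Stand-in model: ten noisy sigmoid outputs per call.
rng = np.random.default_rng(0)
mean, var = mc_uncertainty(lambda: 1 / (1 + np.exp(-rng.normal(0.5, 0.2, 10))), 50)
```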
{'document_id': document.id}))\n self.request.response.status = 201\n document_route = self.request.matched_route.name.replace('collection_', '')\n self.request.response.headers['Location'] = self.request.current_route_url(_route_name=document_route, document_id=document.id, _query={})\n return {'data': document.serialize('view')}", "if not self.validate_complaint_document('update'):\n return\ndocument = upload_file(self.request)\ndocument.author = self.request.authenticated_role\nself.request.validated['complaint'].documents.append(document)\nif save_tender(self.request):\n self.LOGGER.info('Updated tender award complaint document {}'.format(self.request.context.id), extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_put'}))\n return {'data': document.serialize('view')}", "if not self.validate_complaint_document('update'):\n return\nif apply_patch(self.request, src=self.request.context.serialize()):\n update_file_content_type(self.request)\n self.LOGGER.info('Updated tender award complaint document {}'.format(self.request.context.id), extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_patch'}))\n return {'data': self.request.context.serialize('view')}"], "bodies_text": "<|body_start_0|>\n if operation == 'update' and self.request.authenticated_role != self.context.author:\n self.request.errors.add('url', 'role', 'Can update document only author')\n self.request.errors.status = 403\n raise error_handler(self.request.errors)\n if self.request.validated['tender_status'] not in ['active.qualification', 'active.awarded']:\n raise_operation_error(self.request, \"Can't {} document in current ({}) tender status\".format(operation, self.request.validated['tender_status']))\n if any([i.status != 'active' for i in self.request.validated['tender'].lots if i.id == self.request.validated['award'].lotID]):\n raise_operation_error(self.request, 'Can {} document only in active lot status'.format(operation))\n if self.request.validated['complaint'].status not in STATUS4ROLE.get(self.request.authenticated_role, []):\n raise_operation_error(self.request, \"Can't {} document in current ({}) complaint status\".format(operation, self.request.validated['complaint'].status))\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.validate_complaint_document('add'):\n return\n document = upload_file(self.request)\n document.author = self.request.authenticated_role\n self.context.documents.append(document)\n if save_tender(self.request):\n self.LOGGER.info('Created tender award complaint document {}'.format(document.id), extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_create'}, {'document_id': document.id}))\n self.request.response.status = 201\n document_route = self.request.matched_route.name.replace('collection_', '')\n self.request.response.headers['Location'] = self.request.current_route_url(_route_name=document_route, document_id=document.id, _query={})\n return {'data': document.serialize('view')}\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.validate_complaint_document('update'):\n return\n document = upload_file(self.request)\n document.author = self.request.authenticated_role\n self.request.validated['complaint'].documents.append(document)\n if save_tender(self.request):\n self.LOGGER.info('Updated tender award complaint document {}'.format(self.request.context.id), extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_put'}))\n return {'data': 
document.serialize('view')}\n<|end_body_2|>\n\n<|body_start_3|>\n if not self.validate_complaint_document('update'):\n return\n if apply_patch(self.request, src=self.request.context.serialize()):\n update_file_content_type(self.request)\n self.LOGGER.info('Updated tender award complaint document {}'.format(self.request.context.id), extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_patch'}))\n return {'data': self.request.context.serialize('view')}\n<|end_body_3|>\n", "class_docstring": "", "class_name": "TenderUaAwardComplaintDocumentResource", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TenderUaAwardComplaintDocumentResource:\n\n def validate_complaint_document(self, operation):\n \"\"\"TODO move validators This class is inherited in limited and openeu (qualification complaint) package, but validate_complaint_document function has different validators. For now, we have no way to use different validators on methods according to procedure type.\"\"\"\n <|body_0|>\n\n def collection_post(self):\n \"\"\"Tender Award Complaint Document Upload\"\"\"\n <|body_1|>\n\n def put(self):\n \"\"\"Tender Award Complaint Document Update\"\"\"\n <|body_2|>\n\n def patch(self):\n \"\"\"Tender Award Complaint Document Update\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if operation == 'update' and self.request.authenticated_role != self.context.author:\n self.request.errors.add('url', 'role', 'Can update document only author')\n self.request.errors.status = 403\n raise error_handler(self.request.errors)\n if self.request.validated['tender_status'] not in ['active.qualification', 'active.awarded']:\n raise_operation_error(self.request, \"Can't {} document in current ({}) tender status\".format(operation, self.request.validated['tender_status']))\n if any([i.status != 'active' for i in self.request.validated['tender'].lots if i.id == self.request.validated['award'].lotID]):\n raise_operation_error(self.request, 'Can {} document only in active lot status'.format(operation))\n if self.request.validated['complaint'].status not in STATUS4ROLE.get(self.request.authenticated_role, []):\n raise_operation_error(self.request, \"Can't {} document in current ({}) complaint status\".format(operation, self.request.validated['complaint'].status))\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.validate_complaint_document('add'):\n return\n document = upload_file(self.request)\n document.author = self.request.authenticated_role\n self.context.documents.append(document)\n if save_tender(self.request):\n self.LOGGER.info('Created tender award complaint document {}'.format(document.id), extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_create'}, {'document_id': document.id}))\n self.request.response.status = 201\n document_route = self.request.matched_route.name.replace('collection_', '')\n self.request.response.headers['Location'] = self.request.current_route_url(_route_name=document_route, document_id=document.id, _query={})\n return {'data': document.serialize('view')}\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.validate_complaint_document('update'):\n return\n document = upload_file(self.request)\n document.author = self.request.authenticated_role\n self.request.validated['complaint'].documents.append(document)\n if save_tender(self.request):\n self.LOGGER.info('Updated tender award complaint document {}'.format(self.request.context.id), 
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_put'}))\n return {'data': document.serialize('view')}\n<|end_body_2|>\n\n<|body_start_3|>\n if not self.validate_complaint_document('update'):\n return\n if apply_patch(self.request, src=self.request.context.serialize()):\n update_file_content_type(self.request)\n self.LOGGER.info('Updated tender award complaint document {}'.format(self.request.context.id), extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_patch'}))\n return {'data': self.request.context.serialize('view')}\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_val_000470", "length_bytes": 5315, "license_type": "permissive", "methods": [{"docstring": "TODO move validators This class is inherited in limited and openeu (qualification complaint) package, but validate_complaint_document function has different validators. For now, we have no way to use different validators on methods according to procedure type.", "name": "validate_complaint_document", "signature": "def validate_complaint_document(self, operation)"}, {"docstring": "Tender Award Complaint Document Upload", "name": "collection_post", "signature": "def collection_post(self)"}, {"docstring": "Tender Award Complaint Document Update", "name": "put", "signature": "def put(self)"}, {"docstring": "Tender Award Complaint Document Update", "name": "patch", "signature": "def patch(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_006850", "prompt": "Implement the Python class `TenderUaAwardComplaintDocumentResource` described below.\n\nClass description:\nImplement the TenderUaAwardComplaintDocumentResource class.\n\nMethod signatures and docstrings:\n- def validate_complaint_document(self, operation): TODO move validators This class is inherited in limited and openeu (qualification complaint) package, but validate_complaint_document function has different validators. For now, we have no way to use different validators on methods according to procedure type.\n- def collection_post(self): Tender Award Complaint Document Upload\n- def put(self): Tender Award Complaint Document Update\n- def patch(self): Tender Award Complaint Document Update", "prompted_full_text": "Implement the Python class `TenderUaAwardComplaintDocumentResource` described below.\n\nClass description:\nImplement the TenderUaAwardComplaintDocumentResource class.\n\nMethod signatures and docstrings:\n- def validate_complaint_document(self, operation): TODO move validators This class is inherited in limited and openeu (qualification complaint) package, but validate_complaint_document function has different validators. For now, we have no way to use different validators on methods according to procedure type.\n- def collection_post(self): Tender Award Complaint Document Upload\n- def put(self): Tender Award Complaint Document Update\n- def patch(self): Tender Award Complaint Document Update\n\n<|skeleton|>\nclass TenderUaAwardComplaintDocumentResource:\n\n def validate_complaint_document(self, operation):\n \"\"\"TODO move validators This class is inherited in limited and openeu (qualification complaint) package, but validate_complaint_document function has different validators. 
For now, we have no way to use different validators on methods according to procedure type.\"\"\"\n <|body_0|>\n\n def collection_post(self):\n \"\"\"Tender Award Complaint Document Upload\"\"\"\n <|body_1|>\n\n def put(self):\n \"\"\"Tender Award Complaint Document Update\"\"\"\n <|body_2|>\n\n def patch(self):\n \"\"\"Tender Award Complaint Document Update\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if operation == 'update' and self.request.authenticated_role != self.context.author:\n self.request.errors.add('url', 'role', 'Can update document only author')\n self.request.errors.status = 403\n raise error_handler(self.request.errors)\n if self.request.validated['tender_status'] not in ['active.qualification', 'active.awarded']:\n raise_operation_error(self.request, \"Can't {} document in current ({}) tender status\".format(operation, self.request.validated['tender_status']))\n if any([i.status != 'active' for i in self.request.validated['tender'].lots if i.id == self.request.validated['award'].lotID]):\n raise_operation_error(self.request, 'Can {} document only in active lot status'.format(operation))\n if self.request.validated['complaint'].status not in STATUS4ROLE.get(self.request.authenticated_role, []):\n raise_operation_error(self.request, \"Can't {} document in current ({}) complaint status\".format(operation, self.request.validated['complaint'].status))\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.validate_complaint_document('add'):\n return\n document = upload_file(self.request)\n document.author = self.request.authenticated_role\n self.context.documents.append(document)\n if save_tender(self.request):\n self.LOGGER.info('Created tender award complaint document {}'.format(document.id), extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_create'}, {'document_id': document.id}))\n self.request.response.status = 201\n document_route = self.request.matched_route.name.replace('collection_', '')\n self.request.response.headers['Location'] = self.request.current_route_url(_route_name=document_route, document_id=document.id, _query={})\n return {'data': document.serialize('view')}\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.validate_complaint_document('update'):\n return\n document = upload_file(self.request)\n document.author = self.request.authenticated_role\n self.request.validated['complaint'].documents.append(document)\n if save_tender(self.request):\n self.LOGGER.info('Updated tender award complaint document {}'.format(self.request.context.id), extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_put'}))\n return {'data': document.serialize('view')}\n<|end_body_2|>\n\n<|body_start_3|>\n if not self.validate_complaint_document('update'):\n return\n if apply_patch(self.request, src=self.request.context.serialize()):\n update_file_content_type(self.request)\n self.LOGGER.info('Updated tender award complaint document {}'.format(self.request.context.id), extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_patch'}))\n return {'data': self.request.context.serialize('view')}\n<|end_body_3|>\n", "revision_id": "5afdd3a62a8e562cf77e2d963d88f1a26613d16a", "skeleton": "<|skeleton|>\nclass TenderUaAwardComplaintDocumentResource:\n\n def validate_complaint_document(self, operation):\n \"\"\"TODO move validators This class is inherited in limited and openeu (qualification complaint) package, but validate_complaint_document function has different validators. 
For now, we have no way to use different validators on methods according to procedure type.\"\"\"\n <|body_0|>\n\n def collection_post(self):\n \"\"\"Tender Award Complaint Document Upload\"\"\"\n <|body_1|>\n\n def put(self):\n \"\"\"Tender Award Complaint Document Update\"\"\"\n <|body_2|>\n\n def patch(self):\n \"\"\"Tender Award Complaint Document Update\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TenderUaAwardComplaintDocumentResource:\n def validate_complaint_document(self, operation):\n \"\"\"TODO move validators This class is inherited in limited and openeu (qualification complaint) package, but validate_complaint_document function has different validators. For now, we have no way to use different validators on methods according to procedure type.\"\"\"\n if operation == 'update' and self.request.authenticated_role != self.context.author:\n self.request.errors.add('url', 'role', 'Can update document only author')\n self.request.errors.status = 403\n raise error_handler(self.request.errors)\n if self.request.validated['tender_status'] not in ['active.qualification', 'active.awarded']:\n raise_operation_error(self.request, \"Can't {} document in current ({}) tender status\".format(operation, self.request.validated['tender_status']))\n if any([i.status != 'active' for i in self.request.validated['tender'].lots if i.id == self.request.validated['award'].lotID]):\n raise_operation_error(self.request, 'Can {} document only in active lot status'.format(operation))\n if self.request.validated['complaint'].status not in STATUS4ROLE.get(self.request.authenticated_role, []):\n raise_operation_error(self.request, \"Can't {} document in current ({}) complaint status\".format(operation, self.request.validated['complaint'].status))\n return True\n\n def collection_post(self):\n \"\"\"Tender Award Complaint Document Upload\"\"\"\n if not self.validate_complaint_document('add'):\n return\n document = upload_file(self.request)\n document.author = self.request.authenticated_role\n self.context.documents.append(document)\n if save_tender(self.request):\n self.LOGGER.info('Created tender award complaint document {}'.format(document.id), extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_create'}, {'document_id': document.id}))\n self.request.response.status = 201\n document_route = self.request.matched_route.name.replace('collection_', '')\n self.request.response.headers['Location'] = self.request.current_route_url(_route_name=document_route, document_id=document.id, _query={})\n return {'data': document.serialize('view')}\n\n def put(self):\n \"\"\"Tender Award Complaint Document Update\"\"\"\n if not self.validate_complaint_document('update'):\n return\n document = upload_file(self.request)\n document.author = self.request.authenticated_role\n self.request.validated['complaint'].documents.append(document)\n if save_tender(self.request):\n self.LOGGER.info('Updated tender award complaint document {}'.format(self.request.context.id), extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_put'}))\n return {'data': document.serialize('view')}\n\n def patch(self):\n \"\"\"Tender Award Complaint Document Update\"\"\"\n if not self.validate_complaint_document('update'):\n return\n if apply_patch(self.request, src=self.request.context.serialize()):\n update_file_content_type(self.request)\n self.LOGGER.info('Updated tender award 
complaint document {}'.format(self.request.context.id), extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_patch'}))\n return {'data': self.request.context.serialize('view')}\n", "source": "the_stack_v2_python_sparse", "source_path": "src/openprocurement/tender/openua/views/award_complaint_document.py", "source_repo": "pontostroy/api", "split": "val", "star_events_count": 0} {"blob_id": "6f1ca5c9b6bac3fe918823fe50bfc7bcecf30773", "bodies": ["self.codec = container.yaml_codec\nself.mapper = container.mapper\nself.container = container", "contents = copy.deepcopy(metadata)\ncontents = dict(contents)\ncontents = self.mapper.to_cloud(contents)\ncontents = self.codec.serialize(contents)\nwith self.container.create_cloud_storage() as storage:\n storage.set_artifact_from_string(cloud_identifier, contents)", "with self.container.create_cloud_storage() as storage:\n meta = None\n try:\n raw_meta = storage.get_artifact_as_string(cloud_identifier)\n meta = self.codec.deserialize(raw_meta)\n except ArtifactNotFoundError:\n pass\n if not meta:\n meta = {}\n meta = self.mapper.to_response(meta)\nreturn meta"], "bodies_text": "<|body_start_0|>\n self.codec = container.yaml_codec\n self.mapper = container.mapper\n self.container = container\n<|end_body_0|>\n\n<|body_start_1|>\n contents = copy.deepcopy(metadata)\n contents = dict(contents)\n contents = self.mapper.to_cloud(contents)\n contents = self.codec.serialize(contents)\n with self.container.create_cloud_storage() as storage:\n storage.set_artifact_from_string(cloud_identifier, contents)\n<|end_body_1|>\n\n<|body_start_2|>\n with self.container.create_cloud_storage() as storage:\n meta = None\n try:\n raw_meta = storage.get_artifact_as_string(cloud_identifier)\n meta = self.codec.deserialize(raw_meta)\n except ArtifactNotFoundError:\n pass\n if not meta:\n meta = {}\n meta = self.mapper.to_response(meta)\n return meta\n<|end_body_2|>\n", "class_docstring": "", "class_name": "CloudPortal", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CloudPortal:\n\n def __init__(self, container):\n \"\"\"Args: container(shelf.metadata.container.Container)\"\"\"\n <|body_0|>\n\n def update(self, cloud_identifier, metadata):\n \"\"\"Updates the metadata in the cloud which is the source of truth. Args: cloud_identifier(basestring): Something that can identify the file in the cloud. Right now that will be a path to the file in S3 that stores the metadata metadata(schemas/metadata.json)\"\"\"\n <|body_1|>\n\n def load(self, cloud_identifier):\n \"\"\"Loads metadata from the cloud. 
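The `validate_complaint_document` bodies in the record above all follow one gate pattern: check the actor, then a whitelist of statuses, and abort the request on the first failing check. A simplified, framework-free sketch of that pattern; the names `OperationError` and `ALLOWED_TENDER_STATUSES` are illustrative, not from the source, and plain exceptions stand in for the framework's `raise_operation_error`/`error_handler` helpers:

```python
# Status-gating sketch for an add/update document operation.
ALLOWED_TENDER_STATUSES = {'active.qualification', 'active.awarded'}

class OperationError(Exception):
    pass

def validate_document_operation(operation, tender_status, author, actor):
    # Only the original author may update an existing document.
    if operation == 'update' and actor != author:
        raise OperationError('Can update document only author')
    # Both 'add' and 'update' are limited to a whitelist of tender statuses.
    if tender_status not in ALLOWED_TENDER_STATUSES:
        raise OperationError("Can't {} document in current ({}) tender status".format(operation, tender_status))
    return True

assert validate_document_operation('update', 'active.awarded', 'complaint_owner', 'complaint_owner')
```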
Returns dict\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.codec = container.yaml_codec\n self.mapper = container.mapper\n self.container = container\n<|end_body_0|>\n\n<|body_start_1|>\n contents = copy.deepcopy(metadata)\n contents = dict(contents)\n contents = self.mapper.to_cloud(contents)\n contents = self.codec.serialize(contents)\n with self.container.create_cloud_storage() as storage:\n storage.set_artifact_from_string(cloud_identifier, contents)\n<|end_body_1|>\n\n<|body_start_2|>\n with self.container.create_cloud_storage() as storage:\n meta = None\n try:\n raw_meta = storage.get_artifact_as_string(cloud_identifier)\n meta = self.codec.deserialize(raw_meta)\n except ArtifactNotFoundError:\n pass\n if not meta:\n meta = {}\n meta = self.mapper.to_response(meta)\n return meta\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000471", "length_bytes": 1963, "license_type": "permissive", "methods": [{"docstring": "Args: container(shelf.metadata.container.Container)", "name": "__init__", "signature": "def __init__(self, container)"}, {"docstring": "Updates the metadata in the cloud which is the source of truth. Args: cloud_identifier(basestring): Something that can identify the file in the cloud. Right now that will be a path to the file in S3 that stores the metadata metadata(schemas/metadata.json)", "name": "update", "signature": "def update(self, cloud_identifier, metadata)"}, {"docstring": "Loads metadata from the cloud. Returns dict", "name": "load", "signature": "def load(self, cloud_identifier)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006293", "prompt": "Implement the Python class `CloudPortal` described below.\n\nClass description:\nImplement the CloudPortal class.\n\nMethod signatures and docstrings:\n- def __init__(self, container): Args: container(shelf.metadata.container.Container)\n- def update(self, cloud_identifier, metadata): Updates the metadata in the cloud which is the source of truth. Args: cloud_identifier(basestring): Something that can identify the file in the cloud. Right now that will be a path to the file in S3 that stores the metadata metadata(schemas/metadata.json)\n- def load(self, cloud_identifier): Loads metadata from the cloud. Returns dict", "prompted_full_text": "Implement the Python class `CloudPortal` described below.\n\nClass description:\nImplement the CloudPortal class.\n\nMethod signatures and docstrings:\n- def __init__(self, container): Args: container(shelf.metadata.container.Container)\n- def update(self, cloud_identifier, metadata): Updates the metadata in the cloud which is the source of truth. Args: cloud_identifier(basestring): Something that can identify the file in the cloud. Right now that will be a path to the file in S3 that stores the metadata metadata(schemas/metadata.json)\n- def load(self, cloud_identifier): Loads metadata from the cloud. Returns dict\n\n<|skeleton|>\nclass CloudPortal:\n\n def __init__(self, container):\n \"\"\"Args: container(shelf.metadata.container.Container)\"\"\"\n <|body_0|>\n\n def update(self, cloud_identifier, metadata):\n \"\"\"Updates the metadata in the cloud which is the source of truth. Args: cloud_identifier(basestring): Something that can identify the file in the cloud. Right now that will be a path to the file in S3 that stores the metadata metadata(schemas/metadata.json)\"\"\"\n <|body_1|>\n\n def load(self, cloud_identifier):\n \"\"\"Loads metadata from the cloud. 
Returns dict\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.codec = container.yaml_codec\n self.mapper = container.mapper\n self.container = container\n<|end_body_0|>\n\n<|body_start_1|>\n contents = copy.deepcopy(metadata)\n contents = dict(contents)\n contents = self.mapper.to_cloud(contents)\n contents = self.codec.serialize(contents)\n with self.container.create_cloud_storage() as storage:\n storage.set_artifact_from_string(cloud_identifier, contents)\n<|end_body_1|>\n\n<|body_start_2|>\n with self.container.create_cloud_storage() as storage:\n meta = None\n try:\n raw_meta = storage.get_artifact_as_string(cloud_identifier)\n meta = self.codec.deserialize(raw_meta)\n except ArtifactNotFoundError:\n pass\n if not meta:\n meta = {}\n meta = self.mapper.to_response(meta)\n return meta\n<|end_body_2|>\n", "revision_id": "ea59703082402ad3b6454482f0487418295fbd19", "skeleton": "<|skeleton|>\nclass CloudPortal:\n\n def __init__(self, container):\n \"\"\"Args: container(shelf.metadata.container.Container)\"\"\"\n <|body_0|>\n\n def update(self, cloud_identifier, metadata):\n \"\"\"Updates the metadata in the cloud which is the source of truth. Args: cloud_identifier(basestring): Something that can identify the file in the cloud. Right now that will be a path to the file in S3 that stores the metadata metadata(schemas/metadata.json)\"\"\"\n <|body_1|>\n\n def load(self, cloud_identifier):\n \"\"\"Loads metadata from the cloud. Returns dict\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CloudPortal:\n def __init__(self, container):\n \"\"\"Args: container(shelf.metadata.container.Container)\"\"\"\n self.codec = container.yaml_codec\n self.mapper = container.mapper\n self.container = container\n\n def update(self, cloud_identifier, metadata):\n \"\"\"Updates the metadata in the cloud which is the source of truth. Args: cloud_identifier(basestring): Something that can identify the file in the cloud. Right now that will be a path to the file in S3 that stores the metadata metadata(schemas/metadata.json)\"\"\"\n contents = copy.deepcopy(metadata)\n contents = dict(contents)\n contents = self.mapper.to_cloud(contents)\n contents = self.codec.serialize(contents)\n with self.container.create_cloud_storage() as storage:\n storage.set_artifact_from_string(cloud_identifier, contents)\n\n def load(self, cloud_identifier):\n \"\"\"Loads metadata from the cloud. 
Returns dict\"\"\"\n with self.container.create_cloud_storage() as storage:\n meta = None\n try:\n raw_meta = storage.get_artifact_as_string(cloud_identifier)\n meta = self.codec.deserialize(raw_meta)\n except ArtifactNotFoundError:\n pass\n if not meta:\n meta = {}\n meta = self.mapper.to_response(meta)\n return meta\n", "source": "the_stack_v2_python_sparse", "source_path": "shelf/metadata/cloud_portal.py", "source_repo": "bfilipov/shelf", "split": "val", "star_events_count": 0} {"blob_id": "46a57b7429bfa95fed9b004173c0cc7b34df941c", "bodies": ["n = len(searchString)\nmin_window_size = sys.maxsize\nmin_window = ''\nfor left in range(0, n):\n for right in range(left, n):\n window_snippet = searchString[left:right + 1]\n window_contains_all = self.contains_all(window_snippet, t)\n if window_contains_all and len(window_snippet) < min_window_size:\n min_window_size = len(window_snippet)\n min_window = window_snippet\nreturn min_window", "required_characters = {}\nfor i in range(0, len(t)):\n occurrences = 0\n if t[i] in required_characters:\n occurrences = required_characters[t[i]]\n required_characters[t[i]] = occurrences + 1\nfor i in range(0, len(searchString)):\n curr = searchString[i]\n if curr in required_characters:\n new_occurrences = required_characters[curr] - 1\n '\\n If we have satisfied all of the characters for this character, remove the key\\n from the hashtable.\\n \\n Otherwise, just update the mapping with 1 less occurrence to satisfy for\\n '\n if new_occurrences == 0:\n del required_characters[curr]\n else:\n required_characters[curr] = new_occurrences\n'\\n If we satisfied all characters the the required characters hashtable will be\\n empty\\n '\nreturn not required_characters"], "bodies_text": "<|body_start_0|>\n n = len(searchString)\n min_window_size = sys.maxsize\n min_window = ''\n for left in range(0, n):\n for right in range(left, n):\n window_snippet = searchString[left:right + 1]\n window_contains_all = self.contains_all(window_snippet, t)\n if window_contains_all and len(window_snippet) < min_window_size:\n min_window_size = len(window_snippet)\n min_window = window_snippet\n return min_window\n<|end_body_0|>\n\n<|body_start_1|>\n required_characters = {}\n for i in range(0, len(t)):\n occurrences = 0\n if t[i] in required_characters:\n occurrences = required_characters[t[i]]\n required_characters[t[i]] = occurrences + 1\n for i in range(0, len(searchString)):\n curr = searchString[i]\n if curr in required_characters:\n new_occurrences = required_characters[curr] - 1\n '\\n If we have satisfied all of the characters for this character, remove the key\\n from the hashtable.\\n \\n Otherwise, just update the mapping with 1 less occurrence to satisfy for\\n '\n if new_occurrences == 0:\n del required_characters[curr]\n else:\n required_characters[curr] = new_occurrences\n '\\n If we satisfied all characters the the required characters hashtable will be\\n empty\\n '\n return not required_characters\n<|end_body_1|>\n", "class_docstring": "", "class_name": "BruteForceSolution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BruteForceSolution:\n\n def minWindow(self, searchString, t):\n \"\"\"Interface ---- :type searchString: str :type t: str :rtype: str Approach ---- 1. Plant the left pointer and scan the window 2. Find all substrings that contain all of the target 3. 
Take the smallest Complexity ---- M = target string N = length of query string Time : O(N^2) Space : O(1)\"\"\"\n <|body_0|>\n\n def contains_all(self, searchString, t):\n \"\"\"This is a really convoluted way of doing this 1. You could use better syntax 2. Or make use of Collections.Counter()\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(searchString)\n min_window_size = sys.maxsize\n min_window = ''\n for left in range(0, n):\n for right in range(left, n):\n window_snippet = searchString[left:right + 1]\n window_contains_all = self.contains_all(window_snippet, t)\n if window_contains_all and len(window_snippet) < min_window_size:\n min_window_size = len(window_snippet)\n min_window = window_snippet\n return min_window\n<|end_body_0|>\n\n<|body_start_1|>\n required_characters = {}\n for i in range(0, len(t)):\n occurrences = 0\n if t[i] in required_characters:\n occurrences = required_characters[t[i]]\n required_characters[t[i]] = occurrences + 1\n for i in range(0, len(searchString)):\n curr = searchString[i]\n if curr in required_characters:\n new_occurrences = required_characters[curr] - 1\n '\\n If we have satisfied all of the characters for this character, remove the key\\n from the hashtable.\\n \\n Otherwise, just update the mapping with 1 less occurrence to satisfy for\\n '\n if new_occurrences == 0:\n del required_characters[curr]\n else:\n required_characters[curr] = new_occurrences\n '\\n If we satisfied all characters the required characters hashtable will be\\n empty\\n '\n return not required_characters\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000472", "length_bytes": 6464, "license_type": "no_license", "methods": [{"docstring": "Interface ---- :type searchString: str :type t: str :rtype: str Approach ---- 1. Plant the left pointer and scan the window 2. Find all substrings that contain all of the target 3. Take the smallest Complexity ---- M = target string N = length of query string Time : O(N^2) Space : O(1)", "name": "minWindow", "signature": "def minWindow(self, searchString, t)"}, {"docstring": "This is a really convoluted way of doing this 1. You could use better syntax 2. Or make use of Collections.Counter()", "name": "contains_all", "signature": "def contains_all(self, searchString, t)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004754", "prompt": "Implement the Python class `BruteForceSolution` described below.\n\nClass description:\nImplement the BruteForceSolution class.\n\nMethod signatures and docstrings:\n- def minWindow(self, searchString, t): Interface ---- :type searchString: str :type t: str :rtype: str Approach ---- 1. Plant the left pointer and scan the window 2. Find all substrings that contain all of the target 3. Take the smallest Complexity ---- M = target string N = length of query string Time : O(N^2) Space : O(1)\n- def contains_all(self, searchString, t): This is a really convoluted way of doing this 1. You could use better syntax 2. Or make use of Collections.Counter()", "prompted_full_text": "Implement the Python class `BruteForceSolution` described below.\n\nClass description:\nImplement the BruteForceSolution class.\n\nMethod signatures and docstrings:\n- def minWindow(self, searchString, t): Interface ---- :type searchString: str :type t: str :rtype: str Approach ---- 1. Plant the left pointer and scan the window 2. Find all substrings that contain all of the target 3. 
Take the smallest Complexity ---- M = target string N = length of query string Time : O(N^2) Space : O(1)\n- def contains_all(self, searchString, t): This is a really convoluted way of doing this 1. You could use better syntax 2. Or make use of Collections.Counter()\n\n<|skeleton|>\nclass BruteForceSolution:\n\n def minWindow(self, searchString, t):\n \"\"\"Interface ---- :type searchString: str :type t: str :rtype: str Approach ---- 1. Plant the left pointer and scan the window 2. Find all substrings that contain all of the target 3. Take the smallest Complexity ---- M = target string N = length of query string Time : O(N^2) Space : O(1)\"\"\"\n <|body_0|>\n\n def contains_all(self, searchString, t):\n \"\"\"This is a really convoluted way of doing this 1. You could use better syntax 2. Or make use of Collections.Counter()\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(searchString)\n min_window_size = sys.maxsize\n min_window = ''\n for left in range(0, n):\n for right in range(left, n):\n window_snippet = searchString[left:right + 1]\n window_contains_all = self.contains_all(window_snippet, t)\n if window_contains_all and len(window_snippet) < min_window_size:\n min_window_size = len(window_snippet)\n min_window = window_snippet\n return min_window\n<|end_body_0|>\n\n<|body_start_1|>\n required_characters = {}\n for i in range(0, len(t)):\n occurrences = 0\n if t[i] in required_characters:\n occurrences = required_characters[t[i]]\n required_characters[t[i]] = occurrences + 1\n for i in range(0, len(searchString)):\n curr = searchString[i]\n if curr in required_characters:\n new_occurrences = required_characters[curr] - 1\n '\\n If we have satisfied all of the characters for this character, remove the key\\n from the hashtable.\\n \\n Otherwise, just update the mapping with 1 less occurrence to satisfy for\\n '\n if new_occurrences == 0:\n del required_characters[curr]\n else:\n required_characters[curr] = new_occurrences\n '\\n If we satisfied all characters the required characters hashtable will be\\n empty\\n '\n return not required_characters\n<|end_body_1|>\n", "revision_id": "c0d49423885832b616ae3c7cd58e8f24c17cfd4d", "skeleton": "<|skeleton|>\nclass BruteForceSolution:\n\n def minWindow(self, searchString, t):\n \"\"\"Interface ---- :type searchString: str :type t: str :rtype: str Approach ---- 1. Plant the left pointer and scan the window 2. Find all substrings that contain all of the target 3. Take the smallest Complexity ---- M = target string N = length of query string Time : O(N^2) Space : O(1)\"\"\"\n <|body_0|>\n\n def contains_all(self, searchString, t):\n \"\"\"This is a really convoluted way of doing this 1. You could use better syntax 2. Or make use of Collections.Counter()\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BruteForceSolution:\n def minWindow(self, searchString, t):\n \"\"\"Interface ---- :type searchString: str :type t: str :rtype: str Approach ---- 1. Plant the left pointer and scan the window 2. Find all substrings that contain all of the target 3. 
Take the smallest Complexity ---- M = target string N = length of query string Time : O(N^2) Space : O(1)\"\"\"\n n = len(searchString)\n min_window_size = sys.maxsize\n min_window = ''\n for left in range(0, n):\n for right in range(left, n):\n window_snippet = searchString[left:right + 1]\n window_contains_all = self.contains_all(window_snippet, t)\n if window_contains_all and len(window_snippet) < min_window_size:\n min_window_size = len(window_snippet)\n min_window = window_snippet\n return min_window\n\n def contains_all(self, searchString, t):\n \"\"\"This is a really convoluted way of doing this 1. You could use better syntax 2. Or make use of Collections.Counter()\"\"\"\n required_characters = {}\n for i in range(0, len(t)):\n occurrences = 0\n if t[i] in required_characters:\n occurrences = required_characters[t[i]]\n required_characters[t[i]] = occurrences + 1\n for i in range(0, len(searchString)):\n curr = searchString[i]\n if curr in required_characters:\n new_occurrences = required_characters[curr] - 1\n '\\n If we have satisfied all of the characters for this character, remove the key\\n from the hashtable.\\n \\n Otherwise, just update the mapping with 1 less occurrence to satisfy for\\n '\n if new_occurrences == 0:\n del required_characters[curr]\n else:\n required_characters[curr] = new_occurrences\n '\\n If we satisfied all characters the required characters hashtable will be\\n empty\\n '\n return not required_characters\n", "source": "the_stack_v2_python_sparse", "source_path": "Hashtables/minimumWindowSubstring.py", "source_repo": "miaviles/Data-Structures-Algorithms-Python", "split": "val", "star_events_count": 0} {"blob_id": "435f48322403ca8e571f3bccfe8cc3a0a1677b7e", "bodies": ["super().__init__()\ncheck_boundaries(boundaries)\nself.boundaries = boundaries\nself.frequencies = frequencies\nself.fraction = fraction", "self.randomize(None)\nself.magnitude = self.R.uniform(low=self.boundaries[0], high=self.boundaries[1])\nself.fracs = self.R.uniform(low=self.fraction[0], high=self.fraction[1])\nself.freqs = self.R.uniform(low=self.frequencies[0], high=self.frequencies[1])\nlength = signal.shape[-1]\ntime_partial = np.arange(0, round(self.fracs * length), 1)\nsquaredpulse_partial = self.magnitude * squarepulse(self.freqs * time_partial)\nloc = np.random.choice(range(length))\nsignal = paste(signal, squaredpulse_partial, (loc,))\nreturn signal"], "bodies_text": "<|body_start_0|>\n super().__init__()\n check_boundaries(boundaries)\n self.boundaries = boundaries\n self.frequencies = frequencies\n self.fraction = fraction\n<|end_body_0|>\n\n<|body_start_1|>\n self.randomize(None)\n self.magnitude = self.R.uniform(low=self.boundaries[0], high=self.boundaries[1])\n self.fracs = self.R.uniform(low=self.fraction[0], high=self.fraction[1])\n self.freqs = self.R.uniform(low=self.frequencies[0], high=self.frequencies[1])\n length = signal.shape[-1]\n time_partial = np.arange(0, round(self.fracs * length), 1)\n squaredpulse_partial = self.magnitude * squarepulse(self.freqs * time_partial)\n loc = np.random.choice(range(length))\n signal = paste(signal, squaredpulse_partial, (loc,))\n return signal\n<|end_body_1|>\n", "class_docstring": "Add a random partial square pulse to a signal", "class_name": "SignalRandAddSquarePulsePartial", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SignalRandAddSquarePulsePartial:\n \"\"\"Add a random partial square pulse to a signal\"\"\"\n\n def __init__(self, 
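The `contains_all` docstring in the `BruteForceSolution` record above suggests `collections.Counter` as the cleaner alternative; this sketch is that simplification. Counter subtraction drops non-positive counts, so an empty result means every required character of `t` is covered by the window:

```python
from collections import Counter

def contains_all(window, t):
    # Whatever survives the subtraction is the set of still-missing characters.
    missing = Counter(t) - Counter(window)
    return not missing

assert contains_all('ADOBEC', 'ABC')
assert not contains_all('ADOBE', 'ABC')  # the window has no 'C'
```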
boundaries: Sequence[float]=(0.01, 0.2), frequencies: Sequence[float]=(0.001, 0.02), fraction: Sequence[float]=(0.01, 0.2)) -> None:\n \"\"\"Args: boundaries: list defining lower and upper boundaries for the square pulse magnitude, lower and upper values need to be positive , default : ``[0.01, 0.2]`` frequencies: list defining lower and upper frequencies for square pulse signal generation example : ``[0.001, 0.02]`` fraction: list defining lower and upper boundaries for partial square pulse generation default: ``[0.01, 0.2]``\"\"\"\n <|body_0|>\n\n def __call__(self, signal: NdarrayOrTensor) -> NdarrayOrTensor:\n \"\"\"Args: signal: input 1 dimension signal to which a partial square pulse will be added\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n check_boundaries(boundaries)\n self.boundaries = boundaries\n self.frequencies = frequencies\n self.fraction = fraction\n<|end_body_0|>\n\n<|body_start_1|>\n self.randomize(None)\n self.magnitude = self.R.uniform(low=self.boundaries[0], high=self.boundaries[1])\n self.fracs = self.R.uniform(low=self.fraction[0], high=self.fraction[1])\n self.freqs = self.R.uniform(low=self.frequencies[0], high=self.frequencies[1])\n length = signal.shape[-1]\n time_partial = np.arange(0, round(self.fracs * length), 1)\n squaredpulse_partial = self.magnitude * squarepulse(self.freqs * time_partial)\n loc = np.random.choice(range(length))\n signal = paste(signal, squaredpulse_partial, (loc,))\n return signal\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000473", "length_bytes": 16322, "license_type": "permissive", "methods": [{"docstring": "Args: boundaries: list defining lower and upper boundaries for the square pulse magnitude, lower and upper values need to be positive , default : ``[0.01, 0.2]`` frequencies: list defining lower and upper frequencies for square pulse signal generation example : ``[0.001, 0.02]`` fraction: list defining lower and upper boundaries for partial square pulse generation default: ``[0.01, 0.2]``", "name": "__init__", "signature": "def __init__(self, boundaries: Sequence[float]=(0.01, 0.2), frequencies: Sequence[float]=(0.001, 0.02), fraction: Sequence[float]=(0.01, 0.2)) -> None"}, {"docstring": "Args: signal: input 1 dimension signal to which a partial square pulse will be added", "name": "__call__", "signature": "def __call__(self, signal: NdarrayOrTensor) -> NdarrayOrTensor"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000829", "prompt": "Implement the Python class `SignalRandAddSquarePulsePartial` described below.\n\nClass description:\nAdd a random partial square pulse to a signal\n\nMethod signatures and docstrings:\n- def __init__(self, boundaries: Sequence[float]=(0.01, 0.2), frequencies: Sequence[float]=(0.001, 0.02), fraction: Sequence[float]=(0.01, 0.2)) -> None: Args: boundaries: list defining lower and upper boundaries for the square pulse magnitude, lower and upper values need to be positive , default : ``[0.01, 0.2]`` frequencies: list defining lower and upper frequencies for square pulse signal generation example : ``[0.001, 0.02]`` fraction: list defining lower and upper boundaries for partial square pulse generation default: ``[0.01, 0.2]``\n- def __call__(self, signal: NdarrayOrTensor) -> NdarrayOrTensor: Args: signal: input 1 dimension signal to which a partial square pulse will be added", "prompted_full_text": "Implement the Python class `SignalRandAddSquarePulsePartial` described below.\n\nClass description:\nAdd a random partial square 
pulse to a signal\n\nMethod signatures and docstrings:\n- def __init__(self, boundaries: Sequence[float]=(0.01, 0.2), frequencies: Sequence[float]=(0.001, 0.02), fraction: Sequence[float]=(0.01, 0.2)) -> None: Args: boundaries: list defining lower and upper boundaries for the square pulse magnitude, lower and upper values need to be positive , default : ``[0.01, 0.2]`` frequencies: list defining lower and upper frequencies for square pulse signal generation example : ``[0.001, 0.02]`` fraction: list defining lower and upper boundaries for partial square pulse generation default: ``[0.01, 0.2]``\n- def __call__(self, signal: NdarrayOrTensor) -> NdarrayOrTensor: Args: signal: input 1 dimension signal to which a partial square pulse will be added\n\n<|skeleton|>\nclass SignalRandAddSquarePulsePartial:\n \"\"\"Add a random partial square pulse to a signal\"\"\"\n\n def __init__(self, boundaries: Sequence[float]=(0.01, 0.2), frequencies: Sequence[float]=(0.001, 0.02), fraction: Sequence[float]=(0.01, 0.2)) -> None:\n \"\"\"Args: boundaries: list defining lower and upper boundaries for the square pulse magnitude, lower and upper values need to be positive , default : ``[0.01, 0.2]`` frequencies: list defining lower and upper frequencies for square pulse signal generation example : ``[0.001, 0.02]`` fraction: list defining lower and upper boundaries for partial square pulse generation default: ``[0.01, 0.2]``\"\"\"\n <|body_0|>\n\n def __call__(self, signal: NdarrayOrTensor) -> NdarrayOrTensor:\n \"\"\"Args: signal: input 1 dimension signal to which a partial square pulse will be added\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n check_boundaries(boundaries)\n self.boundaries = boundaries\n self.frequencies = frequencies\n self.fraction = fraction\n<|end_body_0|>\n\n<|body_start_1|>\n self.randomize(None)\n self.magnitude = self.R.uniform(low=self.boundaries[0], high=self.boundaries[1])\n self.fracs = self.R.uniform(low=self.fraction[0], high=self.fraction[1])\n self.freqs = self.R.uniform(low=self.frequencies[0], high=self.frequencies[1])\n length = signal.shape[-1]\n time_partial = np.arange(0, round(self.fracs * length), 1)\n squaredpulse_partial = self.magnitude * squarepulse(self.freqs * time_partial)\n loc = np.random.choice(range(length))\n signal = paste(signal, squaredpulse_partial, (loc,))\n return signal\n<|end_body_1|>\n", "revision_id": "e48c3e2c741fa3fc705c4425d17ac4a5afac6c47", "skeleton": "<|skeleton|>\nclass SignalRandAddSquarePulsePartial:\n \"\"\"Add a random partial square pulse to a signal\"\"\"\n\n def __init__(self, boundaries: Sequence[float]=(0.01, 0.2), frequencies: Sequence[float]=(0.001, 0.02), fraction: Sequence[float]=(0.01, 0.2)) -> None:\n \"\"\"Args: boundaries: list defining lower and upper boundaries for the square pulse magnitude, lower and upper values need to be positive , default : ``[0.01, 0.2]`` frequencies: list defining lower and upper frequencies for square pulse signal generation example : ``[0.001, 0.02]`` fraction: list defining lower and upper boundaries for partial square pulse generation default: ``[0.01, 0.2]``\"\"\"\n <|body_0|>\n\n def __call__(self, signal: NdarrayOrTensor) -> NdarrayOrTensor:\n \"\"\"Args: signal: input 1 dimension signal to which a partial square pulse will be added\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SignalRandAddSquarePulsePartial:\n \"\"\"Add a random 
partial square pulse to a signal\"\"\"\n\n def __init__(self, boundaries: Sequence[float]=(0.01, 0.2), frequencies: Sequence[float]=(0.001, 0.02), fraction: Sequence[float]=(0.01, 0.2)) -> None:\n \"\"\"Args: boundaries: list defining lower and upper boundaries for the square pulse magnitude, lower and upper values need to be positive , default : ``[0.01, 0.2]`` frequencies: list defining lower and upper frequencies for square pulse signal generation example : ``[0.001, 0.02]`` fraction: list defining lower and upper boundaries for partial square pulse generation default: ``[0.01, 0.2]``\"\"\"\n super().__init__()\n check_boundaries(boundaries)\n self.boundaries = boundaries\n self.frequencies = frequencies\n self.fraction = fraction\n\n def __call__(self, signal: NdarrayOrTensor) -> NdarrayOrTensor:\n \"\"\"Args: signal: input 1 dimension signal to which a partial square pulse will be added\"\"\"\n self.randomize(None)\n self.magnitude = self.R.uniform(low=self.boundaries[0], high=self.boundaries[1])\n self.fracs = self.R.uniform(low=self.fraction[0], high=self.fraction[1])\n self.freqs = self.R.uniform(low=self.frequencies[0], high=self.frequencies[1])\n length = signal.shape[-1]\n time_partial = np.arange(0, round(self.fracs * length), 1)\n squaredpulse_partial = self.magnitude * squarepulse(self.freqs * time_partial)\n loc = np.random.choice(range(length))\n signal = paste(signal, squaredpulse_partial, (loc,))\n return signal\n", "source": "the_stack_v2_python_sparse", "source_path": "monai/transforms/signal/array.py", "source_repo": "Project-MONAI/MONAI", "split": "val", "star_events_count": 4805} {"blob_id": "5922d5e60a25afa90560123d121796bf0ff523c7", "bodies": ["try:\n return ParameterSetting.objects.get(base_parameter=instance, project=self.context.get('view').kwargs.get('project')).raw_value\nexcept ParameterSetting.DoesNotExist:\n return None", "if instance.type == BaseParameter.CHOICE_TYPE:\n return [x.value for x in instance.get_typed_parameter().get_available_choices()]\nelse:\n return None", "if instance.type == BaseParameter.BOOLEAN_TYPE:\n return 'Boolean'\nelif instance.type == BaseParameter.STATIC_TYPE:\n return 'Static'\nelif instance.type == BaseParameter.STRING_TYPE:\n return 'String'\nelif instance.type == BaseParameter.CHOICE_TYPE:\n return 'Choice'\nelif instance.type == BaseParameter.TEXT_TYPE:\n return 'Text'\nelif instance.type == BaseParameter.INTEGER_TYPE:\n return 'Integer'\nelse:\n return 'Float'"], "bodies_text": "<|body_start_0|>\n try:\n return ParameterSetting.objects.get(base_parameter=instance, project=self.context.get('view').kwargs.get('project')).raw_value\n except ParameterSetting.DoesNotExist:\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [x.value for x in instance.get_typed_parameter().get_available_choices()]\n else:\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n if instance.type == BaseParameter.BOOLEAN_TYPE:\n return 'Boolean'\n elif instance.type == BaseParameter.STATIC_TYPE:\n return 'Static'\n elif instance.type == BaseParameter.STRING_TYPE:\n return 'String'\n elif instance.type == BaseParameter.CHOICE_TYPE:\n return 'Choice'\n elif instance.type == BaseParameter.TEXT_TYPE:\n return 'Text'\n elif instance.type == BaseParameter.INTEGER_TYPE:\n return 'Integer'\n else:\n return 'Float'\n<|end_body_2|>\n", "class_docstring": "Serializer for Parameter model.", "class_name": "ParameterSerializer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", 
"full_text": "<|skeleton|>\nclass ParameterSerializer:\n \"\"\"Serializer for Parameter model.\"\"\"\n\n def get_value(self, instance):\n \"\"\"Get the value of the ParameterSetting belonging to the Parameter.\"\"\"\n <|body_0|>\n\n def get_choices(self, instance):\n \"\"\"Get choices corresponding to Choice parameter.\"\"\"\n <|body_1|>\n\n def get_type(self, instance):\n \"\"\"Get the type of a Parameter.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return ParameterSetting.objects.get(base_parameter=instance, project=self.context.get('view').kwargs.get('project')).raw_value\n except ParameterSetting.DoesNotExist:\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [x.value for x in instance.get_typed_parameter().get_available_choices()]\n else:\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n if instance.type == BaseParameter.BOOLEAN_TYPE:\n return 'Boolean'\n elif instance.type == BaseParameter.STATIC_TYPE:\n return 'Static'\n elif instance.type == BaseParameter.STRING_TYPE:\n return 'String'\n elif instance.type == BaseParameter.CHOICE_TYPE:\n return 'Choice'\n elif instance.type == BaseParameter.TEXT_TYPE:\n return 'Text'\n elif instance.type == BaseParameter.INTEGER_TYPE:\n return 'Integer'\n else:\n return 'Float'\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000474", "length_bytes": 6038, "license_type": "no_license", "methods": [{"docstring": "Get the value of the ParameterSetting belonging to the Parameter.", "name": "get_value", "signature": "def get_value(self, instance)"}, {"docstring": "Get choices corresponding to Choice parameter.", "name": "get_choices", "signature": "def get_choices(self, instance)"}, {"docstring": "Get the type of a Parameter.", "name": "get_type", "signature": "def get_type(self, instance)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001586", "prompt": "Implement the Python class `ParameterSerializer` described below.\n\nClass description:\nSerializer for Parameter model.\n\nMethod signatures and docstrings:\n- def get_value(self, instance): Get the value of the ParameterSetting belonging to the Parameter.\n- def get_choices(self, instance): Get choices corresponding to Choice parameter.\n- def get_type(self, instance): Get the type of a Parameter.", "prompted_full_text": "Implement the Python class `ParameterSerializer` described below.\n\nClass description:\nSerializer for Parameter model.\n\nMethod signatures and docstrings:\n- def get_value(self, instance): Get the value of the ParameterSetting belonging to the Parameter.\n- def get_choices(self, instance): Get choices corresponding to Choice parameter.\n- def get_type(self, instance): Get the type of a Parameter.\n\n<|skeleton|>\nclass ParameterSerializer:\n \"\"\"Serializer for Parameter model.\"\"\"\n\n def get_value(self, instance):\n \"\"\"Get the value of the ParameterSetting belonging to the Parameter.\"\"\"\n <|body_0|>\n\n def get_choices(self, instance):\n \"\"\"Get choices corresponding to Choice parameter.\"\"\"\n <|body_1|>\n\n def get_type(self, instance):\n \"\"\"Get the type of a Parameter.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return ParameterSetting.objects.get(base_parameter=instance, project=self.context.get('view').kwargs.get('project')).raw_value\n except ParameterSetting.DoesNotExist:\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [x.value for x in 
instance.get_typed_parameter().get_available_choices()]\n else:\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n if instance.type == BaseParameter.BOOLEAN_TYPE:\n return 'Boolean'\n elif instance.type == BaseParameter.STATIC_TYPE:\n return 'Static'\n elif instance.type == BaseParameter.STRING_TYPE:\n return 'String'\n elif instance.type == BaseParameter.CHOICE_TYPE:\n return 'Choice'\n elif instance.type == BaseParameter.TEXT_TYPE:\n return 'Text'\n elif instance.type == BaseParameter.INTEGER_TYPE:\n return 'Integer'\n else:\n return 'Float'\n<|end_body_2|>\n", "revision_id": "dfa60c9a812e52fa44f0d3cf1c201943574976df", "skeleton": "<|skeleton|>\nclass ParameterSerializer:\n \"\"\"Serializer for Parameter model.\"\"\"\n\n def get_value(self, instance):\n \"\"\"Get the value of the ParameterSetting belonging to the Parameter.\"\"\"\n <|body_0|>\n\n def get_choices(self, instance):\n \"\"\"Get choices corresponding to Choice parameter.\"\"\"\n <|body_1|>\n\n def get_type(self, instance):\n \"\"\"Get the type of a Parameter.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ParameterSerializer:\n \"\"\"Serializer for Parameter model.\"\"\"\n\n def get_value(self, instance):\n \"\"\"Get the value of the ParameterSetting belonging to the Parameter.\"\"\"\n try:\n return ParameterSetting.objects.get(base_parameter=instance, project=self.context.get('view').kwargs.get('project')).raw_value\n except ParameterSetting.DoesNotExist:\n return None\n\n def get_choices(self, instance):\n \"\"\"Get choices corresponding to Choice parameter.\"\"\"\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [x.value for x in instance.get_typed_parameter().get_available_choices()]\n else:\n return None\n\n def get_type(self, instance):\n \"\"\"Get the type of a Parameter.\"\"\"\n if instance.type == BaseParameter.BOOLEAN_TYPE:\n return 'Boolean'\n elif instance.type == BaseParameter.STATIC_TYPE:\n return 'Static'\n elif instance.type == BaseParameter.STRING_TYPE:\n return 'String'\n elif instance.type == BaseParameter.CHOICE_TYPE:\n return 'Choice'\n elif instance.type == BaseParameter.TEXT_TYPE:\n return 'Text'\n elif instance.type == BaseParameter.INTEGER_TYPE:\n return 'Integer'\n else:\n return 'Float'\n", "source": "the_stack_v2_python_sparse", "source_path": "equestria/processes/api/v1/serializers.py", "source_repo": "KiOui/CLST-2020", "split": "val", "star_events_count": 0} {"blob_id": "d40ddc1f4e0524c01f019316c920a21d30a59937", "bodies": ["super().__init__()\nself.login_url = login_url\nself.cj = cj\nself.pw_mgr = pw_mgr\nself.save_cookies = save_cookies\ntry:\n self.cj.load(ignore_discard=True)\nexcept IOError:\n pass", "if res.code == 200 and res.geturl().startswith(self.login_url + '?'):\n self.cj.extract_cookies(res, req)\n data = urllib.parse.urlencode(self.pw_mgr.get_cred())\n req2 = urllib.request.Request(self.login_url, data.encode('iso-8859-1'))\n opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cj))\n res2 = opener.open(req2)\n self.cj.extract_cookies(res2, req2)\n res = opener.open(req)\n if res.geturl().startswith(self.login_url + '?'):\n raise Exception('Login failed.')\n if self.save_cookies:\n self.cj.extract_cookies(res, req)\n self.cj.save(ignore_discard=True)\nreturn res"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.login_url = login_url\n self.cj = cj\n self.pw_mgr = pw_mgr\n self.save_cookies = save_cookies\n try:\n 
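A hedged design note on the `get_type` body in the `ParameterSerializer` record above: the six-branch if/elif ladder with a `'Float'` fallback can be stated as a lookup table. `BaseParameter` and its `*_TYPE` constants are assumed from the surrounding Django project and are not defined here; this is an alternative sketch, not the record's code.

# Assumes BaseParameter from the host project; same mapping, expressed as data.
_TYPE_NAMES = {
    BaseParameter.BOOLEAN_TYPE: 'Boolean',
    BaseParameter.STATIC_TYPE: 'Static',
    BaseParameter.STRING_TYPE: 'String',
    BaseParameter.CHOICE_TYPE: 'Choice',
    BaseParameter.TEXT_TYPE: 'Text',
    BaseParameter.INTEGER_TYPE: 'Integer',
}

def get_type(self, instance):
    """Get the type of a Parameter, defaulting to 'Float'."""
    return _TYPE_NAMES.get(instance.type, 'Float')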
self.cj.load(ignore_discard=True)\n except IOError:\n pass\n<|end_body_0|>\n\n<|body_start_1|>\n if res.code == 200 and res.geturl().startswith(self.login_url + '?'):\n self.cj.extract_cookies(res, req)\n data = urllib.parse.urlencode(self.pw_mgr.get_cred())\n req2 = urllib.request.Request(self.login_url, data.encode('iso-8859-1'))\n opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cj))\n res2 = opener.open(req2)\n self.cj.extract_cookies(res2, req2)\n res = opener.open(req)\n if res.geturl().startswith(self.login_url + '?'):\n raise Exception('Login failed.')\n if self.save_cookies:\n self.cj.extract_cookies(res, req)\n self.cj.save(ignore_discard=True)\n return res\n<|end_body_1|>\n", "class_docstring": "urllib.request style handler for Cosign protected URLs. See http://weblogin.org SYNOPSIS: # Cosign relies on cookies. cj = http.cookiejar.MozillaCookieJar('cookies.txt') # We need an opener that handles cookies and any cosign redirects and # logins. opener = urllib.request.build_opener( urllib.request.HTTPCookieProcessor(cj), # Here's the CosignHandler. CosignHandler('https://cosign.login/page', cj, CosignPasswordMgr() # If you've got one big program you'll probably # want to keep the cookies in memory, but for # lots of little programs we get single sign on # behaviour by saving and loading to/from a # file. save_cookies=True ) ) # Construct a request for the page we actually want req = url", "class_name": "CosignHandler", "detected_licenses": ["MIT", "GPL-3.0-or-later"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CosignHandler:\n \"\"\"urllib.request style handler for Cosign protected URLs. See http://weblogin.org SYNOPSIS: # Cosign relies on cookies. cj = http.cookiejar.MozillaCookieJar('cookies.txt') # We need an opener that handles cookies and any cosign redirects and # logins. opener = urllib.request.build_opener( urllib.request.HTTPCookieProcessor(cj), # Here's the CosignHandler. CosignHandler('https://cosign.login/page', cj, CosignPasswordMgr() # If you've got one big program you'll probably # want to keep the cookies in memory, but for # lots of little programs we get single sign on # behaviour by saving and loading to/from a # file. save_cookies=True ) ) # Construct a request for the page we actually want req = url\"\"\"\n\n def __init__(self, login_url, cj, pw_mgr, save_cookies=True):\n \"\"\"Construct new CosignHandler. Args: login_url: URL of cosign login page. Used to figure out if we have been redirected to the login page after a failed authentication, and as the URL to POST to to log in. cj: An http.cookiejar.CookieJar or equivalent. You'll need something that implements the FileCookieJar interface if you want to load/save cookies. pw_mgr: A CosignPasswordMgr object or equivalent. This object will provide (and if necessary prompt for) the username and password. save_cookies: Whether or not to save cookies to a file after each request. Required for single sign on between different scripts.\"\"\"\n <|body_0|>\n\n def https_response(self, req, res):\n \"\"\"Handle https_response. If the response is from the cosign login page (starts with self.login_url) then log in to cosign and retry. 
Otherwise continue as normal.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.login_url = login_url\n self.cj = cj\n self.pw_mgr = pw_mgr\n self.save_cookies = save_cookies\n try:\n self.cj.load(ignore_discard=True)\n except IOError:\n pass\n<|end_body_0|>\n\n<|body_start_1|>\n if res.code == 200 and res.geturl().startswith(self.login_url + '?'):\n self.cj.extract_cookies(res, req)\n data = urllib.parse.urlencode(self.pw_mgr.get_cred())\n req2 = urllib.request.Request(self.login_url, data.encode('iso-8859-1'))\n opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cj))\n res2 = opener.open(req2)\n self.cj.extract_cookies(res2, req2)\n res = opener.open(req)\n if res.geturl().startswith(self.login_url + '?'):\n raise Exception('Login failed.')\n if self.save_cookies:\n self.cj.extract_cookies(res, req)\n self.cj.save(ignore_discard=True)\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000475", "length_bytes": 5961, "license_type": "permissive", "methods": [{"docstring": "Construct new CosignHandler. Args: login_url: URL of cosign login page. Used to figure out if we have been redirected to the login page after a failed authentication, and as the URL to POST to to log in. cj: An http.cookiejar.CookieJar or equivalent. You'll need something that implements the FileCookieJar interface if you want to load/save cookies. pw_mgr: A CosignPasswordMgr object or equivalent. This object will provide (and if necessary prompt for) the username and password. save_cookies: Whether or not to save cookies to a file after each request. Required for single sign on between different scripts.", "name": "__init__", "signature": "def __init__(self, login_url, cj, pw_mgr, save_cookies=True)"}, {"docstring": "Handle https_response. If the response is from the cosign login page (starts with self.login_url) then log in to cosign and retry. Otherwise continue as normal.", "name": "https_response", "signature": "def https_response(self, req, res)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002308", "prompt": "Implement the Python class `CosignHandler` described below.\n\nClass description:\nurllib.request style handler for Cosign protected URLs. See http://weblogin.org SYNOPSIS: # Cosign relies on cookies. cj = http.cookiejar.MozillaCookieJar('cookies.txt') # We need an opener that handles cookies and any cosign redirects and # logins. opener = urllib.request.build_opener( urllib.request.HTTPCookieProcessor(cj), # Here's the CosignHandler. CosignHandler('https://cosign.login/page', cj, CosignPasswordMgr() # If you've got one big program you'll probably # want to keep the cookies in memory, but for # lots of little programs we get single sign on # behaviour by saving and loading to/from a # file. save_cookies=True ) ) # Construct a request for the page we actually want req = url\n\nMethod signatures and docstrings:\n- def __init__(self, login_url, cj, pw_mgr, save_cookies=True): Construct new CosignHandler. Args: login_url: URL of cosign login page. Used to figure out if we have been redirected to the login page after a failed authentication, and as the URL to POST to to log in. cj: An http.cookiejar.CookieJar or equivalent. You'll need something that implements the FileCookieJar interface if you want to load/save cookies. pw_mgr: A CosignPasswordMgr object or equivalent. This object will provide (and if necessary prompt for) the username and password. 
save_cookies: Whether or not to save cookies to a file after each request. Required for single sign on between different scripts.\n- def https_response(self, req, res): Handle https_response. If the response is from the cosign login page (starts with self.login_url) then log in to cosign and retry. Otherwise continue as normal.", "prompted_full_text": "Implement the Python class `CosignHandler` described below.\n\nClass description:\nurllib.request style handler for Cosign protected URLs. See http://weblogin.org SYNOPSIS: # Cosign relies on cookies. cj = http.cookiejar.MozillaCookieJar('cookies.txt') # We need an opener that handles cookies and any cosign redirects and # logins. opener = urllib.request.build_opener( urllib.request.HTTPCookieProcessor(cj), # Here's the CosignHandler. CosignHandler('https://cosign.login/page', cj, CosignPasswordMgr() # If you've got one big program you'll probably # want to keep the cookies in memory, but for # lots of little programs we get single sign on # behaviour by saving and loading to/from a # file. save_cookies=True ) ) # Construct a request for the page we actually want req = url\n\nMethod signatures and docstrings:\n- def __init__(self, login_url, cj, pw_mgr, save_cookies=True): Construct new CosignHandler. Args: login_url: URL of cosign login page. Used to figure out if we have been redirected to the login page after a failed authentication, and as the URL to POST to to log in. cj: An http.cookiejar.CookieJar or equivalent. You'll need something that implements the FileCookieJar interface if you want to load/save cookies. pw_mgr: A CosignPasswordMgr object or equivalent. This object will provide (and if necessary prompt for) the username and password. save_cookies: Whether or not to save cookies to a file after each request. Required for single sign on between different scripts.\n- def https_response(self, req, res): Handle https_response. If the response is from the cosign login page (starts with self.login_url) then log in to cosign and retry. Otherwise continue as normal.\n\n<|skeleton|>\nclass CosignHandler:\n \"\"\"urllib.request style handler for Cosign protected URLs. See http://weblogin.org SYNOPSIS: # Cosign relies on cookies. cj = http.cookiejar.MozillaCookieJar('cookies.txt') # We need an opener that handles cookies and any cosign redirects and # logins. opener = urllib.request.build_opener( urllib.request.HTTPCookieProcessor(cj), # Here's the CosignHandler. CosignHandler('https://cosign.login/page', cj, CosignPasswordMgr() # If you've got one big program you'll probably # want to keep the cookies in memory, but for # lots of little programs we get single sign on # behaviour by saving and loading to/from a # file. save_cookies=True ) ) # Construct a request for the page we actually want req = url\"\"\"\n\n def __init__(self, login_url, cj, pw_mgr, save_cookies=True):\n \"\"\"Construct new CosignHandler. Args: login_url: URL of cosign login page. Used to figure out if we have been redirected to the login page after a failed authentication, and as the URL to POST to to log in. cj: An http.cookiejar.CookieJar or equivalent. You'll need something that implements the FileCookieJar interface if you want to load/save cookies. pw_mgr: A CosignPasswordMgr object or equivalent. This object will provide (and if necessary prompt for) the username and password. save_cookies: Whether or not to save cookies to a file after each request. 
Required for single sign on between different scripts.\"\"\"\n <|body_0|>\n\n def https_response(self, req, res):\n \"\"\"Handle https_response. If the response is from the cosign login page (starts with self.login_url) then log in to cosign and retry. Otherwise continue as normal.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.login_url = login_url\n self.cj = cj\n self.pw_mgr = pw_mgr\n self.save_cookies = save_cookies\n try:\n self.cj.load(ignore_discard=True)\n except IOError:\n pass\n<|end_body_0|>\n\n<|body_start_1|>\n if res.code == 200 and res.geturl().startswith(self.login_url + '?'):\n self.cj.extract_cookies(res, req)\n data = urllib.parse.urlencode(self.pw_mgr.get_cred())\n req2 = urllib.request.Request(self.login_url, data.encode('iso-8859-1'))\n opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cj))\n res2 = opener.open(req2)\n self.cj.extract_cookies(res2, req2)\n res = opener.open(req)\n if res.geturl().startswith(self.login_url + '?'):\n raise Exception('Login failed.')\n if self.save_cookies:\n self.cj.extract_cookies(res, req)\n self.cj.save(ignore_discard=True)\n return res\n<|end_body_1|>\n", "revision_id": "d097ca0ad6a6aee2180d32dce6a3322621f655fd", "skeleton": "<|skeleton|>\nclass CosignHandler:\n \"\"\"urllib.request style handler for Cosign protected URLs. See http://weblogin.org SYNOPSIS: # Cosign relies on cookies. cj = http.cookiejar.MozillaCookieJar('cookies.txt') # We need an opener that handles cookies and any cosign redirects and # logins. opener = urllib.request.build_opener( urllib.request.HTTPCookieProcessor(cj), # Here's the CosignHandler. CosignHandler('https://cosign.login/page', cj, CosignPasswordMgr() # If you've got one big program you'll probably # want to keep the cookies in memory, but for # lots of little programs we get single sign on # behaviour by saving and loading to/from a # file. save_cookies=True ) ) # Construct a request for the page we actually want req = url\"\"\"\n\n def __init__(self, login_url, cj, pw_mgr, save_cookies=True):\n \"\"\"Construct new CosignHandler. Args: login_url: URL of cosign login page. Used to figure out if we have been redirected to the login page after a failed authentication, and as the URL to POST to to log in. cj: An http.cookiejar.CookieJar or equivalent. You'll need something that implements the FileCookieJar interface if you want to load/save cookies. pw_mgr: A CosignPasswordMgr object or equivalent. This object will provide (and if necessary prompt for) the username and password. save_cookies: Whether or not to save cookies to a file after each request. Required for single sign on between different scripts.\"\"\"\n <|body_0|>\n\n def https_response(self, req, res):\n \"\"\"Handle https_response. If the response is from the cosign login page (starts with self.login_url) then log in to cosign and retry. Otherwise continue as normal.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CosignHandler:\n \"\"\"urllib.request style handler for Cosign protected URLs. See http://weblogin.org SYNOPSIS: # Cosign relies on cookies. cj = http.cookiejar.MozillaCookieJar('cookies.txt') # We need an opener that handles cookies and any cosign redirects and # logins. opener = urllib.request.build_opener( urllib.request.HTTPCookieProcessor(cj), # Here's the CosignHandler. 
CosignHandler('https://cosign.login/page', cj, CosignPasswordMgr() # If you've got one big program you'll probably # want to keep the cookies in memory, but for # lots of little programs we get single sign on # behaviour by saving and loading to/from a # file. save_cookies=True ) ) # Construct a request for the page we actually want req = url\"\"\"\n\n def __init__(self, login_url, cj, pw_mgr, save_cookies=True):\n \"\"\"Construct new CosignHandler. Args: login_url: URL of cosign login page. Used to figure out if we have been redirected to the login page after a failed authentication, and as the URL to POST to to log in. cj: An http.cookiejar.CookieJar or equivalent. You'll need something that implements the FileCookieJar interface if you want to load/save cookies. pw_mgr: A CosignPasswordMgr object or equivalent. This object will provide (and if necessary prompt for) the username and password. save_cookies: Whether or not to save cookies to a file after each request. Required for single sign on between different scripts.\"\"\"\n super().__init__()\n self.login_url = login_url\n self.cj = cj\n self.pw_mgr = pw_mgr\n self.save_cookies = save_cookies\n try:\n self.cj.load(ignore_discard=True)\n except IOError:\n pass\n\n def https_response(self, req, res):\n \"\"\"Handle https_response. If the response is from the cosign login page (starts with self.login_url) then log in to cosign and retry. Otherwise continue as normal.\"\"\"\n if res.code == 200 and res.geturl().startswith(self.login_url + '?'):\n self.cj.extract_cookies(res, req)\n data = urllib.parse.urlencode(self.pw_mgr.get_cred())\n req2 = urllib.request.Request(self.login_url, data.encode('iso-8859-1'))\n opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cj))\n res2 = opener.open(req2)\n self.cj.extract_cookies(res2, req2)\n res = opener.open(req)\n if res.geturl().startswith(self.login_url + '?'):\n raise Exception('Login failed.')\n if self.save_cookies:\n self.cj.extract_cookies(res, req)\n self.cj.save(ignore_discard=True)\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "recipes/Python/578217_Cosign_Handler/recipe-578217.py", "source_repo": "betty29/code-1", "split": "val", "star_events_count": 0} {"blob_id": "c00d7d8ba193fdc9464513dbbe9f3cf1d3efc0f0", "bodies": ["self.time = 0\nself.recentMax = 10\nself.tweetTime = dict()\nself.user = dict()", "if userId not in self.user:\n self.user[userId] = Twitter.Node()\nself.user[userId].tweet.append(tweetId)\nself.time += 1\nself.tweetTime[tweetId] = self.time", "if userId not in self.user:\n return list()\nelse:\n ans = self.user[userId].tweet[-10:][::-1]\n for followeeId in self.user[userId].followee:\n if followeeId in self.user:\n opt = self.user[followeeId].tweet[-10:][::-1]\n i, j, combined = (0, 0, list())\n while i < len(ans) and j < len(opt):\n if self.tweetTime[ans[i]] > self.tweetTime[opt[j]]:\n combined.append(ans[i])\n i += 1\n else:\n combined.append(opt[j])\n j += 1\n combined.extend(ans[i:])\n combined.extend(opt[j:])\n ans = combined[:10]\n return ans", "if followerId != followeeId:\n if followerId not in self.user:\n self.user[followerId] = Twitter.Node()\n self.user[followerId].followee.add(followeeId)", "if followerId != followeeId:\n if followerId in self.user:\n self.user[followerId].followee.discard(followeeId)"], "bodies_text": "<|body_start_0|>\n self.time = 0\n self.recentMax = 10\n self.tweetTime = dict()\n self.user = dict()\n<|end_body_0|>\n\n<|body_start_1|>\n if userId not in self.user:\n self.user[userId] 
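The `CosignHandler` skeleton above strips the base class that its `super().__init__()` call implies (presumably `urllib.request.BaseHandler`). Below is a cleaned-up version of the usage SYNOPSIS from the class's own docstring; `CosignPasswordMgr` is assumed to live in the same recipe, and the URLs are illustrative.

# Usage sketch reconstructed from the docstring's SYNOPSIS (stdlib only).
import http.cookiejar
import urllib.request

cj = http.cookiejar.MozillaCookieJar('cookies.txt')   # Cosign relies on cookies
opener = urllib.request.build_opener(
    urllib.request.HTTPCookieProcessor(cj),
    CosignHandler('https://cosign.login/page', cj, CosignPasswordMgr(),
                  save_cookies=True),                 # file-backed single sign-on
)
# Request the page we actually want; the handler transparently logs in.
res = opener.open(urllib.request.Request('https://protected.example/page'))
print(res.read())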
= Twitter.Node()\n self.user[userId].tweet.append(tweetId)\n self.time += 1\n self.tweetTime[tweetId] = self.time\n<|end_body_1|>\n\n<|body_start_2|>\n if userId not in self.user:\n return list()\n else:\n ans = self.user[userId].tweet[-10:][::-1]\n for followeeId in self.user[userId].followee:\n if followeeId in self.user:\n opt = self.user[followeeId].tweet[-10:][::-1]\n i, j, combined = (0, 0, list())\n while i < len(ans) and j < len(opt):\n if self.tweetTime[ans[i]] > self.tweetTime[opt[j]]:\n combined.append(ans[i])\n i += 1\n else:\n combined.append(opt[j])\n j += 1\n combined.extend(ans[i:])\n combined.extend(opt[j:])\n ans = combined[:10]\n return ans\n<|end_body_2|>\n\n<|body_start_3|>\n if followerId != followeeId:\n if followerId not in self.user:\n self.user[followerId] = Twitter.Node()\n self.user[followerId].followee.add(followeeId)\n<|end_body_3|>\n\n<|body_start_4|>\n if followerId != followeeId:\n if followerId in self.user:\n self.user[followerId].followee.discard(followeeId)\n<|end_body_4|>\n", "class_docstring": "", "class_name": "Twitter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Twitter:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def postTweet(self, userId: int, tweetId: int) -> None:\n \"\"\"Compose a new tweet.\"\"\"\n <|body_1|>\n\n def getNewsFeed(self, userId: int) -> List[int]:\n \"\"\"Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.\"\"\"\n <|body_2|>\n\n def follow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower follows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n <|body_3|>\n\n def unfollow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower unfollows a followee. 
If the operation is invalid, it should be a no-op.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.time = 0\n self.recentMax = 10\n self.tweetTime = dict()\n self.user = dict()\n<|end_body_0|>\n\n<|body_start_1|>\n if userId not in self.user:\n self.user[userId] = Twitter.Node()\n self.user[userId].tweet.append(tweetId)\n self.time += 1\n self.tweetTime[tweetId] = self.time\n<|end_body_1|>\n\n<|body_start_2|>\n if userId not in self.user:\n return list()\n else:\n ans = self.user[userId].tweet[-10:][::-1]\n for followeeId in self.user[userId].followee:\n if followeeId in self.user:\n opt = self.user[followeeId].tweet[-10:][::-1]\n i, j, combined = (0, 0, list())\n while i < len(ans) and j < len(opt):\n if self.tweetTime[ans[i]] > self.tweetTime[opt[j]]:\n combined.append(ans[i])\n i += 1\n else:\n combined.append(opt[j])\n j += 1\n combined.extend(ans[i:])\n combined.extend(opt[j:])\n ans = combined[:10]\n return ans\n<|end_body_2|>\n\n<|body_start_3|>\n if followerId != followeeId:\n if followerId not in self.user:\n self.user[followerId] = Twitter.Node()\n self.user[followerId].followee.add(followeeId)\n<|end_body_3|>\n\n<|body_start_4|>\n if followerId != followeeId:\n if followerId in self.user:\n self.user[followerId].followee.discard(followeeId)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000476", "length_bytes": 3824, "license_type": "no_license", "methods": [{"docstring": "Initialize your data structure here.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Compose a new tweet.", "name": "postTweet", "signature": "def postTweet(self, userId: int, tweetId: int) -> None"}, {"docstring": "Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.", "name": "getNewsFeed", "signature": "def getNewsFeed(self, userId: int) -> List[int]"}, {"docstring": "Follower follows a followee. If the operation is invalid, it should be a no-op.", "name": "follow", "signature": "def follow(self, followerId: int, followeeId: int) -> None"}, {"docstring": "Follower unfollows a followee. If the operation is invalid, it should be a no-op.", "name": "unfollow", "signature": "def unfollow(self, followerId: int, followeeId: int) -> None"}], "n_methods": 5, "prompt": "Implement the Python class `Twitter` described below.\n\nClass description:\nImplement the Twitter class.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize your data structure here.\n- def postTweet(self, userId: int, tweetId: int) -> None: Compose a new tweet.\n- def getNewsFeed(self, userId: int) -> List[int]: Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.\n- def follow(self, followerId: int, followeeId: int) -> None: Follower follows a followee. If the operation is invalid, it should be a no-op.\n- def unfollow(self, followerId: int, followeeId: int) -> None: Follower unfollows a followee. 
If the operation is invalid, it should be a no-op.", "prompted_full_text": "Implement the Python class `Twitter` described below.\n\nClass description:\nImplement the Twitter class.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize your data structure here.\n- def postTweet(self, userId: int, tweetId: int) -> None: Compose a new tweet.\n- def getNewsFeed(self, userId: int) -> List[int]: Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.\n- def follow(self, followerId: int, followeeId: int) -> None: Follower follows a followee. If the operation is invalid, it should be a no-op.\n- def unfollow(self, followerId: int, followeeId: int) -> None: Follower unfollows a followee. If the operation is invalid, it should be a no-op.\n\n<|skeleton|>\nclass Twitter:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def postTweet(self, userId: int, tweetId: int) -> None:\n \"\"\"Compose a new tweet.\"\"\"\n <|body_1|>\n\n def getNewsFeed(self, userId: int) -> List[int]:\n \"\"\"Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.\"\"\"\n <|body_2|>\n\n def follow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower follows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n <|body_3|>\n\n def unfollow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower unfollows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.time = 0\n self.recentMax = 10\n self.tweetTime = dict()\n self.user = dict()\n<|end_body_0|>\n\n<|body_start_1|>\n if userId not in self.user:\n self.user[userId] = Twitter.Node()\n self.user[userId].tweet.append(tweetId)\n self.time += 1\n self.tweetTime[tweetId] = self.time\n<|end_body_1|>\n\n<|body_start_2|>\n if userId not in self.user:\n return list()\n else:\n ans = self.user[userId].tweet[-10:][::-1]\n for followeeId in self.user[userId].followee:\n if followeeId in self.user:\n opt = self.user[followeeId].tweet[-10:][::-1]\n i, j, combined = (0, 0, list())\n while i < len(ans) and j < len(opt):\n if self.tweetTime[ans[i]] > self.tweetTime[opt[j]]:\n combined.append(ans[i])\n i += 1\n else:\n combined.append(opt[j])\n j += 1\n combined.extend(ans[i:])\n combined.extend(opt[j:])\n ans = combined[:10]\n return ans\n<|end_body_2|>\n\n<|body_start_3|>\n if followerId != followeeId:\n if followerId not in self.user:\n self.user[followerId] = Twitter.Node()\n self.user[followerId].followee.add(followeeId)\n<|end_body_3|>\n\n<|body_start_4|>\n if followerId != followeeId:\n if followerId in self.user:\n self.user[followerId].followee.discard(followeeId)\n<|end_body_4|>\n", "revision_id": "f56e59f116e6b51e222debdd575e840b74165568", "skeleton": "<|skeleton|>\nclass Twitter:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def postTweet(self, userId: int, tweetId: int) -> None:\n \"\"\"Compose a new tweet.\"\"\"\n <|body_1|>\n\n def getNewsFeed(self, userId: int) -> List[int]:\n \"\"\"Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. 
Tweets must be ordered from most recent to least recent.\"\"\"\n <|body_2|>\n\n def follow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower follows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n <|body_3|>\n\n def unfollow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower unfollows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Twitter:\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n self.time = 0\n self.recentMax = 10\n self.tweetTime = dict()\n self.user = dict()\n\n def postTweet(self, userId: int, tweetId: int) -> None:\n \"\"\"Compose a new tweet.\"\"\"\n if userId not in self.user:\n self.user[userId] = Twitter.Node()\n self.user[userId].tweet.append(tweetId)\n self.time += 1\n self.tweetTime[tweetId] = self.time\n\n def getNewsFeed(self, userId: int) -> List[int]:\n \"\"\"Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.\"\"\"\n if userId not in self.user:\n return list()\n else:\n ans = self.user[userId].tweet[-10:][::-1]\n for followeeId in self.user[userId].followee:\n if followeeId in self.user:\n opt = self.user[followeeId].tweet[-10:][::-1]\n i, j, combined = (0, 0, list())\n while i < len(ans) and j < len(opt):\n if self.tweetTime[ans[i]] > self.tweetTime[opt[j]]:\n combined.append(ans[i])\n i += 1\n else:\n combined.append(opt[j])\n j += 1\n combined.extend(ans[i:])\n combined.extend(opt[j:])\n ans = combined[:10]\n return ans\n\n def follow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower follows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n if followerId != followeeId:\n if followerId not in self.user:\n self.user[followerId] = Twitter.Node()\n self.user[followerId].followee.add(followeeId)\n\n def unfollow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower unfollows a followee. 
If the operation is invalid, it should be a no-op.\"\"\"\n if followerId != followeeId:\n if followerId in self.user:\n self.user[followerId].followee.discard(followeeId)\n", "source": "the_stack_v2_python_sparse", "source_path": "problem_355.py", "source_repo": "zhou-jia-ming/leetcode-py", "split": "val", "star_events_count": 0} {"blob_id": "4c801d8d88e9ca231df0d601a38ab26178bb531b", "bodies": ["user = authenticate(request)\nif not MartAccessPermissions.top().user_can_access_definition(user, definition_id):\n raise HTTPUnauthorized()\nif not get_settings().mart_allow_runtime_creation:\n raise HTTPForbidden('Runtime Mart creation is not allowed')\ncreator = MartCreator(user, definition_id)\ntry:\n creator.validate_parameters(request.payload.parameters)\nexcept Error as exc:\n raise HTTPBadRequest(str(exc))\ndefinition = get_definition(definition_id)\nif not MartQuota.top().can_create_mart(user, definition):\n raise HTTPForbidden('Creating a Mart of this Definition would violate your Quota')\npayload = request.payload._asdict()\npayload['owner'] = user\npayload['definition'] = definition_id\nfrom rex.asynctask import get_transport\ntransport = get_transport()\ntransport.submit_task(get_settings().mart_runtime_creation_queue, payload)\nresponse = self.make_response(request, payload)\nresponse.status = 202\nreturn response", "user = authenticate(request)\nmart_access = MartAccessPermissions.top()\nmarts = mart_access.get_marts_for_user(user, definition_id=definition_id)\nresponse = {'definition': render_definition(get_definition(definition_id)), 'marts': [mart.as_dict() for mart in marts]}\nreturn response"], "bodies_text": "<|body_start_0|>\n user = authenticate(request)\n if not MartAccessPermissions.top().user_can_access_definition(user, definition_id):\n raise HTTPUnauthorized()\n if not get_settings().mart_allow_runtime_creation:\n raise HTTPForbidden('Runtime Mart creation is not allowed')\n creator = MartCreator(user, definition_id)\n try:\n creator.validate_parameters(request.payload.parameters)\n except Error as exc:\n raise HTTPBadRequest(str(exc))\n definition = get_definition(definition_id)\n if not MartQuota.top().can_create_mart(user, definition):\n raise HTTPForbidden('Creating a Mart of this Definition would violate your Quota')\n payload = request.payload._asdict()\n payload['owner'] = user\n payload['definition'] = definition_id\n from rex.asynctask import get_transport\n transport = get_transport()\n transport.submit_task(get_settings().mart_runtime_creation_queue, payload)\n response = self.make_response(request, payload)\n response.status = 202\n return response\n<|end_body_0|>\n\n<|body_start_1|>\n user = authenticate(request)\n mart_access = MartAccessPermissions.top()\n marts = mart_access.get_marts_for_user(user, definition_id=definition_id)\n response = {'definition': render_definition(get_definition(definition_id)), 'marts': [mart.as_dict() for mart in marts]}\n return response\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DefinitionDetailResource", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DefinitionDetailResource:\n\n def create(self, request, definition_id, **params):\n \"\"\"Initiates the creation of a Mart using the specified Mart Definition. The body of this request allows two parameters: * ``purge_on_failure``: Whether or not to purge the remnants of the Mart if creation fails at any point. Optional. Defaults to ``true``. 
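One genuine gap in the `Twitter` record above: the bodies call `Twitter.Node()`, but the skeleton never defines `Node`. A minimal class consistent with how the bodies use `.tweet` and `.followee` (a reconstruction, not part of the record), followed by a quick usage check:

# Reconstructed inner class; attribute names follow the bodies' usage.
class Node:
    def __init__(self):
        self.tweet = []        # tweet ids in posting order
        self.followee = set()  # ids of users this user follows

Twitter.Node = Node            # the bodies expect it nested inside Twitter

tw = Twitter()
tw.postTweet(1, 5)             # user 1 tweets id 5
tw.follow(1, 2)
tw.postTweet(2, 6)             # followee's tweet is newer
assert tw.getNewsFeed(1) == [6, 5]
tw.unfollow(1, 2)
assert tw.getNewsFeed(1) == [5]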
* ``leave_incomplete``: Whether or not to leave the status of the Mart as not \"complete\" when the creation has actually completed. Optional. Defaults to ``false``.\"\"\"\n <|body_0|>\n\n def retrieve(self, request, definition_id, **params):\n \"\"\"Retrieves details about the specified Mart Definition, as well as a list of the Marts that were created using that Definition that the user has access to.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = authenticate(request)\n if not MartAccessPermissions.top().user_can_access_definition(user, definition_id):\n raise HTTPUnauthorized()\n if not get_settings().mart_allow_runtime_creation:\n raise HTTPForbidden('Runtime Mart creation is not allowed')\n creator = MartCreator(user, definition_id)\n try:\n creator.validate_parameters(request.payload.parameters)\n except Error as exc:\n raise HTTPBadRequest(str(exc))\n definition = get_definition(definition_id)\n if not MartQuota.top().can_create_mart(user, definition):\n raise HTTPForbidden('Creating a Mart of this Definition would violate your Quota')\n payload = request.payload._asdict()\n payload['owner'] = user\n payload['definition'] = definition_id\n from rex.asynctask import get_transport\n transport = get_transport()\n transport.submit_task(get_settings().mart_runtime_creation_queue, payload)\n response = self.make_response(request, payload)\n response.status = 202\n return response\n<|end_body_0|>\n\n<|body_start_1|>\n user = authenticate(request)\n mart_access = MartAccessPermissions.top()\n marts = mart_access.get_marts_for_user(user, definition_id=definition_id)\n response = {'definition': render_definition(get_definition(definition_id)), 'marts': [mart.as_dict() for mart in marts]}\n return response\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000477", "length_bytes": 13752, "license_type": "permissive", "methods": [{"docstring": "Initiates the creation of a Mart using the specified Mart Definition. The body of this request allows two parameters: * ``purge_on_failure``: Whether or not to purge the remnants of the Mart if creation fails at any point. Optional. Defaults to ``true``. * ``leave_incomplete``: Whether or not to leave the status of the Mart as not \"complete\" when the creation has actually completed. Optional. Defaults to ``false``.", "name": "create", "signature": "def create(self, request, definition_id, **params)"}, {"docstring": "Retrieves details about the specified Mart Definition, as well as a list of the Marts that were created using that Definition that the user has access to.", "name": "retrieve", "signature": "def retrieve(self, request, definition_id, **params)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007366", "prompt": "Implement the Python class `DefinitionDetailResource` described below.\n\nClass description:\nImplement the DefinitionDetailResource class.\n\nMethod signatures and docstrings:\n- def create(self, request, definition_id, **params): Initiates the creation of a Mart using the specified Mart Definition. The body of this request allows two parameters: * ``purge_on_failure``: Whether or not to purge the remnants of the Mart if creation fails at any point. Optional. Defaults to ``true``. * ``leave_incomplete``: Whether or not to leave the status of the Mart as not \"complete\" when the creation has actually completed. Optional. 
Defaults to ``false``.\n- def retrieve(self, request, definition_id, **params): Retrieves details about the specified Mart Definition, as well as a list of the Marts that were created using that Definition that the user has access to.", "prompted_full_text": "Implement the Python class `DefinitionDetailResource` described below.\n\nClass description:\nImplement the DefinitionDetailResource class.\n\nMethod signatures and docstrings:\n- def create(self, request, definition_id, **params): Initiates the creation of a Mart using the specified Mart Definition. The body of this request allows two parameters: * ``purge_on_failure``: Whether or not to purge the remnants of the Mart if creation fails at any point. Optional. Defaults to ``true``. * ``leave_incomplete``: Whether or not to leave the status of the Mart as not \"complete\" when the creation has actually completed. Optional. Defaults to ``false``.\n- def retrieve(self, request, definition_id, **params): Retrieves details about the specified Mart Definition, as well as a list of the Marts that were created using that Definition that the user has access to.\n\n<|skeleton|>\nclass DefinitionDetailResource:\n\n def create(self, request, definition_id, **params):\n \"\"\"Initiates the creation of a Mart using the specified Mart Definition. The body of this request allows two parameters: * ``purge_on_failure``: Whether or not to purge the remnants of the Mart if creation fails at any point. Optional. Defaults to ``true``. * ``leave_incomplete``: Whether or not to leave the status of the Mart as not \"complete\" when the creation has actually completed. Optional. Defaults to ``false``.\"\"\"\n <|body_0|>\n\n def retrieve(self, request, definition_id, **params):\n \"\"\"Retrieves details about the specified Mart Definition, as well as a list of the Marts that were created using that Definition that the user has access to.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = authenticate(request)\n if not MartAccessPermissions.top().user_can_access_definition(user, definition_id):\n raise HTTPUnauthorized()\n if not get_settings().mart_allow_runtime_creation:\n raise HTTPForbidden('Runtime Mart creation is not allowed')\n creator = MartCreator(user, definition_id)\n try:\n creator.validate_parameters(request.payload.parameters)\n except Error as exc:\n raise HTTPBadRequest(str(exc))\n definition = get_definition(definition_id)\n if not MartQuota.top().can_create_mart(user, definition):\n raise HTTPForbidden('Creating a Mart of this Definition would violate your Quota')\n payload = request.payload._asdict()\n payload['owner'] = user\n payload['definition'] = definition_id\n from rex.asynctask import get_transport\n transport = get_transport()\n transport.submit_task(get_settings().mart_runtime_creation_queue, payload)\n response = self.make_response(request, payload)\n response.status = 202\n return response\n<|end_body_0|>\n\n<|body_start_1|>\n user = authenticate(request)\n mart_access = MartAccessPermissions.top()\n marts = mart_access.get_marts_for_user(user, definition_id=definition_id)\n response = {'definition': render_definition(get_definition(definition_id)), 'marts': [mart.as_dict() for mart in marts]}\n return response\n<|end_body_1|>\n", "revision_id": "5588355677873ef1531ddbd1816eb2b0f6ea6996", "skeleton": "<|skeleton|>\nclass DefinitionDetailResource:\n\n def create(self, request, definition_id, **params):\n \"\"\"Initiates the creation of a Mart using the specified Mart Definition. 
The body of this request allows two parameters: * ``purge_on_failure``: Whether or not to purge the remnants of the Mart if creation fails at any point. Optional. Defaults to ``true``. * ``leave_incomplete``: Whether or not to leave the status of the Mart as not \"complete\" when the creation has actually completed. Optional. Defaults to ``false``.\"\"\"\n <|body_0|>\n\n def retrieve(self, request, definition_id, **params):\n \"\"\"Retrieves details about the specified Mart Definition, as well as a list of the Marts that were created using that Definition that the user has access to.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DefinitionDetailResource:\n def create(self, request, definition_id, **params):\n \"\"\"Initiates the creation of a Mart using the specified Mart Definition. The body of this request allows two parameters: * ``purge_on_failure``: Whether or not to purge the remnants of the Mart if creation fails at any point. Optional. Defaults to ``true``. * ``leave_incomplete``: Whether or not to leave the status of the Mart as not \"complete\" when the creation has actually completed. Optional. Defaults to ``false``.\"\"\"\n user = authenticate(request)\n if not MartAccessPermissions.top().user_can_access_definition(user, definition_id):\n raise HTTPUnauthorized()\n if not get_settings().mart_allow_runtime_creation:\n raise HTTPForbidden('Runtime Mart creation is not allowed')\n creator = MartCreator(user, definition_id)\n try:\n creator.validate_parameters(request.payload.parameters)\n except Error as exc:\n raise HTTPBadRequest(str(exc))\n definition = get_definition(definition_id)\n if not MartQuota.top().can_create_mart(user, definition):\n raise HTTPForbidden('Creating a Mart of this Definition would violate your Quota')\n payload = request.payload._asdict()\n payload['owner'] = user\n payload['definition'] = definition_id\n from rex.asynctask import get_transport\n transport = get_transport()\n transport.submit_task(get_settings().mart_runtime_creation_queue, payload)\n response = self.make_response(request, payload)\n response.status = 202\n return response\n\n def retrieve(self, request, definition_id, **params):\n \"\"\"Retrieves details about the specified Mart Definition, as well as a list of the Marts that were created using that Definition that the user has access to.\"\"\"\n user = authenticate(request)\n mart_access = MartAccessPermissions.top()\n marts = mart_access.get_marts_for_user(user, definition_id=definition_id)\n response = {'definition': render_definition(get_definition(definition_id)), 'marts': [mart.as_dict() for mart in marts]}\n return response\n", "source": "the_stack_v2_python_sparse", "source_path": "src/rex.mart/src/rex/mart/commands.py", "source_repo": "prometheusresearch/baseline-codebase", "split": "val", "star_events_count": 9} {"blob_id": "a02aae8b0ad9829c94253ecbd7d633c80ff9b73a", "bodies": ["super().__init__(config)\nself.in_proj_weight = nn.Parameter(torch.cat([vilt_layer.attention.attention.query.weight, vilt_layer.attention.attention.key.weight, vilt_layer.attention.attention.value.weight]))\nself.in_proj_bias = nn.Parameter(torch.cat([vilt_layer.attention.attention.query.bias, vilt_layer.attention.attention.key.bias, vilt_layer.attention.attention.value.bias]))\nself.out_proj_weight = vilt_layer.attention.output.dense.weight\nself.out_proj_bias = vilt_layer.attention.output.dense.bias\nself.linear1_weight = 
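The `create()` body in the `DefinitionDetailResource` record above follows a guard-then-enqueue shape worth naming: authenticate, validate parameters, check quota, hand the payload to an async queue, and return `202 Accepted` without doing the work inline. A generic sketch of that shape; the `rex.*` helpers are specific to that codebase, and everything below is a stand-in:

def create(request, definition_id, *, authenticate, validate, quota_ok, enqueue):
    user = authenticate(request)                 # original raises HTTPUnauthorized
    validate(request.payload.parameters)         # original wraps errors in HTTPBadRequest
    if not quota_ok(user, definition_id):
        raise PermissionError('quota exceeded')  # original raises HTTPForbidden
    payload = dict(request.payload._asdict(), owner=user, definition=definition_id)
    enqueue(payload)                             # Mart creation runs on a worker
    return 202, payload                          # accepted, not yet created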
vilt_layer.intermediate.dense.weight\nself.linear1_bias = vilt_layer.intermediate.dense.bias\nself.linear2_weight = vilt_layer.output.dense.weight\nself.linear2_bias = vilt_layer.output.dense.bias\nself.norm1_eps = vilt_layer.layernorm_before.eps\nself.norm1_weight = vilt_layer.layernorm_before.weight\nself.norm1_bias = vilt_layer.layernorm_before.bias\nself.norm2_eps = vilt_layer.layernorm_after.eps\nself.norm2_weight = vilt_layer.layernorm_after.weight\nself.norm2_bias = vilt_layer.layernorm_after.bias\nself.num_heads = vilt_layer.attention.attention.num_attention_heads\nself.embed_dim = int(vilt_layer.attention.attention.attention_head_size * self.num_heads)\nself.is_last_layer = False\nself.norm_first = True\nself.validate_bettertransformer()", "super().forward_checker()\nattention_mask = None\nhidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask)\nif hidden_states.is_nested and self.is_last_layer:\n hidden_states = hidden_states.to_padded_tensor(0.0)\nreturn (hidden_states,)"], "bodies_text": "<|body_start_0|>\n super().__init__(config)\n self.in_proj_weight = nn.Parameter(torch.cat([vilt_layer.attention.attention.query.weight, vilt_layer.attention.attention.key.weight, vilt_layer.attention.attention.value.weight]))\n self.in_proj_bias = nn.Parameter(torch.cat([vilt_layer.attention.attention.query.bias, vilt_layer.attention.attention.key.bias, vilt_layer.attention.attention.value.bias]))\n self.out_proj_weight = vilt_layer.attention.output.dense.weight\n self.out_proj_bias = vilt_layer.attention.output.dense.bias\n self.linear1_weight = vilt_layer.intermediate.dense.weight\n self.linear1_bias = vilt_layer.intermediate.dense.bias\n self.linear2_weight = vilt_layer.output.dense.weight\n self.linear2_bias = vilt_layer.output.dense.bias\n self.norm1_eps = vilt_layer.layernorm_before.eps\n self.norm1_weight = vilt_layer.layernorm_before.weight\n self.norm1_bias = vilt_layer.layernorm_before.bias\n self.norm2_eps = vilt_layer.layernorm_after.eps\n self.norm2_weight = vilt_layer.layernorm_after.weight\n self.norm2_bias = vilt_layer.layernorm_after.bias\n self.num_heads = vilt_layer.attention.attention.num_attention_heads\n self.embed_dim = int(vilt_layer.attention.attention.attention_head_size * self.num_heads)\n self.is_last_layer = False\n self.norm_first = True\n self.validate_bettertransformer()\n<|end_body_0|>\n\n<|body_start_1|>\n super().forward_checker()\n attention_mask = None\n hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask)\n if hidden_states.is_nested and self.is_last_layer:\n hidden_states = hidden_states.to_padded_tensor(0.0)\n return (hidden_states,)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ViltLayerBetterTransformer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ViltLayerBetterTransformer:\n\n def 
__init__(self, vilt_layer, config):\n \"\"\"A simple conversion of the VilTLayer to its `BetterTransformer` implementation. Args: vilt_layer (`torch.nn.Module`): The original `VilTLayer` where the weights needs to be retrieved.\"\"\"\n <|body_0|>\n\n def forward(self, hidden_states, *_, **__):\n \"\"\"This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(config)\n self.in_proj_weight = nn.Parameter(torch.cat([vilt_layer.attention.attention.query.weight, vilt_layer.attention.attention.key.weight, vilt_layer.attention.attention.value.weight]))\n self.in_proj_bias = nn.Parameter(torch.cat([vilt_layer.attention.attention.query.bias, vilt_layer.attention.attention.key.bias, vilt_layer.attention.attention.value.bias]))\n self.out_proj_weight = vilt_layer.attention.output.dense.weight\n self.out_proj_bias = vilt_layer.attention.output.dense.bias\n self.linear1_weight = vilt_layer.intermediate.dense.weight\n self.linear1_bias = vilt_layer.intermediate.dense.bias\n self.linear2_weight = vilt_layer.output.dense.weight\n self.linear2_bias = vilt_layer.output.dense.bias\n self.norm1_eps = vilt_layer.layernorm_before.eps\n self.norm1_weight = vilt_layer.layernorm_before.weight\n self.norm1_bias = vilt_layer.layernorm_before.bias\n self.norm2_eps = vilt_layer.layernorm_after.eps\n self.norm2_weight = vilt_layer.layernorm_after.weight\n self.norm2_bias = vilt_layer.layernorm_after.bias\n self.num_heads = vilt_layer.attention.attention.num_attention_heads\n self.embed_dim = int(vilt_layer.attention.attention.attention_head_size * self.num_heads)\n self.is_last_layer = False\n self.norm_first = True\n self.validate_bettertransformer()\n<|end_body_0|>\n\n<|body_start_1|>\n super().forward_checker()\n attention_mask = None\n hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask)\n if hidden_states.is_nested and self.is_last_layer:\n hidden_states = hidden_states.to_padded_tensor(0.0)\n return (hidden_states,)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000478", "length_bytes": 43670, "license_type": "no_license", "methods": [{"docstring": "A simple conversion of the VilTLayer to its `BetterTransformer` implementation. Args: vilt_layer (`torch.nn.Module`): The original `VilTLayer` where the weights needs to be retrieved.", "name": "__init__", "signature": "def __init__(self, vilt_layer, config)"}, {"docstring": "This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553", "name": "forward", "signature": "def forward(self, hidden_states, *_, **__)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004382", "prompt": "Implement the Python class `ViltLayerBetterTransformer` described below.\n\nClass description:\nImplement the ViltLayerBetterTransformer class.\n\nMethod signatures and docstrings:\n- def __init__(self, vilt_layer, config): A simple conversion of the VilTLayer to its `BetterTransformer` implementation. 
Args: vilt_layer (`torch.nn.Module`): The original `VilTLayer` where the weights needs to be retrieved.\n- def forward(self, hidden_states, *_, **__): This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553", "prompted_full_text": "Implement the Python class `ViltLayerBetterTransformer` described below.\n\nClass description:\nImplement the ViltLayerBetterTransformer class.\n\nMethod signatures and docstrings:\n- def __init__(self, vilt_layer, config): A simple conversion of the VilTLayer to its `BetterTransformer` implementation. Args: vilt_layer (`torch.nn.Module`): The original `VilTLayer` where the weights needs to be retrieved.\n- def forward(self, hidden_states, *_, **__): This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553\n\n<|skeleton|>\nclass ViltLayerBetterTransformer:\n\n def __init__(self, vilt_layer, config):\n \"\"\"A simple conversion of the VilTLayer to its `BetterTransformer` implementation. Args: vilt_layer (`torch.nn.Module`): The original `VilTLayer` where the weights needs to be retrieved.\"\"\"\n <|body_0|>\n\n def forward(self, hidden_states, *_, **__):\n \"\"\"This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(config)\n self.in_proj_weight = nn.Parameter(torch.cat([vilt_layer.attention.attention.query.weight, vilt_layer.attention.attention.key.weight, vilt_layer.attention.attention.value.weight]))\n self.in_proj_bias = nn.Parameter(torch.cat([vilt_layer.attention.attention.query.bias, vilt_layer.attention.attention.key.bias, vilt_layer.attention.attention.value.bias]))\n self.out_proj_weight = vilt_layer.attention.output.dense.weight\n self.out_proj_bias = vilt_layer.attention.output.dense.bias\n self.linear1_weight = vilt_layer.intermediate.dense.weight\n self.linear1_bias = vilt_layer.intermediate.dense.bias\n self.linear2_weight = vilt_layer.output.dense.weight\n self.linear2_bias = vilt_layer.output.dense.bias\n self.norm1_eps = vilt_layer.layernorm_before.eps\n self.norm1_weight = vilt_layer.layernorm_before.weight\n self.norm1_bias = vilt_layer.layernorm_before.bias\n self.norm2_eps = vilt_layer.layernorm_after.eps\n self.norm2_weight = vilt_layer.layernorm_after.weight\n self.norm2_bias = vilt_layer.layernorm_after.bias\n self.num_heads = vilt_layer.attention.attention.num_attention_heads\n self.embed_dim = int(vilt_layer.attention.attention.attention_head_size * self.num_heads)\n self.is_last_layer = False\n self.norm_first = True\n self.validate_bettertransformer()\n<|end_body_0|>\n\n<|body_start_1|>\n super().forward_checker()\n attention_mask = None\n hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask)\n if hidden_states.is_nested and self.is_last_layer:\n hidden_states = hidden_states.to_padded_tensor(0.0)\n return (hidden_states,)\n<|end_body_1|>\n", "revision_id": "7e55a422588c1d1e00f35a3d3a3ff896cce59e18", "skeleton": "<|skeleton|>\nclass ViltLayerBetterTransformer:\n\n def __init__(self, vilt_layer, config):\n \"\"\"A simple conversion of 
the VilTLayer to its `BetterTransformer` implementation. Args: vilt_layer (`torch.nn.Module`): The original `VilTLayer` where the weights needs to be retrieved.\"\"\"\n <|body_0|>\n\n def forward(self, hidden_states, *_, **__):\n \"\"\"This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ViltLayerBetterTransformer:\n def __init__(self, vilt_layer, config):\n \"\"\"A simple conversion of the VilTLayer to its `BetterTransformer` implementation. Args: vilt_layer (`torch.nn.Module`): The original `VilTLayer` where the weights needs to be retrieved.\"\"\"\n super().__init__(config)\n self.in_proj_weight = nn.Parameter(torch.cat([vilt_layer.attention.attention.query.weight, vilt_layer.attention.attention.key.weight, vilt_layer.attention.attention.value.weight]))\n self.in_proj_bias = nn.Parameter(torch.cat([vilt_layer.attention.attention.query.bias, vilt_layer.attention.attention.key.bias, vilt_layer.attention.attention.value.bias]))\n self.out_proj_weight = vilt_layer.attention.output.dense.weight\n self.out_proj_bias = vilt_layer.attention.output.dense.bias\n self.linear1_weight = vilt_layer.intermediate.dense.weight\n self.linear1_bias = vilt_layer.intermediate.dense.bias\n self.linear2_weight = vilt_layer.output.dense.weight\n self.linear2_bias = vilt_layer.output.dense.bias\n self.norm1_eps = vilt_layer.layernorm_before.eps\n self.norm1_weight = vilt_layer.layernorm_before.weight\n self.norm1_bias = vilt_layer.layernorm_before.bias\n self.norm2_eps = vilt_layer.layernorm_after.eps\n self.norm2_weight = vilt_layer.layernorm_after.weight\n self.norm2_bias = vilt_layer.layernorm_after.bias\n self.num_heads = vilt_layer.attention.attention.num_attention_heads\n self.embed_dim = int(vilt_layer.attention.attention.attention_head_size * self.num_heads)\n self.is_last_layer = False\n self.norm_first = True\n self.validate_bettertransformer()\n\n def forward(self, hidden_states, *_, **__):\n \"\"\"This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553\"\"\"\n super().forward_checker()\n attention_mask = None\n hidden_states = torch._transformer_encoder_layer_fwd(hidden_states, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attention_mask)\n if hidden_states.is_nested and self.is_last_layer:\n hidden_states = hidden_states.to_padded_tensor(0.0)\n return (hidden_states,)\n", "source": "the_stack_v2_python_sparse", "source_path": "generated/test_huggingface_optimum.py", "source_repo": "jansel/pytorch-jit-paritybench", "split": "val", "star_events_count": 35} {"blob_id": "df5d2e0541397e5c8c6863ced056aa9a5711873f", "bodies": ["query = self.session.query(VDealhistory.time, VDealhistory.deal, VDealhistory.positionid, VDealhistory.login, VDealhistory.symbol, VDealhistory.action, VDealhistory.entry, VDealhistory.volume, VDealhistory.price, VDealhistory.priceposition, VDealhistory.profit, VDealhistory.storage, VDealhistory.commission, VDealhistory.profitraw).filter(VDealhistory.uid == uid)\nif start and start != '' and (start != 'undefined'):\n query = 
query.filter(VDealhistory.time >= start)\nif end and end != '' and (end != 'undefined'):\n query = query.filter(VDealhistory.time <= end)\nif mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VDealhistory.login == mtlogin)\nif page == -1:\n return query.all()\nif not page or page == '' or page == 'undefined':\n page = 1\nreturn Pagination(query=query, page=page)", "query = self.session.query(VDealhistory.time, VDealhistory.login, VDealhistory.profit, VDealhistory.storage, VDealhistory.commission, VDealhistory.profitraw).filter(VDealhistory.uid == uid)\nsum_list = []\nprofit_sum = 0\nstorage_sum = 0\ncommission_sum = 0\nprofitraw_sum = 0\nif start and start != '' and (start != 'undefined'):\n query = query.filter(VDealhistory.time >= start)\nif end and end != '' and (end != 'undefined'):\n query = query.filter(VDealhistory.time <= end)\nif mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VDealhistory.login == mtlogin)\nfor obj in query:\n profit_sum = profit_sum + obj.profit\n storage_sum = storage_sum + obj.storage\n commission_sum = commission_sum + obj.commission\n profitraw_sum = profitraw_sum + obj.profitraw\nsum_list.append({'profit_sum': str(profit_sum), 'storage_sum': str(storage_sum), 'commission_sum': str(commission_sum), 'profitraw_sum': str(profitraw_sum)})\nreturn sum_list"], "bodies_text": "<|body_start_0|>\n query = self.session.query(VDealhistory.time, VDealhistory.deal, VDealhistory.positionid, VDealhistory.login, VDealhistory.symbol, VDealhistory.action, VDealhistory.entry, VDealhistory.volume, VDealhistory.price, VDealhistory.priceposition, VDealhistory.profit, VDealhistory.storage, VDealhistory.commission, VDealhistory.profitraw).filter(VDealhistory.uid == uid)\n if start and start != '' and (start != 'undefined'):\n query = query.filter(VDealhistory.time >= start)\n if end and end != '' and (end != 'undefined'):\n query = query.filter(VDealhistory.time <= end)\n if mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VDealhistory.login == mtlogin)\n if page == -1:\n return query.all()\n if not page or page == '' or page == 'undefined':\n page = 1\n return Pagination(query=query, page=page)\n<|end_body_0|>\n\n<|body_start_1|>\n query = self.session.query(VDealhistory.time, VDealhistory.login, VDealhistory.profit, VDealhistory.storage, VDealhistory.commission, VDealhistory.profitraw).filter(VDealhistory.uid == uid)\n sum_list = []\n profit_sum = 0\n storage_sum = 0\n commission_sum = 0\n profitraw_sum = 0\n if start and start != '' and (start != 'undefined'):\n query = query.filter(VDealhistory.time >= start)\n if end and end != '' and (end != 'undefined'):\n query = query.filter(VDealhistory.time <= end)\n if mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VDealhistory.login == mtlogin)\n for obj in query:\n profit_sum = profit_sum + obj.profit\n storage_sum = storage_sum + obj.storage\n commission_sum = commission_sum + obj.commission\n profitraw_sum = profitraw_sum + obj.profitraw\n sum_list.append({'profit_sum': str(profit_sum), 'storage_sum': str(storage_sum), 'commission_sum': str(commission_sum), 'profitraw_sum': str(profitraw_sum)})\n return sum_list\n<|end_body_1|>\n", "class_docstring": "Operations on the v_dealhistory view", "class_name": "VDealhistoryDao", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VDealhistoryDao:\n \"\"\"Operations on the v_dealhistory view\"\"\"\n\n def search_by_uid(self, uid, 
start, end, mtlogin, page=None):\n \"\"\"Given a user id, query closed deals within a time range :param uid: user id :param start: start time :param end: end time :param page: requested page :return: queryset\"\"\"\n <|body_0|>\n\n def searchsum_by_uid(self, uid, start, end, mtlogin):\n \"\"\"Given a user id, compute totals within a time range :param uid: user id :param start: start time :param end: end time :return: totals of each item\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n query = self.session.query(VDealhistory.time, VDealhistory.deal, VDealhistory.positionid, VDealhistory.login, VDealhistory.symbol, VDealhistory.action, VDealhistory.entry, VDealhistory.volume, VDealhistory.price, VDealhistory.priceposition, VDealhistory.profit, VDealhistory.storage, VDealhistory.commission, VDealhistory.profitraw).filter(VDealhistory.uid == uid)\n if start and start != '' and (start != 'undefined'):\n query = query.filter(VDealhistory.time >= start)\n if end and end != '' and (end != 'undefined'):\n query = query.filter(VDealhistory.time <= end)\n if mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VDealhistory.login == mtlogin)\n if page == -1:\n return query.all()\n if not page or page == '' or page == 'undefined':\n page = 1\n return Pagination(query=query, page=page)\n<|end_body_0|>\n\n<|body_start_1|>\n query = self.session.query(VDealhistory.time, VDealhistory.login, VDealhistory.profit, VDealhistory.storage, VDealhistory.commission, VDealhistory.profitraw).filter(VDealhistory.uid == uid)\n sum_list = []\n profit_sum = 0\n storage_sum = 0\n commission_sum = 0\n profitraw_sum = 0\n if start and start != '' and (start != 'undefined'):\n query = query.filter(VDealhistory.time >= start)\n if end and end != '' and (end != 'undefined'):\n query = query.filter(VDealhistory.time <= end)\n if mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VDealhistory.login == mtlogin)\n for obj in query:\n profit_sum = profit_sum + obj.profit\n storage_sum = storage_sum + obj.storage\n commission_sum = commission_sum + obj.commission\n profitraw_sum = profitraw_sum + obj.profitraw\n sum_list.append({'profit_sum': str(profit_sum), 'storage_sum': str(storage_sum), 'commission_sum': str(commission_sum), 'profitraw_sum': str(profitraw_sum)})\n return sum_list\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000479", "length_bytes": 26694, "license_type": "permissive", "methods": [{"docstring": "Given a user id, query closed deals within a time range :param uid: user id :param start: start time :param end: end time :param page: requested page :return: queryset", "name": "search_by_uid", "signature": "def search_by_uid(self, uid, start, end, mtlogin, page=None)"}, {"docstring": "Given a user id, compute totals within a time range :param uid: user id :param start: start time :param end: end time :return: totals of each item", "name": "searchsum_by_uid", "signature": "def searchsum_by_uid(self, uid, start, end, mtlogin)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002007", "prompt": "Implement the Python class `VDealhistoryDao` described below.\n\nClass description:\nOperations on the v_dealhistory view\n\nMethod signatures and docstrings:\n- def search_by_uid(self, uid, start, end, mtlogin, page=None): Given a user id, query closed deals within a time range :param uid: user id :param start: start time :param end: end time :param page: requested page :return: queryset\n- def searchsum_by_uid(self, uid, start, end, mtlogin): Given a user id, compute totals within a time range :param uid: user id :param start: start time :param end: end time :return: totals of each item", "prompted_full_text": "Implement the Python class `VDealhistoryDao` described below.\n\nClass description:\nOperations on the v_dealhistory view\n\nMethod signatures and docstrings:\n- def search_by_uid(self, uid, start, end, mtlogin, page=None): 
Given a user id, query closed deals within a time range :param uid: user id :param start: start time :param end: end time :param page: requested page :return: queryset\n- def searchsum_by_uid(self, uid, start, end, mtlogin): Given a user id, compute totals within a time range :param uid: user id :param start: start time :param end: end time :return: totals of each item\n\n<|skeleton|>\nclass VDealhistoryDao:\n \"\"\"Operations on the v_dealhistory view\"\"\"\n\n def search_by_uid(self, uid, start, end, mtlogin, page=None):\n \"\"\"Given a user id, query closed deals within a time range :param uid: user id :param start: start time :param end: end time :param page: requested page :return: queryset\"\"\"\n <|body_0|>\n\n def searchsum_by_uid(self, uid, start, end, mtlogin):\n \"\"\"Given a user id, compute totals within a time range :param uid: user id :param start: start time :param end: end time :return: totals of each item\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n query = self.session.query(VDealhistory.time, VDealhistory.deal, VDealhistory.positionid, VDealhistory.login, VDealhistory.symbol, VDealhistory.action, VDealhistory.entry, VDealhistory.volume, VDealhistory.price, VDealhistory.priceposition, VDealhistory.profit, VDealhistory.storage, VDealhistory.commission, VDealhistory.profitraw).filter(VDealhistory.uid == uid)\n if start and start != '' and (start != 'undefined'):\n query = query.filter(VDealhistory.time >= start)\n if end and end != '' and (end != 'undefined'):\n query = query.filter(VDealhistory.time <= end)\n if mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VDealhistory.login == mtlogin)\n if page == -1:\n return query.all()\n if not page or page == '' or page == 'undefined':\n page = 1\n return Pagination(query=query, page=page)\n<|end_body_0|>\n\n<|body_start_1|>\n query = self.session.query(VDealhistory.time, VDealhistory.login, VDealhistory.profit, VDealhistory.storage, VDealhistory.commission, VDealhistory.profitraw).filter(VDealhistory.uid == uid)\n sum_list = []\n profit_sum = 0\n storage_sum = 0\n commission_sum = 0\n profitraw_sum = 0\n if start and start != '' and (start != 'undefined'):\n query = query.filter(VDealhistory.time >= start)\n if end and end != '' and (end != 'undefined'):\n query = query.filter(VDealhistory.time <= end)\n if mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VDealhistory.login == mtlogin)\n for obj in query:\n profit_sum = profit_sum + obj.profit\n storage_sum = storage_sum + obj.storage\n commission_sum = commission_sum + obj.commission\n profitraw_sum = profitraw_sum + obj.profitraw\n sum_list.append({'profit_sum': str(profit_sum), 'storage_sum': str(storage_sum), 'commission_sum': str(commission_sum), 'profitraw_sum': str(profitraw_sum)})\n return sum_list\n<|end_body_1|>\n", "revision_id": "1fadeecf31f1d25e258dc5d70c47a785f7b33961", "skeleton": "<|skeleton|>\nclass VDealhistoryDao:\n \"\"\"Operations on the v_dealhistory view\"\"\"\n\n def search_by_uid(self, uid, start, end, mtlogin, page=None):\n \"\"\"Given a user id, query closed deals within a time range :param uid: user id :param start: start time :param end: end time :param page: requested page :return: queryset\"\"\"\n <|body_0|>\n\n def searchsum_by_uid(self, uid, start, end, mtlogin):\n \"\"\"Given a user id, compute totals within a time range :param uid: user id :param start: start time :param end: end time :return: totals of each item\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class VDealhistoryDao:\n \"\"\"Operations on the v_dealhistory view\"\"\"\n\n def search_by_uid(self, uid, start, end, mtlogin, page=None):\n \"\"\"Given a user id, query closed deals within a time range :param uid: user id :param start: start time :param end: end time :param page: requested page :return: queryset\"\"\"\n query = self.session.query(VDealhistory.time, VDealhistory.deal, 
VDealhistory.positionid, VDealhistory.login, VDealhistory.symbol, VDealhistory.action, VDealhistory.entry, VDealhistory.volume, VDealhistory.price, VDealhistory.priceposition, VDealhistory.profit, VDealhistory.storage, VDealhistory.commission, VDealhistory.profitraw).filter(VDealhistory.uid == uid)\n if start and start != '' and (start != 'undefined'):\n query = query.filter(VDealhistory.time >= start)\n if end and end != '' and (end != 'undefined'):\n query = query.filter(VDealhistory.time <= end)\n if mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VDealhistory.login == mtlogin)\n if page == -1:\n return query.all()\n if not page or page == '' or page == 'undefined':\n page = 1\n return Pagination(query=query, page=page)\n\n def searchsum_by_uid(self, uid, start, end, mtlogin):\n \"\"\"Given a user id, compute totals within a time range :param uid: user id :param start: start time :param end: end time :return: totals of each item\"\"\"\n query = self.session.query(VDealhistory.time, VDealhistory.login, VDealhistory.profit, VDealhistory.storage, VDealhistory.commission, VDealhistory.profitraw).filter(VDealhistory.uid == uid)\n sum_list = []\n profit_sum = 0\n storage_sum = 0\n commission_sum = 0\n profitraw_sum = 0\n if start and start != '' and (start != 'undefined'):\n query = query.filter(VDealhistory.time >= start)\n if end and end != '' and (end != 'undefined'):\n query = query.filter(VDealhistory.time <= end)\n if mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VDealhistory.login == mtlogin)\n for obj in query:\n profit_sum = profit_sum + obj.profit\n storage_sum = storage_sum + obj.storage\n commission_sum = commission_sum + obj.commission\n profitraw_sum = profitraw_sum + obj.profitraw\n sum_list.append({'profit_sum': str(profit_sum), 'storage_sum': str(storage_sum), 'commission_sum': str(commission_sum), 'profitraw_sum': str(profitraw_sum)})\n return sum_list\n", "source": "the_stack_v2_python_sparse", "source_path": "xwcrm/model/views.py", "source_repo": "MSUNorg/XWCRM", "split": "val", "star_events_count": 0} {"blob_id": "192561fc66246fdd6f91ec1a7cc412d39734c356", "bodies": ["super(SimSiam, self).__init__()\nself.encoder = base_encoder(num_classes=dim, zero_init_residual=True)\nprev_dim = self.encoder.fc.weight.shape[1]\nself.encoder.fc = nn.Sequential(nn.Linear(prev_dim, prev_dim, bias=False), nn.BatchNorm1d(prev_dim), nn.ReLU(inplace=True), nn.Linear(prev_dim, prev_dim, bias=False), nn.BatchNorm1d(prev_dim), nn.ReLU(inplace=True), self.encoder.fc, nn.BatchNorm1d(dim, affine=False))\nself.encoder.fc[6].bias.requires_grad = False\nself.predictor = nn.Sequential(nn.Linear(dim, pred_dim, bias=False), nn.BatchNorm1d(pred_dim), nn.ReLU(inplace=True), nn.Linear(pred_dim, 
dim))\n<|end_body_0|>\n\n<|body_start_1|>\n z1 = self.encoder(x1)\n z2 = self.encoder(x2)\n p1 = self.predictor(z1)\n p2 = self.predictor(z2)\n return (p1, p2, z1.detach(), z2.detach())\n<|end_body_1|>\n", "class_docstring": "Build a SimSiam model.", "class_name": "SimSiam", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SimSiam:\n \"\"\"Build a SimSiam model.\"\"\"\n\n def __init__(self, base_encoder, dim=2048, pred_dim=512):\n \"\"\"dim: feature dimension (default: 2048) pred_dim: hidden dimension of the predictor (default: 512)\"\"\"\n <|body_0|>\n\n def forward(self, x1, x2):\n \"\"\"Input: x1: first views of images x2: second views of images Output: p1, p2, z1, z2: predictors and targets of the network See Sec. 3 of https://arxiv.org/abs/2011.10566 for detailed notations\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SimSiam, self).__init__()\n self.encoder = base_encoder(num_classes=dim, zero_init_residual=True)\n prev_dim = self.encoder.fc.weight.shape[1]\n self.encoder.fc = nn.Sequential(nn.Linear(prev_dim, prev_dim, bias=False), nn.BatchNorm1d(prev_dim), nn.ReLU(inplace=True), nn.Linear(prev_dim, prev_dim, bias=False), nn.BatchNorm1d(prev_dim), nn.ReLU(inplace=True), self.encoder.fc, nn.BatchNorm1d(dim, affine=False))\n self.encoder.fc[6].bias.requires_grad = False\n self.predictor = nn.Sequential(nn.Linear(dim, pred_dim, bias=False), nn.BatchNorm1d(pred_dim), nn.ReLU(inplace=True), nn.Linear(pred_dim, dim))\n<|end_body_0|>\n\n<|body_start_1|>\n z1 = self.encoder(x1)\n z2 = self.encoder(x2)\n p1 = self.predictor(z1)\n p2 = self.predictor(z2)\n return (p1, p2, z1.detach(), z2.detach())\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000480", "length_bytes": 2880, "license_type": "no_license", "methods": [{"docstring": "dim: feature dimension (default: 2048) pred_dim: hidden dimension of the predictor (default: 512)", "name": "__init__", "signature": "def __init__(self, base_encoder, dim=2048, pred_dim=512)"}, {"docstring": "Input: x1: first views of images x2: second views of images Output: p1, p2, z1, z2: predictors and targets of the network See Sec. 3 of https://arxiv.org/abs/2011.10566 for detailed notations", "name": "forward", "signature": "def forward(self, x1, x2)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000268", "prompt": "Implement the Python class `SimSiam` described below.\n\nClass description:\nBuild a SimSiam model.\n\nMethod signatures and docstrings:\n- def __init__(self, base_encoder, dim=2048, pred_dim=512): dim: feature dimension (default: 2048) pred_dim: hidden dimension of the predictor (default: 512)\n- def forward(self, x1, x2): Input: x1: first views of images x2: second views of images Output: p1, p2, z1, z2: predictors and targets of the network See Sec. 3 of https://arxiv.org/abs/2011.10566 for detailed notations", "prompted_full_text": "Implement the Python class `SimSiam` described below.\n\nClass description:\nBuild a SimSiam model.\n\nMethod signatures and docstrings:\n- def __init__(self, base_encoder, dim=2048, pred_dim=512): dim: feature dimension (default: 2048) pred_dim: hidden dimension of the predictor (default: 512)\n- def forward(self, x1, x2): Input: x1: first views of images x2: second views of images Output: p1, p2, z1, z2: predictors and targets of the network See Sec. 
3 of https://arxiv.org/abs/2011.10566 for detailed notations\n\n<|skeleton|>\nclass SimSiam:\n \"\"\"Build a SimSiam model.\"\"\"\n\n def __init__(self, base_encoder, dim=2048, pred_dim=512):\n \"\"\"dim: feature dimension (default: 2048) pred_dim: hidden dimension of the predictor (default: 512)\"\"\"\n <|body_0|>\n\n def forward(self, x1, x2):\n \"\"\"Input: x1: first views of images x2: second views of images Output: p1, p2, z1, z2: predictors and targets of the network See Sec. 3 of https://arxiv.org/abs/2011.10566 for detailed notations\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SimSiam, self).__init__()\n self.encoder = base_encoder(num_classes=dim, zero_init_residual=True)\n prev_dim = self.encoder.fc.weight.shape[1]\n self.encoder.fc = nn.Sequential(nn.Linear(prev_dim, prev_dim, bias=False), nn.BatchNorm1d(prev_dim), nn.ReLU(inplace=True), nn.Linear(prev_dim, prev_dim, bias=False), nn.BatchNorm1d(prev_dim), nn.ReLU(inplace=True), self.encoder.fc, nn.BatchNorm1d(dim, affine=False))\n self.encoder.fc[6].bias.requires_grad = False\n self.predictor = nn.Sequential(nn.Linear(dim, pred_dim, bias=False), nn.BatchNorm1d(pred_dim), nn.ReLU(inplace=True), nn.Linear(pred_dim, dim))\n<|end_body_0|>\n\n<|body_start_1|>\n z1 = self.encoder(x1)\n z2 = self.encoder(x2)\n p1 = self.predictor(z1)\n p2 = self.predictor(z2)\n return (p1, p2, z1.detach(), z2.detach())\n<|end_body_1|>\n", "revision_id": "7e55a422588c1d1e00f35a3d3a3ff896cce59e18", "skeleton": "<|skeleton|>\nclass SimSiam:\n \"\"\"Build a SimSiam model.\"\"\"\n\n def __init__(self, base_encoder, dim=2048, pred_dim=512):\n \"\"\"dim: feature dimension (default: 2048) pred_dim: hidden dimension of the predictor (default: 512)\"\"\"\n <|body_0|>\n\n def forward(self, x1, x2):\n \"\"\"Input: x1: first views of images x2: second views of images Output: p1, p2, z1, z2: predictors and targets of the network See Sec. 3 of https://arxiv.org/abs/2011.10566 for detailed notations\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SimSiam:\n \"\"\"Build a SimSiam model.\"\"\"\n\n def __init__(self, base_encoder, dim=2048, pred_dim=512):\n \"\"\"dim: feature dimension (default: 2048) pred_dim: hidden dimension of the predictor (default: 512)\"\"\"\n super(SimSiam, self).__init__()\n self.encoder = base_encoder(num_classes=dim, zero_init_residual=True)\n prev_dim = self.encoder.fc.weight.shape[1]\n self.encoder.fc = nn.Sequential(nn.Linear(prev_dim, prev_dim, bias=False), nn.BatchNorm1d(prev_dim), nn.ReLU(inplace=True), nn.Linear(prev_dim, prev_dim, bias=False), nn.BatchNorm1d(prev_dim), nn.ReLU(inplace=True), self.encoder.fc, nn.BatchNorm1d(dim, affine=False))\n self.encoder.fc[6].bias.requires_grad = False\n self.predictor = nn.Sequential(nn.Linear(dim, pred_dim, bias=False), nn.BatchNorm1d(pred_dim), nn.ReLU(inplace=True), nn.Linear(pred_dim, dim))\n\n def forward(self, x1, x2):\n \"\"\"Input: x1: first views of images x2: second views of images Output: p1, p2, z1, z2: predictors and targets of the network See Sec. 
3 of https://arxiv.org/abs/2011.10566 for detailed notations\"\"\"\n z1 = self.encoder(x1)\n z2 = self.encoder(x2)\n p1 = self.predictor(z1)\n p2 = self.predictor(z2)\n return (p1, p2, z1.detach(), z2.detach())\n", "source": "the_stack_v2_python_sparse", "source_path": "generated/test_facebookresearch_simsiam.py", "source_repo": "jansel/pytorch-jit-paritybench", "split": "val", "star_events_count": 35} {"blob_id": "44ce195471ed5a8196cc2154f6b1da8408af20c8", "bodies": ["self.lock = Lock()\nself.rate = rate\nself.ts = None\nself.count = 0", "with self.lock:\n now = time.time()\n if self.ts is None:\n self.ts = now\n if now - self.ts >= 1.0:\n self.count = 0\n self.ts = now\n self.count += 1\n if self.count <= self.rate:\n return\n time.sleep(self.ts + 1 - now)"], "bodies_text": "<|body_start_0|>\n self.lock = Lock()\n self.rate = rate\n self.ts = None\n self.count = 0\n<|end_body_0|>\n\n<|body_start_1|>\n with self.lock:\n now = time.time()\n if self.ts is None:\n self.ts = now\n if now - self.ts >= 1.0:\n self.count = 0\n self.ts = now\n self.count += 1\n if self.count <= self.rate:\n return\n time.sleep(self.ts + 1 - now)\n<|end_body_1|>\n", "class_docstring": "Throttle Class", "class_name": "Throttle", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Throttle:\n \"\"\"Throttle Class\"\"\"\n\n def __init__(self, rate=150):\n \"\"\"Create a throttle for a specific rate/sec\"\"\"\n <|body_0|>\n\n def __call__(self):\n \"\"\"Return when the throttle limit is acceptable\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.lock = Lock()\n self.rate = rate\n self.ts = None\n self.count = 0\n<|end_body_0|>\n\n<|body_start_1|>\n with self.lock:\n now = time.time()\n if self.ts is None:\n self.ts = now\n if now - self.ts >= 1.0:\n self.count = 0\n self.ts = now\n self.count += 1\n if self.count <= self.rate:\n return\n time.sleep(self.ts + 1 - now)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000481", "length_bytes": 10830, "license_type": "permissive", "methods": [{"docstring": "Create a throttle for a specific rate/sec", "name": "__init__", "signature": "def __init__(self, rate=150)"}, {"docstring": "Return when the throttle limit is acceptable", "name": "__call__", "signature": "def __call__(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003960", "prompt": "Implement the Python class `Throttle` described below.\n\nClass description:\nThrottle Class\n\nMethod signatures and docstrings:\n- def __init__(self, rate=150): Create a throttle for a specific rate/sec\n- def __call__(self): Return when the throttle limit is acceptable", "prompted_full_text": "Implement the Python class `Throttle` described below.\n\nClass description:\nThrottle Class\n\nMethod signatures and docstrings:\n- def __init__(self, rate=150): Create a throttle for a specific rate/sec\n- def __call__(self): Return when the throttle limit is acceptable\n\n<|skeleton|>\nclass Throttle:\n \"\"\"Throttle Class\"\"\"\n\n def __init__(self, rate=150):\n \"\"\"Create a throttle for a specific rate/sec\"\"\"\n <|body_0|>\n\n def __call__(self):\n \"\"\"Return when the throttle limit is acceptable\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.lock = Lock()\n self.rate = rate\n self.ts = None\n self.count = 0\n<|end_body_0|>\n\n<|body_start_1|>\n with self.lock:\n now = time.time()\n if self.ts is None:\n self.ts = now\n if now - self.ts >= 1.0:\n self.count = 0\n self.ts = now\n 
self.count += 1\n if self.count <= self.rate:\n return\n time.sleep(self.ts + 1 - now)\n<|end_body_1|>\n", "revision_id": "0f2e6a2d1c71f104b1522fd68ec01b9f9f3b92f9", "skeleton": "<|skeleton|>\nclass Throttle:\n \"\"\"Throttle Class\"\"\"\n\n def __init__(self, rate=150):\n \"\"\"Create a throttle for a specific rate/sec\"\"\"\n <|body_0|>\n\n def __call__(self):\n \"\"\"Return when the throttle limit is acceptable\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Throttle:\n \"\"\"Throttle Class\"\"\"\n\n def __init__(self, rate=150):\n \"\"\"Create a throttle for a specific rate/sec\"\"\"\n self.lock = Lock()\n self.rate = rate\n self.ts = None\n self.count = 0\n\n def __call__(self):\n \"\"\"Return when the throttle limit is acceptable\"\"\"\n with self.lock:\n now = time.time()\n if self.ts is None:\n self.ts = now\n if now - self.ts >= 1.0:\n self.count = 0\n self.ts = now\n self.count += 1\n if self.count <= self.rate:\n return\n time.sleep(self.ts + 1 - now)\n", "source": "the_stack_v2_python_sparse", "source_path": "apps/TCPB_-_Bulk_DNS_Lookup/src/app.py", "source_repo": "ThreatConnect-Inc/threatconnect-playbooks", "split": "val", "star_events_count": 76} {"blob_id": "77ca8fe62a4c7a3f3aaad55e066b57154cefc059", "bodies": ["self.dic = dict()\nl = len(words)\nself.max = l\nfor i in range(l):\n word = words[i]\n if word in self.dic:\n self.dic[word].append(i)\n else:\n self.dic[word] = [i]", "l1, l2 = (self.dic[word1], self.dic[word2])\nn1, n2 = (len(l1), len(l2))\np1, p2 = (0, 0)\nret = self.max\nwhile p1 < n1 and p2 < n2:\n i1, i2 = (l1[p1], l2[p2])\n if i1 < i2:\n ret = min(i2 - i1, ret)\n p1 += 1\n else:\n ret = min(i1 - i2, ret)\n p2 += 1\nreturn ret"], "bodies_text": "<|body_start_0|>\n self.dic = dict()\n l = len(words)\n self.max = l\n for i in range(l):\n word = words[i]\n if word in self.dic:\n self.dic[word].append(i)\n else:\n self.dic[word] = [i]\n<|end_body_0|>\n\n<|body_start_1|>\n l1, l2 = (self.dic[word1], self.dic[word2])\n n1, n2 = (len(l1), len(l2))\n p1, p2 = (0, 0)\n ret = self.max\n while p1 < n1 and p2 < n2:\n i1, i2 = (l1[p1], l2[p2])\n if i1 < i2:\n ret = min(i2 - i1, ret)\n p1 += 1\n else:\n ret = min(i1 - i2, ret)\n p2 += 1\n return ret\n<|end_body_1|>\n", "class_docstring": "", "class_name": "WordDistance", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WordDistance:\n\n def __init__(self, words):\n \"\"\"initialize your data structure here. :type words: List[str]\"\"\"\n <|body_0|>\n\n def shortest(self, word1, word2):\n \"\"\"Return the shortest distance between word1 and word2. :type word1: str :type word2: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dic = dict()\n l = len(words)\n self.max = l\n for i in range(l):\n word = words[i]\n if word in self.dic:\n self.dic[word].append(i)\n else:\n self.dic[word] = [i]\n<|end_body_0|>\n\n<|body_start_1|>\n l1, l2 = (self.dic[word1], self.dic[word2])\n n1, n2 = (len(l1), len(l2))\n p1, p2 = (0, 0)\n ret = self.max\n while p1 < n1 and p2 < n2:\n i1, i2 = (l1[p1], l2[p2])\n if i1 < i2:\n ret = min(i2 - i1, ret)\n p1 += 1\n else:\n ret = min(i1 - i2, ret)\n p2 += 1\n return ret\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000482", "length_bytes": 1174, "license_type": "no_license", "methods": [{"docstring": "initialize your data structure here. 
:type words: List[str]", "name": "__init__", "signature": "def __init__(self, words)"}, {"docstring": "Return the shortest distance between word1 and word2. :type word1: str :type word2: str :rtype: int", "name": "shortest", "signature": "def shortest(self, word1, word2)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005228", "prompt": "Implement the Python class `WordDistance` described below.\n\nClass description:\nImplement the WordDistance class.\n\nMethod signatures and docstrings:\n- def __init__(self, words): initialize your data structure here. :type words: List[str]\n- def shortest(self, word1, word2): Return the shortest distance between word1 and word2. :type word1: str :type word2: str :rtype: int", "prompted_full_text": "Implement the Python class `WordDistance` described below.\n\nClass description:\nImplement the WordDistance class.\n\nMethod signatures and docstrings:\n- def __init__(self, words): initialize your data structure here. :type words: List[str]\n- def shortest(self, word1, word2): Return the shortest distance between word1 and word2. :type word1: str :type word2: str :rtype: int\n\n<|skeleton|>\nclass WordDistance:\n\n def __init__(self, words):\n \"\"\"initialize your data structure here. :type words: List[str]\"\"\"\n <|body_0|>\n\n def shortest(self, word1, word2):\n \"\"\"Return the shortest distance between word1 and word2. :type word1: str :type word2: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dic = dict()\n l = len(words)\n self.max = l\n for i in range(l):\n word = words[i]\n if word in self.dic:\n self.dic[word].append(i)\n else:\n self.dic[word] = [i]\n<|end_body_0|>\n\n<|body_start_1|>\n l1, l2 = (self.dic[word1], self.dic[word2])\n n1, n2 = (len(l1), len(l2))\n p1, p2 = (0, 0)\n ret = self.max\n while p1 < n1 and p2 < n2:\n i1, i2 = (l1[p1], l2[p2])\n if i1 < i2:\n ret = min(i2 - i1, ret)\n p1 += 1\n else:\n ret = min(i1 - i2, ret)\n p2 += 1\n return ret\n<|end_body_1|>\n", "revision_id": "d6fac85a94a7188e93d4e202e67b6485562d12bd", "skeleton": "<|skeleton|>\nclass WordDistance:\n\n def __init__(self, words):\n \"\"\"initialize your data structure here. :type words: List[str]\"\"\"\n <|body_0|>\n\n def shortest(self, word1, word2):\n \"\"\"Return the shortest distance between word1 and word2. :type word1: str :type word2: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class WordDistance:\n def __init__(self, words):\n \"\"\"initialize your data structure here. :type words: List[str]\"\"\"\n self.dic = dict()\n l = len(words)\n self.max = l\n for i in range(l):\n word = words[i]\n if word in self.dic:\n self.dic[word].append(i)\n else:\n self.dic[word] = [i]\n\n def shortest(self, word1, word2):\n \"\"\"Return the shortest distance between word1 and word2. 
:type word1: str :type word2: str :rtype: int\"\"\"\n l1, l2 = (self.dic[word1], self.dic[word2])\n n1, n2 = (len(l1), len(l2))\n p1, p2 = (0, 0)\n ret = self.max\n while p1 < n1 and p2 < n2:\n i1, i2 = (l1[p1], l2[p2])\n if i1 < i2:\n ret = min(i2 - i1, ret)\n p1 += 1\n else:\n ret = min(i1 - i2, ret)\n p2 += 1\n return ret\n", "source": "the_stack_v2_python_sparse", "source_path": "lc244.py", "source_repo": "GeorgyZhou/Leetcode-Problem", "split": "val", "star_events_count": 0} {"blob_id": "1aaee45ad67cba7f3f8226db140a91c7e75a86c5", "bodies": ["super().__init__()\nself.root_dir = root_dir\nself.dir_filter = dir_filter\nself.file_keys = file_keys\nself.file_path_generator = file_path_generator\nself.file_extension = file_extension if file_extension.startswith('.') else '.' + file_extension\nself.data = {}\ndata_dir = self._crawl_directories()\nself._crawl_data(data_dir)", "for id_, path in data_dir.items():\n data_dict = {id_: path}\n for item in self.file_keys:\n file_path = self.file_path_generator.get_full_file_path(id_, path, item, self.file_extension)\n data_dict[item] = file_path\n self.data[id_] = data_dict", "if not os.path.isdir(self.root_dir):\n raise ValueError('root_dir {} does not exist'.format(self.root_dir))\ndata_dirs = next(os.walk(self.root_dir))[1]\nif self.dir_filter:\n data_dirs = self.dir_filter.filter_directories(data_dirs)\nreturn {data_dir: os.path.join(self.root_dir, data_dir) for data_dir in data_dirs if any((file.endswith(self.file_extension) for file in os.listdir(os.path.join(self.root_dir, data_dir))))}"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.root_dir = root_dir\n self.dir_filter = dir_filter\n self.file_keys = file_keys\n self.file_path_generator = file_path_generator\n self.file_extension = file_extension if file_extension.startswith('.') else '.' + file_extension\n self.data = {}\n data_dir = self._crawl_directories()\n self._crawl_data(data_dir)\n<|end_body_0|>\n\n<|body_start_1|>\n for id_, path in data_dir.items():\n data_dict = {id_: path}\n for item in self.file_keys:\n file_path = self.file_path_generator.get_full_file_path(id_, path, item, self.file_extension)\n data_dict[item] = file_path\n self.data[id_] = data_dict\n<|end_body_1|>\n\n<|body_start_2|>\n if not os.path.isdir(self.root_dir):\n raise ValueError('root_dir {} does not exist'.format(self.root_dir))\n data_dirs = next(os.walk(self.root_dir))[1]\n if self.dir_filter:\n data_dirs = self.dir_filter.filter_directories(data_dirs)\n return {data_dir: os.path.join(self.root_dir, data_dir) for data_dir in data_dirs if any((file.endswith(self.file_extension) for file in os.listdir(os.path.join(self.root_dir, data_dir))))}\n<|end_body_2|>\n", "class_docstring": "Represents a file system data crawler. 
Examples: Suppose we have the following directory structure:: /path/to/root_dir ./Patient1 ./Image.mha ./GroundTruth.mha ./some_text_file.txt ./Patient2 ./Image.mha ./GroundTruth.mha ./GroundTruthRater2.mha ./Atlas ./Atlas.mha We can use the following code to load the images `Image.mha` and `GroundTruth.mha` in the directories `Patient1` and `Patient2`: >>> class MyImgType(enum.Enum): >>> T1 = 1 >>> GroundTruth = 2 >>> >>> class MyFilePathGenerator(FilePathGenerator): >>> @staticmethod >>> def get_full_file_path(id_: str, root_dir: str, file_key, file_extension: str) -> str: >>> if file_key == MyImgType.T1: >>> file_name = 'Image' >>> elif file_key == M", "class_name": "FileSystemDataCrawler", "detected_licenses": ["Apache-2.0", "BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FileSystemDataCrawler:\n \"\"\"Represents a file system data crawler. Examples: Suppose we have the following directory structure:: /path/to/root_dir ./Patient1 ./Image.mha ./GroundTruth.mha ./some_text_file.txt ./Patient2 ./Image.mha ./GroundTruth.mha ./GroundTruthRater2.mha ./Atlas ./Atlas.mha We can use the following code to load the images `Image.mha` and `GroundTruth.mha` in the directories `Patient1` and `Patient2`: >>> class MyImgType(enum.Enum): >>> T1 = 1 >>> GroundTruth = 2 >>> >>> class MyFilePathGenerator(FilePathGenerator): >>> @staticmethod >>> def get_full_file_path(id_: str, root_dir: str, file_key, file_extension: str) -> str: >>> if file_key == MyImgType.T1: >>> file_name = 'Image' >>> elif file_key == M\"\"\"\n\n def __init__(self, root_dir: str, file_keys: list, file_path_generator: FilePathGenerator, dir_filter: DirectoryFilter=None, file_extension: str='.nii.gz'):\n \"\"\"Initializes a new instance of the FileSystemDataCrawler class. Args: root_dir (str): The path to the root directory, which contains subdirectories with the data. file_keys (list): A list of objects, which represent human readable data identifiers (one identifier for each data file to crawl). file_path_generator (FilePathGenerator): A file path generator, which converts a human readable data identifier to an data file path. dir_filter (DirectoryFilter): A directory filter, which filters a list of directories. file_extension (str): The data file extension (with or without dot).\"\"\"\n <|body_0|>\n\n def _crawl_data(self, data_dir: dict):\n \"\"\"Crawls the data inside a directory.\"\"\"\n <|body_1|>\n\n def _crawl_directories(self):\n \"\"\"Crawls the directories, which contain data. Returns: dict: A dictionary where the keys are the directory names and the values the full path to the directory.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.root_dir = root_dir\n self.dir_filter = dir_filter\n self.file_keys = file_keys\n self.file_path_generator = file_path_generator\n self.file_extension = file_extension if file_extension.startswith('.') else '.' 
+ file_extension\n self.data = {}\n data_dir = self._crawl_directories()\n self._crawl_data(data_dir)\n<|end_body_0|>\n\n<|body_start_1|>\n for id_, path in data_dir.items():\n data_dict = {id_: path}\n for item in self.file_keys:\n file_path = self.file_path_generator.get_full_file_path(id_, path, item, self.file_extension)\n data_dict[item] = file_path\n self.data[id_] = data_dict\n<|end_body_1|>\n\n<|body_start_2|>\n if not os.path.isdir(self.root_dir):\n raise ValueError('root_dir {} does not exist'.format(self.root_dir))\n data_dirs = next(os.walk(self.root_dir))[1]\n if self.dir_filter:\n data_dirs = self.dir_filter.filter_directories(data_dirs)\n return {data_dir: os.path.join(self.root_dir, data_dir) for data_dir in data_dirs if any((file.endswith(self.file_extension) for file in os.listdir(os.path.join(self.root_dir, data_dir))))}\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000483", "length_bytes": 6815, "license_type": "permissive", "methods": [{"docstring": "Initializes a new instance of the FileSystemDataCrawler class. Args: root_dir (str): The path to the root directory, which contains subdirectories with the data. file_keys (list): A list of objects, which represent human readable data identifiers (one identifier for each data file to crawl). file_path_generator (FilePathGenerator): A file path generator, which converts a human readable data identifier to an data file path. dir_filter (DirectoryFilter): A directory filter, which filters a list of directories. file_extension (str): The data file extension (with or without dot).", "name": "__init__", "signature": "def __init__(self, root_dir: str, file_keys: list, file_path_generator: FilePathGenerator, dir_filter: DirectoryFilter=None, file_extension: str='.nii.gz')"}, {"docstring": "Crawls the data inside a directory.", "name": "_crawl_data", "signature": "def _crawl_data(self, data_dir: dict)"}, {"docstring": "Crawls the directories, which contain data. Returns: dict: A dictionary where the keys are the directory names and the values the full path to the directory.", "name": "_crawl_directories", "signature": "def _crawl_directories(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001739", "prompt": "Implement the Python class `FileSystemDataCrawler` described below.\n\nClass description:\nRepresents a file system data crawler. Examples: Suppose we have the following directory structure:: /path/to/root_dir ./Patient1 ./Image.mha ./GroundTruth.mha ./some_text_file.txt ./Patient2 ./Image.mha ./GroundTruth.mha ./GroundTruthRater2.mha ./Atlas ./Atlas.mha We can use the following code to load the images `Image.mha` and `GroundTruth.mha` in the directories `Patient1` and `Patient2`: >>> class MyImgType(enum.Enum): >>> T1 = 1 >>> GroundTruth = 2 >>> >>> class MyFilePathGenerator(FilePathGenerator): >>> @staticmethod >>> def get_full_file_path(id_: str, root_dir: str, file_key, file_extension: str) -> str: >>> if file_key == MyImgType.T1: >>> file_name = 'Image' >>> elif file_key == M\n\nMethod signatures and docstrings:\n- def __init__(self, root_dir: str, file_keys: list, file_path_generator: FilePathGenerator, dir_filter: DirectoryFilter=None, file_extension: str='.nii.gz'): Initializes a new instance of the FileSystemDataCrawler class. Args: root_dir (str): The path to the root directory, which contains subdirectories with the data. file_keys (list): A list of objects, which represent human readable data identifiers (one identifier for each data file to crawl). 
file_path_generator (FilePathGenerator): A file path generator, which converts a human readable data identifier to an data file path. dir_filter (DirectoryFilter): A directory filter, which filters a list of directories. file_extension (str): The data file extension (with or without dot).\n- def _crawl_data(self, data_dir: dict): Crawls the data inside a directory.\n- def _crawl_directories(self): Crawls the directories, which contain data. Returns: dict: A dictionary where the keys are the directory names and the values the full path to the directory.", "prompted_full_text": "Implement the Python class `FileSystemDataCrawler` described below.\n\nClass description:\nRepresents a file system data crawler. Examples: Suppose we have the following directory structure:: /path/to/root_dir ./Patient1 ./Image.mha ./GroundTruth.mha ./some_text_file.txt ./Patient2 ./Image.mha ./GroundTruth.mha ./GroundTruthRater2.mha ./Atlas ./Atlas.mha We can use the following code to load the images `Image.mha` and `GroundTruth.mha` in the directories `Patient1` and `Patient2`: >>> class MyImgType(enum.Enum): >>> T1 = 1 >>> GroundTruth = 2 >>> >>> class MyFilePathGenerator(FilePathGenerator): >>> @staticmethod >>> def get_full_file_path(id_: str, root_dir: str, file_key, file_extension: str) -> str: >>> if file_key == MyImgType.T1: >>> file_name = 'Image' >>> elif file_key == M\n\nMethod signatures and docstrings:\n- def __init__(self, root_dir: str, file_keys: list, file_path_generator: FilePathGenerator, dir_filter: DirectoryFilter=None, file_extension: str='.nii.gz'): Initializes a new instance of the FileSystemDataCrawler class. Args: root_dir (str): The path to the root directory, which contains subdirectories with the data. file_keys (list): A list of objects, which represent human readable data identifiers (one identifier for each data file to crawl). file_path_generator (FilePathGenerator): A file path generator, which converts a human readable data identifier to an data file path. dir_filter (DirectoryFilter): A directory filter, which filters a list of directories. file_extension (str): The data file extension (with or without dot).\n- def _crawl_data(self, data_dir: dict): Crawls the data inside a directory.\n- def _crawl_directories(self): Crawls the directories, which contain data. Returns: dict: A dictionary where the keys are the directory names and the values the full path to the directory.\n\n<|skeleton|>\nclass FileSystemDataCrawler:\n \"\"\"Represents a file system data crawler. Examples: Suppose we have the following directory structure:: /path/to/root_dir ./Patient1 ./Image.mha ./GroundTruth.mha ./some_text_file.txt ./Patient2 ./Image.mha ./GroundTruth.mha ./GroundTruthRater2.mha ./Atlas ./Atlas.mha We can use the following code to load the images `Image.mha` and `GroundTruth.mha` in the directories `Patient1` and `Patient2`: >>> class MyImgType(enum.Enum): >>> T1 = 1 >>> GroundTruth = 2 >>> >>> class MyFilePathGenerator(FilePathGenerator): >>> @staticmethod >>> def get_full_file_path(id_: str, root_dir: str, file_key, file_extension: str) -> str: >>> if file_key == MyImgType.T1: >>> file_name = 'Image' >>> elif file_key == M\"\"\"\n\n def __init__(self, root_dir: str, file_keys: list, file_path_generator: FilePathGenerator, dir_filter: DirectoryFilter=None, file_extension: str='.nii.gz'):\n \"\"\"Initializes a new instance of the FileSystemDataCrawler class. Args: root_dir (str): The path to the root directory, which contains subdirectories with the data. 
file_keys (list): A list of objects, which represent human readable data identifiers (one identifier for each data file to crawl). file_path_generator (FilePathGenerator): A file path generator, which converts a human readable data identifier to an data file path. dir_filter (DirectoryFilter): A directory filter, which filters a list of directories. file_extension (str): The data file extension (with or without dot).\"\"\"\n <|body_0|>\n\n def _crawl_data(self, data_dir: dict):\n \"\"\"Crawls the data inside a directory.\"\"\"\n <|body_1|>\n\n def _crawl_directories(self):\n \"\"\"Crawls the directories, which contain data. Returns: dict: A dictionary where the keys are the directory names and the values the full path to the directory.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.root_dir = root_dir\n self.dir_filter = dir_filter\n self.file_keys = file_keys\n self.file_path_generator = file_path_generator\n self.file_extension = file_extension if file_extension.startswith('.') else '.' + file_extension\n self.data = {}\n data_dir = self._crawl_directories()\n self._crawl_data(data_dir)\n<|end_body_0|>\n\n<|body_start_1|>\n for id_, path in data_dir.items():\n data_dict = {id_: path}\n for item in self.file_keys:\n file_path = self.file_path_generator.get_full_file_path(id_, path, item, self.file_extension)\n data_dict[item] = file_path\n self.data[id_] = data_dict\n<|end_body_1|>\n\n<|body_start_2|>\n if not os.path.isdir(self.root_dir):\n raise ValueError('root_dir {} does not exist'.format(self.root_dir))\n data_dirs = next(os.walk(self.root_dir))[1]\n if self.dir_filter:\n data_dirs = self.dir_filter.filter_directories(data_dirs)\n return {data_dir: os.path.join(self.root_dir, data_dir) for data_dir in data_dirs if any((file.endswith(self.file_extension) for file in os.listdir(os.path.join(self.root_dir, data_dir))))}\n<|end_body_2|>\n", "revision_id": "7917c6a6c4e3728db17ec762c63f8253392e6c04", "skeleton": "<|skeleton|>\nclass FileSystemDataCrawler:\n \"\"\"Represents a file system data crawler. Examples: Suppose we have the following directory structure:: /path/to/root_dir ./Patient1 ./Image.mha ./GroundTruth.mha ./some_text_file.txt ./Patient2 ./Image.mha ./GroundTruth.mha ./GroundTruthRater2.mha ./Atlas ./Atlas.mha We can use the following code to load the images `Image.mha` and `GroundTruth.mha` in the directories `Patient1` and `Patient2`: >>> class MyImgType(enum.Enum): >>> T1 = 1 >>> GroundTruth = 2 >>> >>> class MyFilePathGenerator(FilePathGenerator): >>> @staticmethod >>> def get_full_file_path(id_: str, root_dir: str, file_key, file_extension: str) -> str: >>> if file_key == MyImgType.T1: >>> file_name = 'Image' >>> elif file_key == M\"\"\"\n\n def __init__(self, root_dir: str, file_keys: list, file_path_generator: FilePathGenerator, dir_filter: DirectoryFilter=None, file_extension: str='.nii.gz'):\n \"\"\"Initializes a new instance of the FileSystemDataCrawler class. Args: root_dir (str): The path to the root directory, which contains subdirectories with the data. file_keys (list): A list of objects, which represent human readable data identifiers (one identifier for each data file to crawl). file_path_generator (FilePathGenerator): A file path generator, which converts a human readable data identifier to an data file path. dir_filter (DirectoryFilter): A directory filter, which filters a list of directories. 
file_extension (str): The data file extension (with or without dot).\"\"\"\n <|body_0|>\n\n def _crawl_data(self, data_dir: dict):\n \"\"\"Crawls the data inside a directory.\"\"\"\n <|body_1|>\n\n def _crawl_directories(self):\n \"\"\"Crawls the directories, which contain data. Returns: dict: A dictionary where the keys are the directory names and the values the full path to the directory.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FileSystemDataCrawler:\n \"\"\"Represents a file system data crawler. Examples: Suppose we have the following directory structure:: /path/to/root_dir ./Patient1 ./Image.mha ./GroundTruth.mha ./some_text_file.txt ./Patient2 ./Image.mha ./GroundTruth.mha ./GroundTruthRater2.mha ./Atlas ./Atlas.mha We can use the following code to load the images `Image.mha` and `GroundTruth.mha` in the directories `Patient1` and `Patient2`: >>> class MyImgType(enum.Enum): >>> T1 = 1 >>> GroundTruth = 2 >>> >>> class MyFilePathGenerator(FilePathGenerator): >>> @staticmethod >>> def get_full_file_path(id_: str, root_dir: str, file_key, file_extension: str) -> str: >>> if file_key == MyImgType.T1: >>> file_name = 'Image' >>> elif file_key == M\"\"\"\n\n def __init__(self, root_dir: str, file_keys: list, file_path_generator: FilePathGenerator, dir_filter: DirectoryFilter=None, file_extension: str='.nii.gz'):\n \"\"\"Initializes a new instance of the FileSystemDataCrawler class. Args: root_dir (str): The path to the root directory, which contains subdirectories with the data. file_keys (list): A list of objects, which represent human readable data identifiers (one identifier for each data file to crawl). file_path_generator (FilePathGenerator): A file path generator, which converts a human readable data identifier to an data file path. dir_filter (DirectoryFilter): A directory filter, which filters a list of directories. file_extension (str): The data file extension (with or without dot).\"\"\"\n super().__init__()\n self.root_dir = root_dir\n self.dir_filter = dir_filter\n self.file_keys = file_keys\n self.file_path_generator = file_path_generator\n self.file_extension = file_extension if file_extension.startswith('.') else '.' + file_extension\n self.data = {}\n data_dir = self._crawl_directories()\n self._crawl_data(data_dir)\n\n def _crawl_data(self, data_dir: dict):\n \"\"\"Crawls the data inside a directory.\"\"\"\n for id_, path in data_dir.items():\n data_dict = {id_: path}\n for item in self.file_keys:\n file_path = self.file_path_generator.get_full_file_path(id_, path, item, self.file_extension)\n data_dict[item] = file_path\n self.data[id_] = data_dict\n\n def _crawl_directories(self):\n \"\"\"Crawls the directories, which contain data. 
Returns: dict: A dictionary where the keys are the directory names and the values the full path to the directory.\"\"\"\n if not os.path.isdir(self.root_dir):\n raise ValueError('root_dir {} does not exist'.format(self.root_dir))\n data_dirs = next(os.walk(self.root_dir))[1]\n if self.dir_filter:\n data_dirs = self.dir_filter.filter_directories(data_dirs)\n return {data_dir: os.path.join(self.root_dir, data_dir) for data_dir in data_dirs if any((file.endswith(self.file_extension) for file in os.listdir(os.path.join(self.root_dir, data_dir))))}\n", "source": "the_stack_v2_python_sparse", "source_path": "miapy/miapy/data/loading.py", "source_repo": "SCAN-NRAD/BrainRegressorCNN", "split": "val", "star_events_count": 3} {"blob_id": "9f40ea1c9c0ca2e59645b570f78456821219f777", "bodies": ["self.duration = duration\nself.ip = ip\nself.is_udp = is_udp\nself.is_uplink = is_uplink\nself.port = port", "link_type = 'UPLINK' if self.is_uplink else 'DOWNLINK'\nprotocol = 'UDP' if self.is_udp else 'TCP'\nreturn f'{type(self).__name__}: {link_type} {protocol} test, {self.duration} seconds for test device at {self.ip.exploded}:{self.port}'"], "bodies_text": "<|body_start_0|>\n self.duration = duration\n self.ip = ip\n self.is_udp = is_udp\n self.is_uplink = is_uplink\n self.port = port\n<|end_body_0|>\n\n<|body_start_1|>\n link_type = 'UPLINK' if self.is_uplink else 'DOWNLINK'\n protocol = 'UDP' if self.is_udp else 'TCP'\n return f'{type(self).__name__}: {link_type} {protocol} test, {self.duration} seconds for test device at {self.ip.exploded}:{self.port}'\n<|end_body_1|>\n", "class_docstring": "Information about the test instance for a single uplink/downlink traffic channel", "class_name": "TrafficTestInstance", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TrafficTestInstance:\n \"\"\"Information about the test instance for a single uplink/downlink traffic channel\"\"\"\n\n def __init__(self, is_uplink, is_udp, duration, ip, port):\n \"\"\"Create a traffic test instance with the given values Args: is_uplink (bool): whether the test is uplink (else downlink) is_udp (bool): whether the test is UDP (else TCP) duration (int): the duration of the test, in seconds ip (ipaddress.ip_address): the IP of the test device (UE) port (int): the port number of the test device (UE)\"\"\"\n <|body_0|>\n\n def __repr__(self):\n \"\"\"String representation of this test instance\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.duration = duration\n self.ip = ip\n self.is_udp = is_udp\n self.is_uplink = is_uplink\n self.port = port\n<|end_body_0|>\n\n<|body_start_1|>\n link_type = 'UPLINK' if self.is_uplink else 'DOWNLINK'\n protocol = 'UDP' if self.is_udp else 'TCP'\n return f'{type(self).__name__}: {link_type} {protocol} test, {self.duration} seconds for test device at {self.ip.exploded}:{self.port}'\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000484", "length_bytes": 7143, "license_type": "permissive", "methods": [{"docstring": "Create a traffic test instance with the given values Args: is_uplink (bool): whether the test is uplink (else downlink) is_udp (bool): whether the test is UDP (else TCP) duration (int): the duration of the test, in seconds ip (ipaddress.ip_address): the IP of the test device (UE) port (int): the port number of the test device (UE)", "name": "__init__", "signature": "def __init__(self, is_uplink, is_udp, duration, ip, port)"}, {"docstring": "String representation of this test instance", 
"name": "__repr__", "signature": "def __repr__(self)"}], "n_methods": 2, "prompt": "Implement the Python class `TrafficTestInstance` described below.\n\nClass description:\nInformation about the test instance for a single uplink/downlink traffic channel\n\nMethod signatures and docstrings:\n- def __init__(self, is_uplink, is_udp, duration, ip, port): Create a traffic test instance with the given values Args: is_uplink (bool): whether the test is uplink (else downlink) is_udp (bool): whether the test is UDP (else TCP) duration (int): the duration of the test, in seconds ip (ipaddress.ip_address): the IP of the test device (UE) port (int): the port number of the test device (UE)\n- def __repr__(self): String representation of this test instance", "prompted_full_text": "Implement the Python class `TrafficTestInstance` described below.\n\nClass description:\nInformation about the test instance for a single uplink/downlink traffic channel\n\nMethod signatures and docstrings:\n- def __init__(self, is_uplink, is_udp, duration, ip, port): Create a traffic test instance with the given values Args: is_uplink (bool): whether the test is uplink (else downlink) is_udp (bool): whether the test is UDP (else TCP) duration (int): the duration of the test, in seconds ip (ipaddress.ip_address): the IP of the test device (UE) port (int): the port number of the test device (UE)\n- def __repr__(self): String representation of this test instance\n\n<|skeleton|>\nclass TrafficTestInstance:\n \"\"\"Information about the test instance for a single uplink/downlink traffic channel\"\"\"\n\n def __init__(self, is_uplink, is_udp, duration, ip, port):\n \"\"\"Create a traffic test instance with the given values Args: is_uplink (bool): whether the test is uplink (else downlink) is_udp (bool): whether the test is UDP (else TCP) duration (int): the duration of the test, in seconds ip (ipaddress.ip_address): the IP of the test device (UE) port (int): the port number of the test device (UE)\"\"\"\n <|body_0|>\n\n def __repr__(self):\n \"\"\"String representation of this test instance\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.duration = duration\n self.ip = ip\n self.is_udp = is_udp\n self.is_uplink = is_uplink\n self.port = port\n<|end_body_0|>\n\n<|body_start_1|>\n link_type = 'UPLINK' if self.is_uplink else 'DOWNLINK'\n protocol = 'UDP' if self.is_udp else 'TCP'\n return f'{type(self).__name__}: {link_type} {protocol} test, {self.duration} seconds for test device at {self.ip.exploded}:{self.port}'\n<|end_body_1|>\n", "revision_id": "0e1d895dfe625681229e181fbc2dbad83e13c5cb", "skeleton": "<|skeleton|>\nclass TrafficTestInstance:\n \"\"\"Information about the test instance for a single uplink/downlink traffic channel\"\"\"\n\n def __init__(self, is_uplink, is_udp, duration, ip, port):\n \"\"\"Create a traffic test instance with the given values Args: is_uplink (bool): whether the test is uplink (else downlink) is_udp (bool): whether the test is UDP (else TCP) duration (int): the duration of the test, in seconds ip (ipaddress.ip_address): the IP of the test device (UE) port (int): the port number of the test device (UE)\"\"\"\n <|body_0|>\n\n def __repr__(self):\n \"\"\"String representation of this test instance\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TrafficTestInstance:\n \"\"\"Information about the test instance for a single uplink/downlink traffic channel\"\"\"\n\n def 
__init__(self, is_uplink, is_udp, duration, ip, port):\n \"\"\"Create a traffic test instance with the given values Args: is_uplink (bool): whether the test is uplink (else downlink) is_udp (bool): whether the test is UDP (else TCP) duration (int): the duration of the test, in seconds ip (ipaddress.ip_address): the IP of the test device (UE) port (int): the port number of the test device (UE)\"\"\"\n self.duration = duration\n self.ip = ip\n self.is_udp = is_udp\n self.is_uplink = is_uplink\n self.port = port\n\n def __repr__(self):\n \"\"\"String representation of this test instance\"\"\"\n link_type = 'UPLINK' if self.is_uplink else 'DOWNLINK'\n protocol = 'UDP' if self.is_udp else 'TCP'\n return f'{type(self).__name__}: {link_type} {protocol} test, {self.duration} seconds for test device at {self.ip.exploded}:{self.port}'\n", "source": "the_stack_v2_python_sparse", "source_path": "lte/gateway/python/integ_tests/s1aptests/util/traffic_messages.py", "source_repo": "magma/magma", "split": "val", "star_events_count": 1219} {"blob_id": "be38b6a161f0284d10e148c65b77da3e99bc3089", "bodies": ["if support_regional_security_policy or support_net_lb:\n cls.NAME_ARG = flags.PriorityArgument('delete', is_plural=True)\n cls.NAME_ARG.AddArgument(parser, operation_type='delete', cust_metavar='PRIORITY')\n flags.AddRegionFlag(parser, 'delete')\n cls.SECURITY_POLICY_ARG = security_policies_flags.SecurityPolicyMultiScopeArgumentForRules()\nelse:\n flags.AddPriority(parser, 'delete', is_plural=True)\n cls.SECURITY_POLICY_ARG = security_policies_flags.SecurityPolicyArgumentForRules()\ncls.SECURITY_POLICY_ARG.AddArgument(parser)\nparser.display_info.AddCacheUpdater(security_policies_flags.SecurityPoliciesCompleter)", "holder = base_classes.ComputeApiHolder(release_track)\nrefs = []\nif support_regional_security_policy or support_net_lb:\n if args.security_policy:\n security_policy_ref = cls.SECURITY_POLICY_ARG.ResolveAsResource(args, holder.resources, default_scope=compute_scope.ScopeEnum.GLOBAL)\n if getattr(security_policy_ref, 'region', None) is not None:\n for name in args.names:\n refs.append(holder.resources.Parse(name, collection='compute.regionSecurityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'region': security_policy_ref.region, 'securityPolicy': args.security_policy}))\n else:\n for name in args.names:\n refs.append(holder.resources.Parse(name, collection='compute.securityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'securityPolicy': args.security_policy}))\n else:\n for name in args.names:\n try:\n refs.append(holder.resources.Parse(name, collection='compute.regionSecurityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'region': getattr(args, 'region', None)}))\n except (resources.RequiredFieldOmittedException, resources.WrongResourceCollectionException):\n refs.append(holder.resources.Parse(name, collection='compute.securityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail}))\nelse:\n for name in args.names:\n refs.append(holder.resources.Parse(name, collection='compute.securityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'securityPolicy': args.security_policy}))\nutils.PromptForDeletion(refs)\nrequests = []\nfor ref in refs:\n security_policy_rule = client.SecurityPolicyRule(ref, compute_client=holder.client)\n requests.extend(security_policy_rule.Delete(only_generate_request=True))\nreturn holder.client.MakeRequests(requests)"], "bodies_text": 
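The TrafficTestInstance record above is a plain value object, so its behaviour is easiest to confirm with a quick round trip. A self-contained sketch, restating the class verbatim and using invented sample values (any ipaddress address and port would do):

import ipaddress

class TrafficTestInstance:
    """Information about the test instance for a single uplink/downlink traffic channel"""

    def __init__(self, is_uplink, is_udp, duration, ip, port):
        self.duration = duration
        self.ip = ip
        self.is_udp = is_udp
        self.is_uplink = is_uplink
        self.port = port

    def __repr__(self):
        link_type = 'UPLINK' if self.is_uplink else 'DOWNLINK'
        protocol = 'UDP' if self.is_udp else 'TCP'
        return f'{type(self).__name__}: {link_type} {protocol} test, {self.duration} seconds for test device at {self.ip.exploded}:{self.port}'

# .exploded requires a real ipaddress object, not a plain string.
test = TrafficTestInstance(is_uplink=True, is_udp=False, duration=30,
                           ip=ipaddress.ip_address('192.168.0.10'), port=5001)
print(test)
# TrafficTestInstance: UPLINK TCP test, 30 seconds for test device at 192.168.0.10:5001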
"<|body_start_0|>\n if support_regional_security_policy or support_net_lb:\n cls.NAME_ARG = flags.PriorityArgument('delete', is_plural=True)\n cls.NAME_ARG.AddArgument(parser, operation_type='delete', cust_metavar='PRIORITY')\n flags.AddRegionFlag(parser, 'delete')\n cls.SECURITY_POLICY_ARG = security_policies_flags.SecurityPolicyMultiScopeArgumentForRules()\n else:\n flags.AddPriority(parser, 'delete', is_plural=True)\n cls.SECURITY_POLICY_ARG = security_policies_flags.SecurityPolicyArgumentForRules()\n cls.SECURITY_POLICY_ARG.AddArgument(parser)\n parser.display_info.AddCacheUpdater(security_policies_flags.SecurityPoliciesCompleter)\n<|end_body_0|>\n\n<|body_start_1|>\n holder = base_classes.ComputeApiHolder(release_track)\n refs = []\n if support_regional_security_policy or support_net_lb:\n if args.security_policy:\n security_policy_ref = cls.SECURITY_POLICY_ARG.ResolveAsResource(args, holder.resources, default_scope=compute_scope.ScopeEnum.GLOBAL)\n if getattr(security_policy_ref, 'region', None) is not None:\n for name in args.names:\n refs.append(holder.resources.Parse(name, collection='compute.regionSecurityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'region': security_policy_ref.region, 'securityPolicy': args.security_policy}))\n else:\n for name in args.names:\n refs.append(holder.resources.Parse(name, collection='compute.securityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'securityPolicy': args.security_policy}))\n else:\n for name in args.names:\n try:\n refs.append(holder.resources.Parse(name, collection='compute.regionSecurityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'region': getattr(args, 'region', None)}))\n except (resources.RequiredFieldOmittedException, resources.WrongResourceCollectionException):\n refs.append(holder.resources.Parse(name, collection='compute.securityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail}))\n else:\n for name in args.names:\n refs.append(holder.resources.Parse(name, collection='compute.securityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'securityPolicy': args.security_policy}))\n utils.PromptForDeletion(refs)\n requests = []\n for ref in refs:\n security_policy_rule = client.SecurityPolicyRule(ref, compute_client=holder.client)\n requests.extend(security_policy_rule.Delete(only_generate_request=True))\n return holder.client.MakeRequests(requests)\n<|end_body_1|>\n", "class_docstring": "Delete Compute Engine security policy rules. *{command}* is used to delete security policy rules. ## EXAMPLES To delete the rule at priority 1000, run: $ {command} 1000 \\\\ --security-policy=my-policy", "class_name": "DeleteHelper", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DeleteHelper:\n \"\"\"Delete Compute Engine security policy rules. *{command}* is used to delete security policy rules. 
## EXAMPLES To delete the rule at priority 1000, run: $ {command} 1000 \\\\ --security-policy=my-policy\"\"\"\n\n def Args(cls, parser, support_regional_security_policy, support_net_lb):\n \"\"\"Generates the flagset for a Delete command.\"\"\"\n <|body_0|>\n\n def Run(cls, release_track, args, support_regional_security_policy, support_net_lb):\n \"\"\"Validates arguments and deletes security policy rule(s).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if support_regional_security_policy or support_net_lb:\n cls.NAME_ARG = flags.PriorityArgument('delete', is_plural=True)\n cls.NAME_ARG.AddArgument(parser, operation_type='delete', cust_metavar='PRIORITY')\n flags.AddRegionFlag(parser, 'delete')\n cls.SECURITY_POLICY_ARG = security_policies_flags.SecurityPolicyMultiScopeArgumentForRules()\n else:\n flags.AddPriority(parser, 'delete', is_plural=True)\n cls.SECURITY_POLICY_ARG = security_policies_flags.SecurityPolicyArgumentForRules()\n cls.SECURITY_POLICY_ARG.AddArgument(parser)\n parser.display_info.AddCacheUpdater(security_policies_flags.SecurityPoliciesCompleter)\n<|end_body_0|>\n\n<|body_start_1|>\n holder = base_classes.ComputeApiHolder(release_track)\n refs = []\n if support_regional_security_policy or support_net_lb:\n if args.security_policy:\n security_policy_ref = cls.SECURITY_POLICY_ARG.ResolveAsResource(args, holder.resources, default_scope=compute_scope.ScopeEnum.GLOBAL)\n if getattr(security_policy_ref, 'region', None) is not None:\n for name in args.names:\n refs.append(holder.resources.Parse(name, collection='compute.regionSecurityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'region': security_policy_ref.region, 'securityPolicy': args.security_policy}))\n else:\n for name in args.names:\n refs.append(holder.resources.Parse(name, collection='compute.securityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'securityPolicy': args.security_policy}))\n else:\n for name in args.names:\n try:\n refs.append(holder.resources.Parse(name, collection='compute.regionSecurityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'region': getattr(args, 'region', None)}))\n except (resources.RequiredFieldOmittedException, resources.WrongResourceCollectionException):\n refs.append(holder.resources.Parse(name, collection='compute.securityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail}))\n else:\n for name in args.names:\n refs.append(holder.resources.Parse(name, collection='compute.securityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'securityPolicy': args.security_policy}))\n utils.PromptForDeletion(refs)\n requests = []\n for ref in refs:\n security_policy_rule = client.SecurityPolicyRule(ref, compute_client=holder.client)\n requests.extend(security_policy_rule.Delete(only_generate_request=True))\n return holder.client.MakeRequests(requests)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000485", "length_bytes": 7810, "license_type": "permissive", "methods": [{"docstring": "Generates the flagset for a Delete command.", "name": "Args", "signature": "def Args(cls, parser, support_regional_security_policy, support_net_lb)"}, {"docstring": "Validates arguments and deletes security policy rule(s).", "name": "Run", "signature": "def Run(cls, release_track, args, support_regional_security_policy, support_net_lb)"}], "n_methods": 2, "prompt": "Implement the Python class `DeleteHelper` described below.\n\nClass description:\nDelete Compute 
Engine security policy rules. *{command}* is used to delete security policy rules. ## EXAMPLES To delete the rule at priority 1000, run: $ {command} 1000 \\\\ --security-policy=my-policy\n\nMethod signatures and docstrings:\n- def Args(cls, parser, support_regional_security_policy, support_net_lb): Generates the flagset for a Delete command.\n- def Run(cls, release_track, args, support_regional_security_policy, support_net_lb): Validates arguments and deletes security policy rule(s).", "prompted_full_text": "Implement the Python class `DeleteHelper` described below.\n\nClass description:\nDelete Compute Engine security policy rules. *{command}* is used to delete security policy rules. ## EXAMPLES To delete the rule at priority 1000, run: $ {command} 1000 \\\\ --security-policy=my-policy\n\nMethod signatures and docstrings:\n- def Args(cls, parser, support_regional_security_policy, support_net_lb): Generates the flagset for a Delete command.\n- def Run(cls, release_track, args, support_regional_security_policy, support_net_lb): Validates arguments and deletes security policy rule(s).\n\n<|skeleton|>\nclass DeleteHelper:\n \"\"\"Delete Compute Engine security policy rules. *{command}* is used to delete security policy rules. ## EXAMPLES To delete the rule at priority 1000, run: $ {command} 1000 \\\\ --security-policy=my-policy\"\"\"\n\n def Args(cls, parser, support_regional_security_policy, support_net_lb):\n \"\"\"Generates the flagset for a Delete command.\"\"\"\n <|body_0|>\n\n def Run(cls, release_track, args, support_regional_security_policy, support_net_lb):\n \"\"\"Validates arguments and deletes security policy rule(s).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if support_regional_security_policy or support_net_lb:\n cls.NAME_ARG = flags.PriorityArgument('delete', is_plural=True)\n cls.NAME_ARG.AddArgument(parser, operation_type='delete', cust_metavar='PRIORITY')\n flags.AddRegionFlag(parser, 'delete')\n cls.SECURITY_POLICY_ARG = security_policies_flags.SecurityPolicyMultiScopeArgumentForRules()\n else:\n flags.AddPriority(parser, 'delete', is_plural=True)\n cls.SECURITY_POLICY_ARG = security_policies_flags.SecurityPolicyArgumentForRules()\n cls.SECURITY_POLICY_ARG.AddArgument(parser)\n parser.display_info.AddCacheUpdater(security_policies_flags.SecurityPoliciesCompleter)\n<|end_body_0|>\n\n<|body_start_1|>\n holder = base_classes.ComputeApiHolder(release_track)\n refs = []\n if support_regional_security_policy or support_net_lb:\n if args.security_policy:\n security_policy_ref = cls.SECURITY_POLICY_ARG.ResolveAsResource(args, holder.resources, default_scope=compute_scope.ScopeEnum.GLOBAL)\n if getattr(security_policy_ref, 'region', None) is not None:\n for name in args.names:\n refs.append(holder.resources.Parse(name, collection='compute.regionSecurityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'region': security_policy_ref.region, 'securityPolicy': args.security_policy}))\n else:\n for name in args.names:\n refs.append(holder.resources.Parse(name, collection='compute.securityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'securityPolicy': args.security_policy}))\n else:\n for name in args.names:\n try:\n refs.append(holder.resources.Parse(name, collection='compute.regionSecurityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'region': getattr(args, 'region', None)}))\n except (resources.RequiredFieldOmittedException, resources.WrongResourceCollectionException):\n 
refs.append(holder.resources.Parse(name, collection='compute.securityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail}))\n else:\n for name in args.names:\n refs.append(holder.resources.Parse(name, collection='compute.securityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'securityPolicy': args.security_policy}))\n utils.PromptForDeletion(refs)\n requests = []\n for ref in refs:\n security_policy_rule = client.SecurityPolicyRule(ref, compute_client=holder.client)\n requests.extend(security_policy_rule.Delete(only_generate_request=True))\n return holder.client.MakeRequests(requests)\n<|end_body_1|>\n", "revision_id": "392abf004b16203030e6efd2f0af24db7c8d669e", "skeleton": "<|skeleton|>\nclass DeleteHelper:\n \"\"\"Delete Compute Engine security policy rules. *{command}* is used to delete security policy rules. ## EXAMPLES To delete the rule at priority 1000, run: $ {command} 1000 \\\\ --security-policy=my-policy\"\"\"\n\n def Args(cls, parser, support_regional_security_policy, support_net_lb):\n \"\"\"Generates the flagset for a Delete command.\"\"\"\n <|body_0|>\n\n def Run(cls, release_track, args, support_regional_security_policy, support_net_lb):\n \"\"\"Validates arguments and deletes security policy rule(s).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DeleteHelper:\n \"\"\"Delete Compute Engine security policy rules. *{command}* is used to delete security policy rules. ## EXAMPLES To delete the rule at priority 1000, run: $ {command} 1000 \\\\ --security-policy=my-policy\"\"\"\n\n def Args(cls, parser, support_regional_security_policy, support_net_lb):\n \"\"\"Generates the flagset for a Delete command.\"\"\"\n if support_regional_security_policy or support_net_lb:\n cls.NAME_ARG = flags.PriorityArgument('delete', is_plural=True)\n cls.NAME_ARG.AddArgument(parser, operation_type='delete', cust_metavar='PRIORITY')\n flags.AddRegionFlag(parser, 'delete')\n cls.SECURITY_POLICY_ARG = security_policies_flags.SecurityPolicyMultiScopeArgumentForRules()\n else:\n flags.AddPriority(parser, 'delete', is_plural=True)\n cls.SECURITY_POLICY_ARG = security_policies_flags.SecurityPolicyArgumentForRules()\n cls.SECURITY_POLICY_ARG.AddArgument(parser)\n parser.display_info.AddCacheUpdater(security_policies_flags.SecurityPoliciesCompleter)\n\n def Run(cls, release_track, args, support_regional_security_policy, support_net_lb):\n \"\"\"Validates arguments and deletes security policy rule(s).\"\"\"\n holder = base_classes.ComputeApiHolder(release_track)\n refs = []\n if support_regional_security_policy or support_net_lb:\n if args.security_policy:\n security_policy_ref = cls.SECURITY_POLICY_ARG.ResolveAsResource(args, holder.resources, default_scope=compute_scope.ScopeEnum.GLOBAL)\n if getattr(security_policy_ref, 'region', None) is not None:\n for name in args.names:\n refs.append(holder.resources.Parse(name, collection='compute.regionSecurityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'region': security_policy_ref.region, 'securityPolicy': args.security_policy}))\n else:\n for name in args.names:\n refs.append(holder.resources.Parse(name, collection='compute.securityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'securityPolicy': args.security_policy}))\n else:\n for name in args.names:\n try:\n refs.append(holder.resources.Parse(name, 
collection='compute.regionSecurityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'region': getattr(args, 'region', None)}))\n except (resources.RequiredFieldOmittedException, resources.WrongResourceCollectionException):\n refs.append(holder.resources.Parse(name, collection='compute.securityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail}))\n else:\n for name in args.names:\n refs.append(holder.resources.Parse(name, collection='compute.securityPolicyRules', params={'project': properties.VALUES.core.project.GetOrFail, 'securityPolicy': args.security_policy}))\n utils.PromptForDeletion(refs)\n requests = []\n for ref in refs:\n security_policy_rule = client.SecurityPolicyRule(ref, compute_client=holder.client)\n requests.extend(security_policy_rule.Delete(only_generate_request=True))\n return holder.client.MakeRequests(requests)\n", "source": "the_stack_v2_python_sparse", "source_path": "lib/surface/compute/security_policies/rules/delete.py", "source_repo": "google-cloud-sdk-unofficial/google-cloud-sdk", "split": "val", "star_events_count": 9} {"blob_id": "7380f91b693d8f107dc3531aae1016f08af0926c", "bodies": ["startTime = datetime.datetime.now()\nclient = dml.pymongo.MongoClient()\nrepo = client.repo\nrepo.authenticate('hxjia_jiahaozh', 'hxjia_jiahaozh')\nurl = 'http://bostonopendata-boston.opendata.arcgis.com/datasets/7a7aca614ad740e99b060e0ee787a228_3.csv'\nbl = pd.read_csv(url)\nnew_bl = pd.DataFrame({'Name': bl['Name_of_Pr'], 'Address': bl['Address'], 'Neighborhood': bl['Neighborho']})\nr = json.loads(new_bl.to_json(orient='records'))\nrepo.dropCollection('Boston_Landmarks')\nrepo.createCollection('Boston_Landmarks')\nrepo['hxjia_jiahaozh.Boston_Landmarks'].insert_many(r)\nrepo['hxjia_jiahaozh.Boston_Landmarks'].metadata({'complete': True})\nprint(repo['hxjia_jiahaozh.Boston_Landmarks'].metadata())\nrepo.logout()\nendTime = datetime.datetime.now()\nreturn {'start': startTime, 'end': endTime}", "client = dml.pymongo.MongoClient()\nrepo = client.repo\nrepo.authenticate('hxjia_jiahaozh', 'hxjia_jiahaozh')\ndoc.add_namespace('alg', 'http://datamechanics.io/algorithm/hxjia_jiahaozh/bostonlandmark')\ndoc.add_namespace('dat', 'http://datamechanics.io/data/hxjia_jiahaozh/landmark')\ndoc.add_namespace('ont', 'http://datamechanics.io/ontology#')\ndoc.add_namespace('log', 'http://datamechanics.io/log/')\ndoc.add_namespace('bdp', 'http://bostonopendata-boston.opendata.arcgis.com/datasets/')\nthis_script = doc.agent('alg:hxjia_jiahaozh#get_boston_landmark', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\nresource = doc.entity('bdp:7a7aca614ad740e99b060e0ee787a228_3', {'prov:label': 'Boston_Landmarks, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'csv'})\nget_landmarks = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\ndoc.wasAssociatedWith(get_landmarks, this_script)\ndoc.usage(get_landmarks, resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Boston+Landmarks&$select=Name,Address,Neighborhood'})\nlandmarks = doc.entity('dat:hxjia_jiahaozh#landmarks', {prov.model.PROV_LABEL: 'Boston Landmarks', prov.model.PROV_TYPE: 'ont:DataSet'})\ndoc.wasAttributedTo(landmarks, this_script)\ndoc.wasGeneratedBy(landmarks, get_landmarks, endTime)\ndoc.wasDerivedFrom(landmarks, resource, get_landmarks, get_landmarks, get_landmarks)\nrepo.logout()\nreturn doc"], "bodies_text": "<|body_start_0|>\n startTime = datetime.datetime.now()\n client = 
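The subtle part of DeleteHelper.Run above is the parse-with-fallback: when no explicit security policy is given, each rule name is first resolved against the regional collection, and only if that parse fails does it fall back to the global collection. A dependency-free sketch of that control flow; the resolver, collection names, and exception classes below are stand-ins for the googlecloudsdk resources API, not the real thing:

class RequiredFieldOmittedException(Exception):
    pass

class WrongResourceCollectionException(Exception):
    # Defined only to mirror the real except clause; the stand-in
    # resolver below never raises it.
    pass

def parse(name, collection, params):
    # Stand-in resolver: pretend regional parsing fails without a region.
    if collection == 'compute.regionSecurityPolicyRules' and not params.get('region'):
        raise RequiredFieldOmittedException('region is required')
    return (collection, name, params)

def resolve_rule(name, region=None):
    """Try the regional rule collection first, then fall back to global."""
    try:
        return parse(name, 'compute.regionSecurityPolicyRules',
                     {'project': 'my-project', 'region': region})
    except (RequiredFieldOmittedException, WrongResourceCollectionException):
        return parse(name, 'compute.securityPolicyRules',
                     {'project': 'my-project'})

print(resolve_rule('1000', region='us-central1'))  # resolves as a regional rule
print(resolve_rule('1000'))                        # falls back to the global collection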
dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('hxjia_jiahaozh', 'hxjia_jiahaozh')\n url = 'http://bostonopendata-boston.opendata.arcgis.com/datasets/7a7aca614ad740e99b060e0ee787a228_3.csv'\n bl = pd.read_csv(url)\n new_bl = pd.DataFrame({'Name': bl['Name_of_Pr'], 'Address': bl['Address'], 'Neighborhood': bl['Neighborho']})\n r = json.loads(new_bl.to_json(orient='records'))\n repo.dropCollection('Boston_Landmarks')\n repo.createCollection('Boston_Landmarks')\n repo['hxjia_jiahaozh.Boston_Landmarks'].insert_many(r)\n repo['hxjia_jiahaozh.Boston_Landmarks'].metadata({'complete': True})\n print(repo['hxjia_jiahaozh.Boston_Landmarks'].metadata())\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('hxjia_jiahaozh', 'hxjia_jiahaozh')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/hxjia_jiahaozh/bostonlandmark')\n doc.add_namespace('dat', 'http://datamechanics.io/data/hxjia_jiahaozh/landmark')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('bdp', 'http://bostonopendata-boston.opendata.arcgis.com/datasets/')\n this_script = doc.agent('alg:hxjia_jiahaozh#get_boston_landmark', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n resource = doc.entity('bdp:7a7aca614ad740e99b060e0ee787a228_3', {'prov:label': 'Boston_Landmarks, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'csv'})\n get_landmarks = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_landmarks, this_script)\n doc.usage(get_landmarks, resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Boston+Landmarks&$select=Name,Address,Neighborhood'})\n landmarks = doc.entity('dat:hxjia_jiahaozh#landmarks', {prov.model.PROV_LABEL: 'Boston Landmarks', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(landmarks, this_script)\n doc.wasGeneratedBy(landmarks, get_landmarks, endTime)\n doc.wasDerivedFrom(landmarks, resource, get_landmarks, get_landmarks, get_landmarks)\n repo.logout()\n return doc\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Get_Boston_Landmark", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Get_Boston_Landmark:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets (not using the API here for the sake of simplicity).\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. 
Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('hxjia_jiahaozh', 'hxjia_jiahaozh')\n url = 'http://bostonopendata-boston.opendata.arcgis.com/datasets/7a7aca614ad740e99b060e0ee787a228_3.csv'\n bl = pd.read_csv(url)\n new_bl = pd.DataFrame({'Name': bl['Name_of_Pr'], 'Address': bl['Address'], 'Neighborhood': bl['Neighborho']})\n r = json.loads(new_bl.to_json(orient='records'))\n repo.dropCollection('Boston_Landmarks')\n repo.createCollection('Boston_Landmarks')\n repo['hxjia_jiahaozh.Boston_Landmarks'].insert_many(r)\n repo['hxjia_jiahaozh.Boston_Landmarks'].metadata({'complete': True})\n print(repo['hxjia_jiahaozh.Boston_Landmarks'].metadata())\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('hxjia_jiahaozh', 'hxjia_jiahaozh')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/hxjia_jiahaozh/bostonlandmark')\n doc.add_namespace('dat', 'http://datamechanics.io/data/hxjia_jiahaozh/landmark')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('bdp', 'http://bostonopendata-boston.opendata.arcgis.com/datasets/')\n this_script = doc.agent('alg:hxjia_jiahaozh#get_boston_landmark', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n resource = doc.entity('bdp:7a7aca614ad740e99b060e0ee787a228_3', {'prov:label': 'Boston_Landmarks, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'csv'})\n get_landmarks = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_landmarks, this_script)\n doc.usage(get_landmarks, resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Boston+Landmarks&$select=Name,Address,Neighborhood'})\n landmarks = doc.entity('dat:hxjia_jiahaozh#landmarks', {prov.model.PROV_LABEL: 'Boston Landmarks', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(landmarks, this_script)\n doc.wasGeneratedBy(landmarks, get_landmarks, endTime)\n doc.wasDerivedFrom(landmarks, resource, get_landmarks, get_landmarks, get_landmarks)\n repo.logout()\n return doc\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000486", "length_bytes": 4333, "license_type": "no_license", "methods": [{"docstring": "Retrieve some data sets (not using the API here for the sake of simplicity).", "name": "execute", "signature": "def execute(trial=False)"}, {"docstring": "Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.", "name": "provenance", "signature": "def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None)"}], "n_methods": 2, "prompt": "Implement the Python class `Get_Boston_Landmark` described below.\n\nClass description:\nImplement the Get_Boston_Landmark class.\n\nMethod signatures and docstrings:\n- def execute(trial=False): Retrieve some data sets (not using the API here for the sake of simplicity).\n- def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None): Create the provenance document describing everything happening in this script. 
Each run of the script will generate a new document describing that invocation event.", "prompted_full_text": "Implement the Python class `Get_Boston_Landmark` described below.\n\nClass description:\nImplement the Get_Boston_Landmark class.\n\nMethod signatures and docstrings:\n- def execute(trial=False): Retrieve some data sets (not using the API here for the sake of simplicity).\n- def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None): Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\n\n<|skeleton|>\nclass Get_Boston_Landmark:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets (not using the API here for the sake of simplicity).\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('hxjia_jiahaozh', 'hxjia_jiahaozh')\n url = 'http://bostonopendata-boston.opendata.arcgis.com/datasets/7a7aca614ad740e99b060e0ee787a228_3.csv'\n bl = pd.read_csv(url)\n new_bl = pd.DataFrame({'Name': bl['Name_of_Pr'], 'Address': bl['Address'], 'Neighborhood': bl['Neighborho']})\n r = json.loads(new_bl.to_json(orient='records'))\n repo.dropCollection('Boston_Landmarks')\n repo.createCollection('Boston_Landmarks')\n repo['hxjia_jiahaozh.Boston_Landmarks'].insert_many(r)\n repo['hxjia_jiahaozh.Boston_Landmarks'].metadata({'complete': True})\n print(repo['hxjia_jiahaozh.Boston_Landmarks'].metadata())\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('hxjia_jiahaozh', 'hxjia_jiahaozh')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/hxjia_jiahaozh/bostonlandmark')\n doc.add_namespace('dat', 'http://datamechanics.io/data/hxjia_jiahaozh/landmark')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('bdp', 'http://bostonopendata-boston.opendata.arcgis.com/datasets/')\n this_script = doc.agent('alg:hxjia_jiahaozh#get_boston_landmark', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n resource = doc.entity('bdp:7a7aca614ad740e99b060e0ee787a228_3', {'prov:label': 'Boston_Landmarks, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'csv'})\n get_landmarks = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_landmarks, this_script)\n doc.usage(get_landmarks, resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Boston+Landmarks&$select=Name,Address,Neighborhood'})\n landmarks = doc.entity('dat:hxjia_jiahaozh#landmarks', {prov.model.PROV_LABEL: 'Boston Landmarks', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(landmarks, this_script)\n doc.wasGeneratedBy(landmarks, get_landmarks, endTime)\n doc.wasDerivedFrom(landmarks, resource, get_landmarks, get_landmarks, get_landmarks)\n repo.logout()\n return doc\n<|end_body_1|>\n", "revision_id": "90284cf3debbac36eead07b8d2339cdd191b86cf", 
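The data-shaping core of Get_Boston_Landmark.execute (shown in the bodies above) is a pattern worth isolating: read a CSV, project three renamed columns into a fresh DataFrame, then round-trip through to_json(orient='records') so that json.loads yields the list of plain dicts that insert_many expects. A minimal offline sketch, with an inline CSV standing in for the ArcGIS URL and no MongoDB required; the sample rows are invented, only the column names match the record:

import io
import json
import pandas as pd

csv_text = """Name_of_Pr,Address,Neighborho,OBJECTID
Old State House,206 Washington St,Downtown,1
Fenway Park,4 Jersey St,Fenway,2
"""

bl = pd.read_csv(io.StringIO(csv_text))
# Keep only the three columns of interest, under friendlier names.
new_bl = pd.DataFrame({'Name': bl['Name_of_Pr'],
                       'Address': bl['Address'],
                       'Neighborhood': bl['Neighborho']})
# to_json(orient='records') produces a JSON array of row objects;
# json.loads turns it into a list of dicts ready for insert_many.
r = json.loads(new_bl.to_json(orient='records'))
print(r[0])
# {'Name': 'Old State House', 'Address': '206 Washington St', 'Neighborhood': 'Downtown'}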
"skeleton": "<|skeleton|>\nclass Get_Boston_Landmark:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets (not using the API here for the sake of simplicity).\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Get_Boston_Landmark:\n def execute(trial=False):\n \"\"\"Retrieve some data sets (not using the API here for the sake of simplicity).\"\"\"\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('hxjia_jiahaozh', 'hxjia_jiahaozh')\n url = 'http://bostonopendata-boston.opendata.arcgis.com/datasets/7a7aca614ad740e99b060e0ee787a228_3.csv'\n bl = pd.read_csv(url)\n new_bl = pd.DataFrame({'Name': bl['Name_of_Pr'], 'Address': bl['Address'], 'Neighborhood': bl['Neighborho']})\n r = json.loads(new_bl.to_json(orient='records'))\n repo.dropCollection('Boston_Landmarks')\n repo.createCollection('Boston_Landmarks')\n repo['hxjia_jiahaozh.Boston_Landmarks'].insert_many(r)\n repo['hxjia_jiahaozh.Boston_Landmarks'].metadata({'complete': True})\n print(repo['hxjia_jiahaozh.Boston_Landmarks'].metadata())\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('hxjia_jiahaozh', 'hxjia_jiahaozh')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/hxjia_jiahaozh/bostonlandmark')\n doc.add_namespace('dat', 'http://datamechanics.io/data/hxjia_jiahaozh/landmark')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('bdp', 'http://bostonopendata-boston.opendata.arcgis.com/datasets/')\n this_script = doc.agent('alg:hxjia_jiahaozh#get_boston_landmark', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n resource = doc.entity('bdp:7a7aca614ad740e99b060e0ee787a228_3', {'prov:label': 'Boston_Landmarks, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'csv'})\n get_landmarks = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_landmarks, this_script)\n doc.usage(get_landmarks, resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Boston+Landmarks&$select=Name,Address,Neighborhood'})\n landmarks = doc.entity('dat:hxjia_jiahaozh#landmarks', {prov.model.PROV_LABEL: 'Boston Landmarks', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(landmarks, this_script)\n doc.wasGeneratedBy(landmarks, get_landmarks, endTime)\n doc.wasDerivedFrom(landmarks, resource, get_landmarks, get_landmarks, get_landmarks)\n repo.logout()\n return doc\n", "source": "the_stack_v2_python_sparse", "source_path": "hxjia_jiahaozh/Get_Boston_Landmark.py", "source_repo": "maximega/course-2019-spr-proj", "split": "val", "star_events_count": 2} {"blob_id": 
"cc7974301495a200f82301736e6ec4d6d05b4ca2", "bodies": ["if current + towers[current] >= len(towers):\n return current + towers[current]\nsub = towers[current + 1:current + towers[current] + 1]\noptions = []\nfor i, v in enumerate(sub):\n if v == 0:\n options.append(v)\n else:\n options.append(v + i)\nif options:\n sub_next = options.index(max(options))\n current += sub_next + 1\nreturn current", "if len(towers) == 0 or towers[0] == 0:\n return False\nprint('Testing {}'.format(towers))\ncurrent = 0\ncount = 0\nwhile True:\n if current >= len(towers):\n print('hopping with {} numbers of jumps'.format(count))\n return True\n if towers[current] == 0:\n return False\n count += 1\n current = NativeSolution.next_step(current, towers)"], "bodies_text": "<|body_start_0|>\n if current + towers[current] >= len(towers):\n return current + towers[current]\n sub = towers[current + 1:current + towers[current] + 1]\n options = []\n for i, v in enumerate(sub):\n if v == 0:\n options.append(v)\n else:\n options.append(v + i)\n if options:\n sub_next = options.index(max(options))\n current += sub_next + 1\n return current\n<|end_body_0|>\n\n<|body_start_1|>\n if len(towers) == 0 or towers[0] == 0:\n return False\n print('Testing {}'.format(towers))\n current = 0\n count = 0\n while True:\n if current >= len(towers):\n print('hopping with {} numbers of jumps'.format(count))\n return True\n if towers[current] == 0:\n return False\n count += 1\n current = NativeSolution.next_step(current, towers)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "NativeSolution", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NativeSolution:\n\n def next_step(current, towers):\n \"\"\"The following algorithm will try to identify the next best step :param current: :param towers: :return:\"\"\"\n <|body_0|>\n\n def is_hopable(towers):\n \"\"\"Check if a given seq can be hap till user in position 0 will be able to jump outside the seq :param towers: list of integers describes set of towers :return: Boolean\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if current + towers[current] >= len(towers):\n return current + towers[current]\n sub = towers[current + 1:current + towers[current] + 1]\n options = []\n for i, v in enumerate(sub):\n if v == 0:\n options.append(v)\n else:\n options.append(v + i)\n if options:\n sub_next = options.index(max(options))\n current += sub_next + 1\n return current\n<|end_body_0|>\n\n<|body_start_1|>\n if len(towers) == 0 or towers[0] == 0:\n return False\n print('Testing {}'.format(towers))\n current = 0\n count = 0\n while True:\n if current >= len(towers):\n print('hopping with {} numbers of jumps'.format(count))\n return True\n if towers[current] == 0:\n return False\n count += 1\n current = NativeSolution.next_step(current, towers)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000487", "length_bytes": 4795, "license_type": "permissive", "methods": [{"docstring": "The following algorithm will try to identify the next best step :param current: :param towers: :return:", "name": "next_step", "signature": "def next_step(current, towers)"}, {"docstring": "Check if a given seq can be hap till user in position 0 will be able to jump outside the seq :param towers: list of integers describes set of towers :return: Boolean", "name": "is_hopable", "signature": "def is_hopable(towers)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002142", "prompt": "Implement the Python class 
`NativeSolution` described below.\n\nClass description:\nImplement the NativeSolution class.\n\nMethod signatures and docstrings:\n- def next_step(current, towers): The following algorithm will try to identify the next best step :param current: :param towers: :return:\n- def is_hopable(towers): Check if a given seq can be hap till user in position 0 will be able to jump outside the seq :param towers: list of integers describes set of towers :return: Boolean", "prompted_full_text": "Implement the Python class `NativeSolution` described below.\n\nClass description:\nImplement the NativeSolution class.\n\nMethod signatures and docstrings:\n- def next_step(current, towers): The following algorithm will try to identify the next best step :param current: :param towers: :return:\n- def is_hopable(towers): Check if a given seq can be hap till user in position 0 will be able to jump outside the seq :param towers: list of integers describes set of towers :return: Boolean\n\n<|skeleton|>\nclass NativeSolution:\n\n def next_step(current, towers):\n \"\"\"The following algorithm will try to identify the next best step :param current: :param towers: :return:\"\"\"\n <|body_0|>\n\n def is_hopable(towers):\n \"\"\"Check if a given seq can be hap till user in position 0 will be able to jump outside the seq :param towers: list of integers describes set of towers :return: Boolean\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if current + towers[current] >= len(towers):\n return current + towers[current]\n sub = towers[current + 1:current + towers[current] + 1]\n options = []\n for i, v in enumerate(sub):\n if v == 0:\n options.append(v)\n else:\n options.append(v + i)\n if options:\n sub_next = options.index(max(options))\n current += sub_next + 1\n return current\n<|end_body_0|>\n\n<|body_start_1|>\n if len(towers) == 0 or towers[0] == 0:\n return False\n print('Testing {}'.format(towers))\n current = 0\n count = 0\n while True:\n if current >= len(towers):\n print('hopping with {} numbers of jumps'.format(count))\n return True\n if towers[current] == 0:\n return False\n count += 1\n current = NativeSolution.next_step(current, towers)\n<|end_body_1|>\n", "revision_id": "fd30805aa94332a6c14c9d8631c7044673fb3e2c", "skeleton": "<|skeleton|>\nclass NativeSolution:\n\n def next_step(current, towers):\n \"\"\"The following algorithm will try to identify the next best step :param current: :param towers: :return:\"\"\"\n <|body_0|>\n\n def is_hopable(towers):\n \"\"\"Check if a given seq can be hap till user in position 0 will be able to jump outside the seq :param towers: list of integers describes set of towers :return: Boolean\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NativeSolution:\n def next_step(current, towers):\n \"\"\"The following algorithm will try to identify the next best step :param current: :param towers: :return:\"\"\"\n if current + towers[current] >= len(towers):\n return current + towers[current]\n sub = towers[current + 1:current + towers[current] + 1]\n options = []\n for i, v in enumerate(sub):\n if v == 0:\n options.append(v)\n else:\n options.append(v + i)\n if options:\n sub_next = options.index(max(options))\n current += sub_next + 1\n return current\n\n def is_hopable(towers):\n \"\"\"Check if a given seq can be hap till user in position 0 will be able to jump outside the seq :param towers: list of integers describes set of towers :return: 
Boolean\"\"\"\n if len(towers) == 0 or towers[0] == 0:\n return False\n print('Testing {}'.format(towers))\n current = 0\n count = 0\n while True:\n if current >= len(towers):\n print('hopping with {} numbers of jumps'.format(count))\n return True\n if towers[current] == 0:\n return False\n count += 1\n current = NativeSolution.next_step(current, towers)\n", "source": "the_stack_v2_python_sparse", "source_path": "algo/problems/tower_hopper_problem.py", "source_repo": "avi3tal/knowledgebase", "split": "val", "star_events_count": 0} {"blob_id": "92d64889fbd888a966aa6ba04dddd59272162435", "bodies": ["if plan is None:\n plan = self.migration_plan(targets)\nfull_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)\nstate = self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial)\nself.check_replacements()\nreturn state", "migrations_to_run = {m[0] for m in plan}\nstate = ProjectState(real_apps=list(self.loader.unmigrated_apps))\nfor migration, _ in full_plan:\n if not migrations_to_run:\n break\n if migration in migrations_to_run:\n if 'apps' not in state.__dict__:\n if self.progress_callback:\n self.progress_callback('render_start')\n state.apps\n if self.progress_callback:\n self.progress_callback('render_success')\n state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\n migrations_to_run.remove(migration)\nreturn state"], "bodies_text": "<|body_start_0|>\n if plan is None:\n plan = self.migration_plan(targets)\n full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)\n state = self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial)\n self.check_replacements()\n return state\n<|end_body_0|>\n\n<|body_start_1|>\n migrations_to_run = {m[0] for m in plan}\n state = ProjectState(real_apps=list(self.loader.unmigrated_apps))\n for migration, _ in full_plan:\n if not migrations_to_run:\n break\n if migration in migrations_to_run:\n if 'apps' not in state.__dict__:\n if self.progress_callback:\n self.progress_callback('render_start')\n state.apps\n if self.progress_callback:\n self.progress_callback('render_success')\n state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\n migrations_to_run.remove(migration)\n return state\n<|end_body_1|>\n", "class_docstring": "", "class_name": "BackupMigrationExecutor", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BackupMigrationExecutor:\n\n def migrate(self, targets, plan=None, fake=False, fake_initial=False):\n \"\"\"Migrates the database up to the given targets. 
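NativeSolution.next_step above is the greedy choice for the tower-hopper problem: from the current index, look at every tower inside jumping range and score it by offset plus its own height, i.e. by how far it could reach in turn, with zero towers treated as dead ends. A short worked trace of one scoring step (the comprehension condenses the record's explicit loop; the input list is invented):

towers = [4, 2, 0, 0, 2, 0]

current = 0
# Candidates reachable from index 0: towers[1..4].
sub = towers[current + 1: current + towers[current] + 1]      # [2, 0, 0, 2]
options = [0 if v == 0 else v + i for i, v in enumerate(sub)]
print(options)        # [2, 0, 0, 5] -> best candidate is offset 3, i.e. index 4
best = current + options.index(max(options)) + 1
print(best)           # 4; and 4 + towers[4] = 6 >= len(towers), so the next hop escapes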
Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations.\"\"\"\n <|body_0|>\n\n def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial):\n \"\"\"Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if plan is None:\n plan = self.migration_plan(targets)\n full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)\n state = self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial)\n self.check_replacements()\n return state\n<|end_body_0|>\n\n<|body_start_1|>\n migrations_to_run = {m[0] for m in plan}\n state = ProjectState(real_apps=list(self.loader.unmigrated_apps))\n for migration, _ in full_plan:\n if not migrations_to_run:\n break\n if migration in migrations_to_run:\n if 'apps' not in state.__dict__:\n if self.progress_callback:\n self.progress_callback('render_start')\n state.apps\n if self.progress_callback:\n self.progress_callback('render_success')\n state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\n migrations_to_run.remove(migration)\n return state\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000488", "length_bytes": 2168, "license_type": "no_license", "methods": [{"docstring": "Migrates the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations.", "name": "migrate", "signature": "def migrate(self, targets, plan=None, fake=False, fake_initial=False)"}, {"docstring": "Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan.", "name": "_migrate_all_forwards", "signature": "def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial)"}], "n_methods": 2, "prompt": "Implement the Python class `BackupMigrationExecutor` described below.\n\nClass description:\nImplement the BackupMigrationExecutor class.\n\nMethod signatures and docstrings:\n- def migrate(self, targets, plan=None, fake=False, fake_initial=False): Migrates the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations.\n- def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial): Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan.", "prompted_full_text": "Implement the Python class `BackupMigrationExecutor` described below.\n\nClass description:\nImplement the BackupMigrationExecutor class.\n\nMethod signatures and docstrings:\n- def migrate(self, targets, plan=None, fake=False, fake_initial=False): Migrates the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations.\n- def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial): Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan.\n\n<|skeleton|>\nclass BackupMigrationExecutor:\n\n def migrate(self, targets, plan=None, fake=False, fake_initial=False):\n \"\"\"Migrates the database up to the given targets. 
Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations.\"\"\"\n <|body_0|>\n\n def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial):\n \"\"\"Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if plan is None:\n plan = self.migration_plan(targets)\n full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)\n state = self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial)\n self.check_replacements()\n return state\n<|end_body_0|>\n\n<|body_start_1|>\n migrations_to_run = {m[0] for m in plan}\n state = ProjectState(real_apps=list(self.loader.unmigrated_apps))\n for migration, _ in full_plan:\n if not migrations_to_run:\n break\n if migration in migrations_to_run:\n if 'apps' not in state.__dict__:\n if self.progress_callback:\n self.progress_callback('render_start')\n state.apps\n if self.progress_callback:\n self.progress_callback('render_success')\n state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\n migrations_to_run.remove(migration)\n return state\n<|end_body_1|>\n", "revision_id": "879111874d1ef70418b4890cf970720b0a2be4d8", "skeleton": "<|skeleton|>\nclass BackupMigrationExecutor:\n\n def migrate(self, targets, plan=None, fake=False, fake_initial=False):\n \"\"\"Migrates the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations.\"\"\"\n <|body_0|>\n\n def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial):\n \"\"\"Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BackupMigrationExecutor:\n def migrate(self, targets, plan=None, fake=False, fake_initial=False):\n \"\"\"Migrates the database up to the given targets. 
Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations.\"\"\"\n if plan is None:\n plan = self.migration_plan(targets)\n full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)\n state = self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial)\n self.check_replacements()\n return state\n\n def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial):\n \"\"\"Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan.\"\"\"\n migrations_to_run = {m[0] for m in plan}\n state = ProjectState(real_apps=list(self.loader.unmigrated_apps))\n for migration, _ in full_plan:\n if not migrations_to_run:\n break\n if migration in migrations_to_run:\n if 'apps' not in state.__dict__:\n if self.progress_callback:\n self.progress_callback('render_start')\n state.apps\n if self.progress_callback:\n self.progress_callback('render_success')\n state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\n migrations_to_run.remove(migration)\n return state\n", "source": "the_stack_v2_python_sparse", "source_path": "apps/backups/executor.py", "source_repo": "faierbol/syncano-platform", "split": "val", "star_events_count": 0} {"blob_id": "3fb539bf8d812a1b2dc2c76b9ac73ac440070192", "bodies": ["if not divisor or (dividend == -2 ** 31 and divisor == -1) or (dividend == 2 ** 31 and divisor == 1):\n return 2 ** 31 - 1\nresult = 0\nsign = dividend * divisor < 0\ndividendabs = abs(dividend)\ndivisorabs = abs(divisor)\nwhile divisorabs <= dividendabs:\n temp, doubles = (divisorabs, 1)\n while temp << 1 <= dividendabs:\n temp <<= 1\n doubles <<= 1\n dividendabs -= temp\n result += doubles\nreturn -1 * result if sign else result", "if not divisor or (dividend == -2 ** 31 and divisor == -1) or (dividend == 2 ** 31 and divisor == 1):\n return 2 ** 31 - 1\ndividendabs = abs(dividend)\ndivisorabs = abs(divisor)\nmaxShiftDigit = 0\nwhile divisorabs << maxShiftDigit <= dividendabs:\n maxShiftDigit += 1\nsign, result = (dividend * divisor < 0, 0)\nfor i in range(maxShiftDigit - 1, -1, -1):\n shiftValue = divisorabs << i\n if shiftValue <= dividendabs:\n result += 1 << i\n dividendabs -= shiftValue\nreturn -1 * result if sign else result"], "bodies_text": "<|body_start_0|>\n if not divisor or (dividend == -2 ** 31 and divisor == -1) or (dividend == 2 ** 31 and divisor == 1):\n return 2 ** 31 - 1\n result = 0\n sign = dividend * divisor < 0\n dividendabs = abs(dividend)\n divisorabs = abs(divisor)\n while divisorabs <= dividendabs:\n temp, doubles = (divisorabs, 1)\n while temp << 1 <= dividendabs:\n temp <<= 1\n doubles <<= 1\n dividendabs -= temp\n result += doubles\n return -1 * result if sign else result\n<|end_body_0|>\n\n<|body_start_1|>\n if not divisor or (dividend == -2 ** 31 and divisor == -1) or (dividend == 2 ** 31 and divisor == 1):\n return 2 ** 31 - 1\n dividendabs = abs(dividend)\n divisorabs = abs(divisor)\n maxShiftDigit = 0\n while divisorabs << maxShiftDigit <= dividendabs:\n maxShiftDigit += 1\n sign, result = (dividend * divisor < 0, 0)\n for i in range(maxShiftDigit - 1, -1, -1):\n shiftValue = divisorabs << i\n if shiftValue <= dividendabs:\n result += 1 << i\n dividendabs -= shiftValue\n return -1 * result if sign else result\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": 
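The loop shape in _migrate_all_forwards above generalizes beyond Django: given a dependency-ordered full plan and a subset that should actually run, iterate the full plan, apply only members of the subset, and break early once the subset is exhausted, which preserves ordering without re-sorting the subset. A Django-free sketch of just that loop (function and variable names here are illustrative, not the real executor API):

def run_subset_in_order(full_plan, plan_subset, apply):
    """Apply only the items in plan_subset, in full_plan order."""
    to_run = set(plan_subset)
    for migration in full_plan:
        if not to_run:
            break                       # everything requested has run
        if migration in to_run:
            apply(migration)
            to_run.remove(migration)

applied = []
run_subset_in_order(full_plan=['0001', '0002', '0003', '0004'],
                    plan_subset={'0003', '0001'},
                    apply=applied.append)
print(applied)   # ['0001', '0003'] -- the subset runs in full-plan order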
"<|skeleton|>\nclass Solution:\n\n def divide(self, dividend, divisor):\n \"\"\":type dividend: int :type divisor: int :rtype: int\"\"\"\n <|body_0|>\n\n def divide__shift(self, dividend, divisor):\n \"\"\":type dividend: int :type divisor: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not divisor or (dividend == -2 ** 31 and divisor == -1) or (dividend == 2 ** 31 and divisor == 1):\n return 2 ** 31 - 1\n result = 0\n sign = dividend * divisor < 0\n dividendabs = abs(dividend)\n divisorabs = abs(divisor)\n while divisorabs <= dividendabs:\n temp, doubles = (divisorabs, 1)\n while temp << 1 <= dividendabs:\n temp <<= 1\n doubles <<= 1\n dividendabs -= temp\n result += doubles\n return -1 * result if sign else result\n<|end_body_0|>\n\n<|body_start_1|>\n if not divisor or (dividend == -2 ** 31 and divisor == -1) or (dividend == 2 ** 31 and divisor == 1):\n return 2 ** 31 - 1\n dividendabs = abs(dividend)\n divisorabs = abs(divisor)\n maxShiftDigit = 0\n while divisorabs << maxShiftDigit <= dividendabs:\n maxShiftDigit += 1\n sign, result = (dividend * divisor < 0, 0)\n for i in range(maxShiftDigit - 1, -1, -1):\n shiftValue = divisorabs << i\n if shiftValue <= dividendabs:\n result += 1 << i\n dividendabs -= shiftValue\n return -1 * result if sign else result\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000489", "length_bytes": 1590, "license_type": "no_license", "methods": [{"docstring": ":type dividend: int :type divisor: int :rtype: int", "name": "divide", "signature": "def divide(self, dividend, divisor)"}, {"docstring": ":type dividend: int :type divisor: int :rtype: int", "name": "divide__shift", "signature": "def divide__shift(self, dividend, divisor)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005658", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def divide(self, dividend, divisor): :type dividend: int :type divisor: int :rtype: int\n- def divide__shift(self, dividend, divisor): :type dividend: int :type divisor: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def divide(self, dividend, divisor): :type dividend: int :type divisor: int :rtype: int\n- def divide__shift(self, dividend, divisor): :type dividend: int :type divisor: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def divide(self, dividend, divisor):\n \"\"\":type dividend: int :type divisor: int :rtype: int\"\"\"\n <|body_0|>\n\n def divide__shift(self, dividend, divisor):\n \"\"\":type dividend: int :type divisor: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not divisor or (dividend == -2 ** 31 and divisor == -1) or (dividend == 2 ** 31 and divisor == 1):\n return 2 ** 31 - 1\n result = 0\n sign = dividend * divisor < 0\n dividendabs = abs(dividend)\n divisorabs = abs(divisor)\n while divisorabs <= dividendabs:\n temp, doubles = (divisorabs, 1)\n while temp << 1 <= dividendabs:\n temp <<= 1\n doubles <<= 1\n dividendabs -= temp\n result += doubles\n return -1 * result if sign else result\n<|end_body_0|>\n\n<|body_start_1|>\n if not divisor or (dividend == -2 ** 31 and divisor == -1) or (dividend == 2 ** 31 and divisor == 1):\n return 2 ** 31 - 1\n dividendabs = abs(dividend)\n divisorabs = abs(divisor)\n maxShiftDigit = 0\n while divisorabs << maxShiftDigit <= 
dividendabs:\n maxShiftDigit += 1\n sign, result = (dividend * divisor < 0, 0)\n for i in range(maxShiftDigit - 1, -1, -1):\n shiftValue = divisorabs << i\n if shiftValue <= dividendabs:\n result += 1 << i\n dividendabs -= shiftValue\n return -1 * result if sign else result\n<|end_body_1|>\n", "revision_id": "b5c25f976866eefec33b96c638a4c5e127319e74", "skeleton": "<|skeleton|>\nclass Solution:\n\n def divide(self, dividend, divisor):\n \"\"\":type dividend: int :type divisor: int :rtype: int\"\"\"\n <|body_0|>\n\n def divide__shift(self, dividend, divisor):\n \"\"\":type dividend: int :type divisor: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def divide(self, dividend, divisor):\n \"\"\":type dividend: int :type divisor: int :rtype: int\"\"\"\n if not divisor or (dividend == -2 ** 31 and divisor == -1) or (dividend == 2 ** 31 and divisor == 1):\n return 2 ** 31 - 1\n result = 0\n sign = dividend * divisor < 0\n dividendabs = abs(dividend)\n divisorabs = abs(divisor)\n while divisorabs <= dividendabs:\n temp, doubles = (divisorabs, 1)\n while temp << 1 <= dividendabs:\n temp <<= 1\n doubles <<= 1\n dividendabs -= temp\n result += doubles\n return -1 * result if sign else result\n\n def divide__shift(self, dividend, divisor):\n \"\"\":type dividend: int :type divisor: int :rtype: int\"\"\"\n if not divisor or (dividend == -2 ** 31 and divisor == -1) or (dividend == 2 ** 31 and divisor == 1):\n return 2 ** 31 - 1\n dividendabs = abs(dividend)\n divisorabs = abs(divisor)\n maxShiftDigit = 0\n while divisorabs << maxShiftDigit <= dividendabs:\n maxShiftDigit += 1\n sign, result = (dividend * divisor < 0, 0)\n for i in range(maxShiftDigit - 1, -1, -1):\n shiftValue = divisorabs << i\n if shiftValue <= dividendabs:\n result += 1 << i\n dividendabs -= shiftValue\n return -1 * result if sign else result\n", "source": "the_stack_v2_python_sparse", "source_path": "Python/029_Divide Two Integers.py", "source_repo": "Eddie02582/Leetcode", "split": "val", "star_events_count": 1} {"blob_id": "dcbdb5c8a4aae1c6068f9271b7bfb63fe8172704", "bodies": ["self.read_only = read_only\nself.secret_name = secret_name\nself.share_name = share_name", "if dictionary is None:\n return None\nread_only = dictionary.get('readOnly')\nsecret_name = dictionary.get('secretName')\nshare_name = dictionary.get('shareName')\nreturn cls(read_only, secret_name, share_name)"], "bodies_text": "<|body_start_0|>\n self.read_only = read_only\n self.secret_name = secret_name\n self.share_name = share_name\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n read_only = dictionary.get('readOnly')\n secret_name = dictionary.get('secretName')\n share_name = dictionary.get('shareName')\n return cls(read_only, secret_name, share_name)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'PodInfo_PodSpec_VolumeInfo_AzureFile' model. TODO: type description here. Attributes: read_only (string): TODO: Type description here. secret_name (string): TODO: Type description here. share_name (string): TODO: Type description here.", "class_name": "PodInfo_PodSpec_VolumeInfo_AzureFile", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PodInfo_PodSpec_VolumeInfo_AzureFile:\n \"\"\"Implementation of the 'PodInfo_PodSpec_VolumeInfo_AzureFile' model. TODO: type description here. 
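Both method bodies in the Solution record above implement the same bit-shift division technique: find the largest left shift of the divisor that still fits into the dividend, subtract it, and accumulate the matching power of two in the quotient. A minimal standalone sketch of that technique, assuming LeetCode-style 32-bit overflow clamping (the function name divide_by_shifting is illustrative and not part of the record):

    def divide_by_shifting(dividend: int, divisor: int) -> int:
        # Quotient of dividend / divisor, truncated toward zero, using
        # only subtraction and shifts (no '/', '*', or '%').
        INT_MAX = 2 ** 31 - 1
        # Overflow clamp mirroring the record: -2**31 / -1 exceeds INT_MAX.
        if divisor == 0 or (dividend == -2 ** 31 and divisor == -1):
            return INT_MAX
        negative = (dividend < 0) != (divisor < 0)
        dividend, divisor = (abs(dividend), abs(divisor))
        quotient = 0
        for shift in range(31, -1, -1):
            if divisor << shift <= dividend:
                quotient += 1 << shift
                dividend -= divisor << shift
        return -quotient if negative else quotient

For example, divide_by_shifting(43, 5) subtracts 5 << 3 == 40 first, finds no smaller shifted divisor that still fits, and returns 8.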
Attributes: read_only (string): TODO: Type description here. secret_name (string): TODO: Type description here. share_name (string): TODO: Type description here.\"\"\"\n\n def __init__(self, read_only=None, secret_name=None, share_name=None):\n \"\"\"Constructor for the PodInfo_PodSpec_VolumeInfo_AzureFile class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.read_only = read_only\n self.secret_name = secret_name\n self.share_name = share_name\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n read_only = dictionary.get('readOnly')\n secret_name = dictionary.get('secretName')\n share_name = dictionary.get('shareName')\n return cls(read_only, secret_name, share_name)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000490", "length_bytes": 1847, "license_type": "permissive", "methods": [{"docstring": "Constructor for the PodInfo_PodSpec_VolumeInfo_AzureFile class", "name": "__init__", "signature": "def __init__(self, read_only=None, secret_name=None, share_name=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005231", "prompt": "Implement the Python class `PodInfo_PodSpec_VolumeInfo_AzureFile` described below.\n\nClass description:\nImplementation of the 'PodInfo_PodSpec_VolumeInfo_AzureFile' model. TODO: type description here. Attributes: read_only (string): TODO: Type description here. secret_name (string): TODO: Type description here. share_name (string): TODO: Type description here.\n\nMethod signatures and docstrings:\n- def __init__(self, read_only=None, secret_name=None, share_name=None): Constructor for the PodInfo_PodSpec_VolumeInfo_AzureFile class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `PodInfo_PodSpec_VolumeInfo_AzureFile` described below.\n\nClass description:\nImplementation of the 'PodInfo_PodSpec_VolumeInfo_AzureFile' model. TODO: type description here. Attributes: read_only (string): TODO: Type description here. secret_name (string): TODO: Type description here. 
share_name (string): TODO: Type description here.\n\nMethod signatures and docstrings:\n- def __init__(self, read_only=None, secret_name=None, share_name=None): Constructor for the PodInfo_PodSpec_VolumeInfo_AzureFile class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass PodInfo_PodSpec_VolumeInfo_AzureFile:\n \"\"\"Implementation of the 'PodInfo_PodSpec_VolumeInfo_AzureFile' model. TODO: type description here. Attributes: read_only (string): TODO: Type description here. secret_name (string): TODO: Type description here. share_name (string): TODO: Type description here.\"\"\"\n\n def __init__(self, read_only=None, secret_name=None, share_name=None):\n \"\"\"Constructor for the PodInfo_PodSpec_VolumeInfo_AzureFile class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.read_only = read_only\n self.secret_name = secret_name\n self.share_name = share_name\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n read_only = dictionary.get('readOnly')\n secret_name = dictionary.get('secretName')\n share_name = dictionary.get('shareName')\n return cls(read_only, secret_name, share_name)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass PodInfo_PodSpec_VolumeInfo_AzureFile:\n \"\"\"Implementation of the 'PodInfo_PodSpec_VolumeInfo_AzureFile' model. TODO: type description here. Attributes: read_only (string): TODO: Type description here. secret_name (string): TODO: Type description here. share_name (string): TODO: Type description here.\"\"\"\n\n def __init__(self, read_only=None, secret_name=None, share_name=None):\n \"\"\"Constructor for the PodInfo_PodSpec_VolumeInfo_AzureFile class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PodInfo_PodSpec_VolumeInfo_AzureFile:\n \"\"\"Implementation of the 'PodInfo_PodSpec_VolumeInfo_AzureFile' model. TODO: type description here. Attributes: read_only (string): TODO: Type description here. secret_name (string): TODO: Type description here. 
share_name (string): TODO: Type description here.\"\"\"\n\n def __init__(self, read_only=None, secret_name=None, share_name=None):\n \"\"\"Constructor for the PodInfo_PodSpec_VolumeInfo_AzureFile class\"\"\"\n self.read_only = read_only\n self.secret_name = secret_name\n self.share_name = share_name\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n read_only = dictionary.get('readOnly')\n secret_name = dictionary.get('secretName')\n share_name = dictionary.get('shareName')\n return cls(read_only, secret_name, share_name)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/pod_info_pod_spec_volume_info_azure_file.py", "source_repo": "cohesity/management-sdk-python", "split": "val", "star_events_count": 24} {"blob_id": "8f628d9883f132531e7589e207ab2ae7091bff3e", "bodies": ["target_type = value.type.target()\nprint(\"'%s' is an array of '%s'.\" % (expr, str(target_type)))\nindex = 0\ntry:\n index = int(raw_input(\"Enter the index of the element you want to explore in '%s': \" % expr))\nexcept ValueError:\n if is_child:\n Explorer.return_to_parent_value()\n return False\nelement = None\ntry:\n element = value[index]\n str(element)\nexcept gdb.MemoryError:\n print('Cannot read value at index %d.' % index)\n raw_input('Press enter to continue... ')\n return True\nExplorer.explore_expr('%s[%d]' % (Explorer.guard_expr(expr), index), element, True)\nreturn True", "target_type = datatype.target()\nprint(\"%s is an array of '%s'.\" % (name, str(target_type)))\nExplorer.explore_type('the array element of %s' % name, target_type, is_child)\nreturn False"], "bodies_text": "<|body_start_0|>\n target_type = value.type.target()\n print(\"'%s' is an array of '%s'.\" % (expr, str(target_type)))\n index = 0\n try:\n index = int(raw_input(\"Enter the index of the element you want to explore in '%s': \" % expr))\n except ValueError:\n if is_child:\n Explorer.return_to_parent_value()\n return False\n element = None\n try:\n element = value[index]\n str(element)\n except gdb.MemoryError:\n print('Cannot read value at index %d.' % index)\n raw_input('Press enter to continue... ')\n return True\n Explorer.explore_expr('%s[%d]' % (Explorer.guard_expr(expr), index), element, True)\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n target_type = datatype.target()\n print(\"%s is an array of '%s'.\" % (name, str(target_type)))\n Explorer.explore_type('the array element of %s' % name, target_type, is_child)\n return False\n<|end_body_1|>\n", "class_docstring": "Internal class used to explore arrays.", "class_name": "ArrayExplorer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ArrayExplorer:\n \"\"\"Internal class used to explore arrays.\"\"\"\n\n def explore_expr(expr, value, is_child):\n \"\"\"Function to explore array values. See Explorer.explore_expr for more information.\"\"\"\n <|body_0|>\n\n def explore_type(name, datatype, is_child):\n \"\"\"Function to explore array types. 
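The PodInfo_PodSpec_VolumeInfo_AzureFile record above is a plain deserialization factory: from_dictionary maps the API's camelCase keys onto snake_case constructor arguments. A hedged usage sketch (the payload keys come from the record's bodies; treating from_dictionary as a classmethod is an assumption about the original SDK, since the skeleton shows cls without a decorator):

    payload = {'readOnly': True, 'secretName': 'azure-secret', 'shareName': 'logs'}
    # Assumes @classmethod on from_dictionary in the source SDK.
    volume = PodInfo_PodSpec_VolumeInfo_AzureFile.from_dictionary(payload)
    assert volume.share_name == 'logs'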
See Explorer.explore_type for more information.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n target_type = value.type.target()\n print(\"'%s' is an array of '%s'.\" % (expr, str(target_type)))\n index = 0\n try:\n index = int(raw_input(\"Enter the index of the element you want to explore in '%s': \" % expr))\n except ValueError:\n if is_child:\n Explorer.return_to_parent_value()\n return False\n element = None\n try:\n element = value[index]\n str(element)\n except gdb.MemoryError:\n print('Cannot read value at index %d.' % index)\n raw_input('Press enter to continue... ')\n return True\n Explorer.explore_expr('%s[%d]' % (Explorer.guard_expr(expr), index), element, True)\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n target_type = datatype.target()\n print(\"%s is an array of '%s'.\" % (name, str(target_type)))\n Explorer.explore_type('the array element of %s' % name, target_type, is_child)\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000491", "length_bytes": 26692, "license_type": "permissive", "methods": [{"docstring": "Function to explore array values. See Explorer.explore_expr for more information.", "name": "explore_expr", "signature": "def explore_expr(expr, value, is_child)"}, {"docstring": "Function to explore array types. See Explorer.explore_type for more information.", "name": "explore_type", "signature": "def explore_type(name, datatype, is_child)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005121", "prompt": "Implement the Python class `ArrayExplorer` described below.\n\nClass description:\nInternal class used to explore arrays.\n\nMethod signatures and docstrings:\n- def explore_expr(expr, value, is_child): Function to explore array values. See Explorer.explore_expr for more information.\n- def explore_type(name, datatype, is_child): Function to explore array types. See Explorer.explore_type for more information.", "prompted_full_text": "Implement the Python class `ArrayExplorer` described below.\n\nClass description:\nInternal class used to explore arrays.\n\nMethod signatures and docstrings:\n- def explore_expr(expr, value, is_child): Function to explore array values. See Explorer.explore_expr for more information.\n- def explore_type(name, datatype, is_child): Function to explore array types. See Explorer.explore_type for more information.\n\n<|skeleton|>\nclass ArrayExplorer:\n \"\"\"Internal class used to explore arrays.\"\"\"\n\n def explore_expr(expr, value, is_child):\n \"\"\"Function to explore array values. See Explorer.explore_expr for more information.\"\"\"\n <|body_0|>\n\n def explore_type(name, datatype, is_child):\n \"\"\"Function to explore array types. See Explorer.explore_type for more information.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n target_type = value.type.target()\n print(\"'%s' is an array of '%s'.\" % (expr, str(target_type)))\n index = 0\n try:\n index = int(raw_input(\"Enter the index of the element you want to explore in '%s': \" % expr))\n except ValueError:\n if is_child:\n Explorer.return_to_parent_value()\n return False\n element = None\n try:\n element = value[index]\n str(element)\n except gdb.MemoryError:\n print('Cannot read value at index %d.' % index)\n raw_input('Press enter to continue... 
')\n return True\n Explorer.explore_expr('%s[%d]' % (Explorer.guard_expr(expr), index), element, True)\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n target_type = datatype.target()\n print(\"%s is an array of '%s'.\" % (name, str(target_type)))\n Explorer.explore_type('the array element of %s' % name, target_type, is_child)\n return False\n<|end_body_1|>\n", "revision_id": "b90664de0bd4c1897a9f1f5d9e360a9631d38b34", "skeleton": "<|skeleton|>\nclass ArrayExplorer:\n \"\"\"Internal class used to explore arrays.\"\"\"\n\n def explore_expr(expr, value, is_child):\n \"\"\"Function to explore array values. See Explorer.explore_expr for more information.\"\"\"\n <|body_0|>\n\n def explore_type(name, datatype, is_child):\n \"\"\"Function to explore array types. See Explorer.explore_type for more information.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ArrayExplorer:\n \"\"\"Internal class used to explore arrays.\"\"\"\n\n def explore_expr(expr, value, is_child):\n \"\"\"Function to explore array values. See Explorer.explore_expr for more information.\"\"\"\n target_type = value.type.target()\n print(\"'%s' is an array of '%s'.\" % (expr, str(target_type)))\n index = 0\n try:\n index = int(raw_input(\"Enter the index of the element you want to explore in '%s': \" % expr))\n except ValueError:\n if is_child:\n Explorer.return_to_parent_value()\n return False\n element = None\n try:\n element = value[index]\n str(element)\n except gdb.MemoryError:\n print('Cannot read value at index %d.' % index)\n raw_input('Press enter to continue... ')\n return True\n Explorer.explore_expr('%s[%d]' % (Explorer.guard_expr(expr), index), element, True)\n return True\n\n def explore_type(name, datatype, is_child):\n \"\"\"Function to explore array types. 
See Explorer.explore_type for more information.\"\"\"\n target_type = datatype.target()\n print(\"%s is an array of '%s'.\" % (name, str(target_type)))\n Explorer.explore_type('the array element of %s' % name, target_type, is_child)\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "toolchain/riscv/Linux/share/gdb/python/gdb/command/explore.py", "source_repo": "bouffalolab/bl_iot_sdk", "split": "val", "star_events_count": 244} {"blob_id": "cd81149eb09663da0f05505c52c051ed7981f3aa", "bodies": ["super().__init__(parent=parent)\nself.plotWidget: Optional['PlotWidget'] = None\nself.data: Optional[DataDictBase] = None\nlayout: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout(self)\nlayout.setContentsMargins(0, 0, 0, 0)\nself.setLayout(layout)", "if widget is self.plotWidget:\n return\nif self.plotWidget is not None:\n self.layout().removeWidget(self.plotWidget)\n self.plotWidget.deleteLater()\nself.plotWidget = widget\nif self.plotWidget is not None:\n self.layout().addWidget(widget)\n self.plotWidget.setData(self.data)", "self.data = data\nif self.plotWidget is not None:\n self.plotWidget.setData(self.data)"], "bodies_text": "<|body_start_0|>\n super().__init__(parent=parent)\n self.plotWidget: Optional['PlotWidget'] = None\n self.data: Optional[DataDictBase] = None\n layout: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout(self)\n layout.setContentsMargins(0, 0, 0, 0)\n self.setLayout(layout)\n<|end_body_0|>\n\n<|body_start_1|>\n if widget is self.plotWidget:\n return\n if self.plotWidget is not None:\n self.layout().removeWidget(self.plotWidget)\n self.plotWidget.deleteLater()\n self.plotWidget = widget\n if self.plotWidget is not None:\n self.layout().addWidget(widget)\n self.plotWidget.setData(self.data)\n<|end_body_1|>\n\n<|body_start_2|>\n self.data = data\n if self.plotWidget is not None:\n self.plotWidget.setData(self.data)\n<|end_body_2|>\n", "class_docstring": "This is the base widget for Plots, derived from `QWidget`. This widget does not implement any plotting. It merely is a wrapping widget that contains the actual plot widget in it. This actual plot widget can be set dynamically. Use :class:`PlotWidget` as base for implementing widgets that can be added to this container.", "class_name": "PlotWidgetContainer", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PlotWidgetContainer:\n \"\"\"This is the base widget for Plots, derived from `QWidget`. This widget does not implement any plotting. It merely is a wrapping widget that contains the actual plot widget in it. This actual plot widget can be set dynamically. Use :class:`PlotWidget` as base for implementing widgets that can be added to this container.\"\"\"\n\n def __init__(self, parent: Optional[QtWidgets.QWidget]=None):\n \"\"\"Constructor for :class:`PlotWidgetContainer`.\"\"\"\n <|body_0|>\n\n def setPlotWidget(self, widget: 'PlotWidget') -> None:\n \"\"\"Set the plot widget. Makes sure that the added widget receives new data. :param widget: plot widget\"\"\"\n <|body_1|>\n\n def setData(self, data: DataDictBase) -> None:\n \"\"\"set Data. If a plot widget is defined, call the widget's :meth:`PlotWidget.setData` method. 
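The ArrayExplorer bodies above call raw_input, which exists only under Python 2; gdb's bundled Python scripts commonly bridge this with a one-line alias so the same code also runs under Python 3. A sketch of such a shim (assumed surrounding context, not part of the record itself):

    try:
        raw_input  # Python 2: the builtin already exists.
    except NameError:
        raw_input = input  # Python 3: alias the replacement builtin.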
:param data: input data to be plotted.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(parent=parent)\n self.plotWidget: Optional['PlotWidget'] = None\n self.data: Optional[DataDictBase] = None\n layout: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout(self)\n layout.setContentsMargins(0, 0, 0, 0)\n self.setLayout(layout)\n<|end_body_0|>\n\n<|body_start_1|>\n if widget is self.plotWidget:\n return\n if self.plotWidget is not None:\n self.layout().removeWidget(self.plotWidget)\n self.plotWidget.deleteLater()\n self.plotWidget = widget\n if self.plotWidget is not None:\n self.layout().addWidget(widget)\n self.plotWidget.setData(self.data)\n<|end_body_1|>\n\n<|body_start_2|>\n self.data = data\n if self.plotWidget is not None:\n self.plotWidget.setData(self.data)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000492", "length_bytes": 23346, "license_type": "permissive", "methods": [{"docstring": "Constructor for :class:`PlotWidgetContainer`.", "name": "__init__", "signature": "def __init__(self, parent: Optional[QtWidgets.QWidget]=None)"}, {"docstring": "Set the plot widget. Makes sure that the added widget receives new data. :param widget: plot widget", "name": "setPlotWidget", "signature": "def setPlotWidget(self, widget: 'PlotWidget') -> None"}, {"docstring": "set Data. If a plot widget is defined, call the widget's :meth:`PlotWidget.setData` method. :param data: input data to be plotted.", "name": "setData", "signature": "def setData(self, data: DataDictBase) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002353", "prompt": "Implement the Python class `PlotWidgetContainer` described below.\n\nClass description:\nThis is the base widget for Plots, derived from `QWidget`. This widget does not implement any plotting. It merely is a wrapping widget that contains the actual plot widget in it. This actual plot widget can be set dynamically. Use :class:`PlotWidget` as base for implementing widgets that can be added to this container.\n\nMethod signatures and docstrings:\n- def __init__(self, parent: Optional[QtWidgets.QWidget]=None): Constructor for :class:`PlotWidgetContainer`.\n- def setPlotWidget(self, widget: 'PlotWidget') -> None: Set the plot widget. Makes sure that the added widget receives new data. :param widget: plot widget\n- def setData(self, data: DataDictBase) -> None: set Data. If a plot widget is defined, call the widget's :meth:`PlotWidget.setData` method. :param data: input data to be plotted.", "prompted_full_text": "Implement the Python class `PlotWidgetContainer` described below.\n\nClass description:\nThis is the base widget for Plots, derived from `QWidget`. This widget does not implement any plotting. It merely is a wrapping widget that contains the actual plot widget in it. This actual plot widget can be set dynamically. Use :class:`PlotWidget` as base for implementing widgets that can be added to this container.\n\nMethod signatures and docstrings:\n- def __init__(self, parent: Optional[QtWidgets.QWidget]=None): Constructor for :class:`PlotWidgetContainer`.\n- def setPlotWidget(self, widget: 'PlotWidget') -> None: Set the plot widget. Makes sure that the added widget receives new data. :param widget: plot widget\n- def setData(self, data: DataDictBase) -> None: set Data. If a plot widget is defined, call the widget's :meth:`PlotWidget.setData` method. :param data: input data to be plotted.\n\n<|skeleton|>\nclass PlotWidgetContainer:\n \"\"\"This is the base widget for Plots, derived from `QWidget`. 
This widget does not implement any plotting. It merely is a wrapping widget that contains the actual plot widget in it. This actual plot widget can be set dynamically. Use :class:`PlotWidget` as base for implementing widgets that can be added to this container.\"\"\"\n\n def __init__(self, parent: Optional[QtWidgets.QWidget]=None):\n \"\"\"Constructor for :class:`PlotWidgetContainer`.\"\"\"\n <|body_0|>\n\n def setPlotWidget(self, widget: 'PlotWidget') -> None:\n \"\"\"Set the plot widget. Makes sure that the added widget receives new data. :param widget: plot widget\"\"\"\n <|body_1|>\n\n def setData(self, data: DataDictBase) -> None:\n \"\"\"set Data. If a plot widget is defined, call the widget's :meth:`PlotWidget.setData` method. :param data: input data to be plotted.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(parent=parent)\n self.plotWidget: Optional['PlotWidget'] = None\n self.data: Optional[DataDictBase] = None\n layout: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout(self)\n layout.setContentsMargins(0, 0, 0, 0)\n self.setLayout(layout)\n<|end_body_0|>\n\n<|body_start_1|>\n if widget is self.plotWidget:\n return\n if self.plotWidget is not None:\n self.layout().removeWidget(self.plotWidget)\n self.plotWidget.deleteLater()\n self.plotWidget = widget\n if self.plotWidget is not None:\n self.layout().addWidget(widget)\n self.plotWidget.setData(self.data)\n<|end_body_1|>\n\n<|body_start_2|>\n self.data = data\n if self.plotWidget is not None:\n self.plotWidget.setData(self.data)\n<|end_body_2|>\n", "revision_id": "0ccdeb76d44fcc57e5b986c8b75cb0696fbff03b", "skeleton": "<|skeleton|>\nclass PlotWidgetContainer:\n \"\"\"This is the base widget for Plots, derived from `QWidget`. This widget does not implement any plotting. It merely is a wrapping widget that contains the actual plot widget in it. This actual plot widget can be set dynamically. Use :class:`PlotWidget` as base for implementing widgets that can be added to this container.\"\"\"\n\n def __init__(self, parent: Optional[QtWidgets.QWidget]=None):\n \"\"\"Constructor for :class:`PlotWidgetContainer`.\"\"\"\n <|body_0|>\n\n def setPlotWidget(self, widget: 'PlotWidget') -> None:\n \"\"\"Set the plot widget. Makes sure that the added widget receives new data. :param widget: plot widget\"\"\"\n <|body_1|>\n\n def setData(self, data: DataDictBase) -> None:\n \"\"\"set Data. If a plot widget is defined, call the widget's :meth:`PlotWidget.setData` method. :param data: input data to be plotted.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PlotWidgetContainer:\n \"\"\"This is the base widget for Plots, derived from `QWidget`. This widget does not implement any plotting. It merely is a wrapping widget that contains the actual plot widget in it. This actual plot widget can be set dynamically. Use :class:`PlotWidget` as base for implementing widgets that can be added to this container.\"\"\"\n\n def __init__(self, parent: Optional[QtWidgets.QWidget]=None):\n \"\"\"Constructor for :class:`PlotWidgetContainer`.\"\"\"\n super().__init__(parent=parent)\n self.plotWidget: Optional['PlotWidget'] = None\n self.data: Optional[DataDictBase] = None\n layout: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout(self)\n layout.setContentsMargins(0, 0, 0, 0)\n self.setLayout(layout)\n\n def setPlotWidget(self, widget: 'PlotWidget') -> None:\n \"\"\"Set the plot widget. 
Makes sure that the added widget receives new data. :param widget: plot widget\"\"\"\n if widget is self.plotWidget:\n return\n if self.plotWidget is not None:\n self.layout().removeWidget(self.plotWidget)\n self.plotWidget.deleteLater()\n self.plotWidget = widget\n if self.plotWidget is not None:\n self.layout().addWidget(widget)\n self.plotWidget.setData(self.data)\n\n def setData(self, data: DataDictBase) -> None:\n \"\"\"set Data. If a plot widget is defined, call the widget's :meth:`PlotWidget.setData` method. :param data: input data to be plotted.\"\"\"\n self.data = data\n if self.plotWidget is not None:\n self.plotWidget.setData(self.data)\n", "source": "the_stack_v2_python_sparse", "source_path": "plottr/plot/base.py", "source_repo": "labist/plottr", "split": "val", "star_events_count": 0} {"blob_id": "1030b9670235ecefde328f82df820232ca97e0f6", "bodies": ["self.battle = battle\nPygameController.__init__(self, screen)\nself.coroutine = self.performEntireRound()", "self.screen.setBottomView(None)\nPerformEvents(self.battle.eventQueue, self)\nself.coroutine.send(None)\nif self.battle.over:\n self.stopRunning()", "while not self.battle.over:\n self.performRound()\n yield\n self.refillSides()\n yield", "pokemonActions = {}\nfor pokemon in self.battle.playerSide.pkmnInPlay:\n if not pokemon.actionLock:\n actionMenuController = ActionMenuController(pokemon, self.battle, self.screen)\n self.runController(actionMenuController)\n if actionMenuController.action is None:\n return\n else:\n pokemonActions[pokemon] = actionMenuController.action\nself.battle.performRound(pokemonActions)", "pokemonReplacements = {}\nif self.battle.playerSide.hasMorePokemon():\n for pokemon in self.battle.playerSide.pkmnInPlay:\n if pokemon.fainted():\n switchMenuController = SwitchMenuController(pokemon, cancellable=False)\n self.runController(switchMenuController)\n pokemonReplacements[pokemon] = switchMenuController.action.pkmnToSwitchTo\nself.battle.refillSides(pokemonReplacements)"], "bodies_text": "<|body_start_0|>\n self.battle = battle\n PygameController.__init__(self, screen)\n self.coroutine = self.performEntireRound()\n<|end_body_0|>\n\n<|body_start_1|>\n self.screen.setBottomView(None)\n PerformEvents(self.battle.eventQueue, self)\n self.coroutine.send(None)\n if self.battle.over:\n self.stopRunning()\n<|end_body_1|>\n\n<|body_start_2|>\n while not self.battle.over:\n self.performRound()\n yield\n self.refillSides()\n yield\n<|end_body_2|>\n\n<|body_start_3|>\n pokemonActions = {}\n for pokemon in self.battle.playerSide.pkmnInPlay:\n if not pokemon.actionLock:\n actionMenuController = ActionMenuController(pokemon, self.battle, self.screen)\n self.runController(actionMenuController)\n if actionMenuController.action is None:\n return\n else:\n pokemonActions[pokemon] = actionMenuController.action\n self.battle.performRound(pokemonActions)\n<|end_body_3|>\n\n<|body_start_4|>\n pokemonReplacements = {}\n if self.battle.playerSide.hasMorePokemon():\n for pokemon in self.battle.playerSide.pkmnInPlay:\n if pokemon.fainted():\n switchMenuController = SwitchMenuController(pokemon, cancellable=False)\n self.runController(switchMenuController)\n pokemonReplacements[pokemon] = switchMenuController.action.pkmnToSwitchTo\n self.battle.refillSides(pokemonReplacements)\n<|end_body_4|>\n", "class_docstring": "Controller for Battle Rounds", "class_name": "BattleRoundController", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BattleRoundController:\n 
\"\"\"Controller for Battle Rounds\"\"\"\n\n def __init__(self, battle, screen):\n \"\"\"Initialize the Battle Round Controller\"\"\"\n <|body_0|>\n\n def performGameCycle(self):\n \"\"\"Tells the battle object what to perform\"\"\"\n <|body_1|>\n\n def performEntireRound(self):\n \"\"\"Perform an Entire Round\"\"\"\n <|body_2|>\n\n def performRound(self):\n \"\"\"Perform a Single Round\"\"\"\n <|body_3|>\n\n def refillSides(self):\n \"\"\"Refill the Battle Sides\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.battle = battle\n PygameController.__init__(self, screen)\n self.coroutine = self.performEntireRound()\n<|end_body_0|>\n\n<|body_start_1|>\n self.screen.setBottomView(None)\n PerformEvents(self.battle.eventQueue, self)\n self.coroutine.send(None)\n if self.battle.over:\n self.stopRunning()\n<|end_body_1|>\n\n<|body_start_2|>\n while not self.battle.over:\n self.performRound()\n yield\n self.refillSides()\n yield\n<|end_body_2|>\n\n<|body_start_3|>\n pokemonActions = {}\n for pokemon in self.battle.playerSide.pkmnInPlay:\n if not pokemon.actionLock:\n actionMenuController = ActionMenuController(pokemon, self.battle, self.screen)\n self.runController(actionMenuController)\n if actionMenuController.action is None:\n return\n else:\n pokemonActions[pokemon] = actionMenuController.action\n self.battle.performRound(pokemonActions)\n<|end_body_3|>\n\n<|body_start_4|>\n pokemonReplacements = {}\n if self.battle.playerSide.hasMorePokemon():\n for pokemon in self.battle.playerSide.pkmnInPlay:\n if pokemon.fainted():\n switchMenuController = SwitchMenuController(pokemon, cancellable=False)\n self.runController(switchMenuController)\n pokemonReplacements[pokemon] = switchMenuController.action.pkmnToSwitchTo\n self.battle.refillSides(pokemonReplacements)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000493", "length_bytes": 2442, "license_type": "no_license", "methods": [{"docstring": "Initialize the Battle Round Controller", "name": "__init__", "signature": "def __init__(self, battle, screen)"}, {"docstring": "Tells the battle object what to perform", "name": "performGameCycle", "signature": "def performGameCycle(self)"}, {"docstring": "Perform an Entire Round", "name": "performEntireRound", "signature": "def performEntireRound(self)"}, {"docstring": "Perform a Single Round", "name": "performRound", "signature": "def performRound(self)"}, {"docstring": "Refill the Battle Sides", "name": "refillSides", "signature": "def refillSides(self)"}], "n_methods": 5, "prompt": "Implement the Python class `BattleRoundController` described below.\n\nClass description:\nController for Battle Rounds\n\nMethod signatures and docstrings:\n- def __init__(self, battle, screen): Initialize the Battle Round Controller\n- def performGameCycle(self): Tells the battle object what to perform\n- def performEntireRound(self): Perform an Entire Round\n- def performRound(self): Perform a Single Round\n- def refillSides(self): Refill the Battle Sides", "prompted_full_text": "Implement the Python class `BattleRoundController` described below.\n\nClass description:\nController for Battle Rounds\n\nMethod signatures and docstrings:\n- def __init__(self, battle, screen): Initialize the Battle Round Controller\n- def performGameCycle(self): Tells the battle object what to perform\n- def performEntireRound(self): Perform an Entire Round\n- def performRound(self): Perform a Single Round\n- def refillSides(self): Refill the Battle Sides\n\n<|skeleton|>\nclass BattleRoundController:\n \"\"\"Controller 
for Battle Rounds\"\"\"\n\n def __init__(self, battle, screen):\n \"\"\"Initialize the Battle Round Controller\"\"\"\n <|body_0|>\n\n def performGameCycle(self):\n \"\"\"Tells the battle object what to perform\"\"\"\n <|body_1|>\n\n def performEntireRound(self):\n \"\"\"Perform an Entire Round\"\"\"\n <|body_2|>\n\n def performRound(self):\n \"\"\"Perform a Single Round\"\"\"\n <|body_3|>\n\n def refillSides(self):\n \"\"\"Refill the Battle Sides\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.battle = battle\n PygameController.__init__(self, screen)\n self.coroutine = self.performEntireRound()\n<|end_body_0|>\n\n<|body_start_1|>\n self.screen.setBottomView(None)\n PerformEvents(self.battle.eventQueue, self)\n self.coroutine.send(None)\n if self.battle.over:\n self.stopRunning()\n<|end_body_1|>\n\n<|body_start_2|>\n while not self.battle.over:\n self.performRound()\n yield\n self.refillSides()\n yield\n<|end_body_2|>\n\n<|body_start_3|>\n pokemonActions = {}\n for pokemon in self.battle.playerSide.pkmnInPlay:\n if not pokemon.actionLock:\n actionMenuController = ActionMenuController(pokemon, self.battle, self.screen)\n self.runController(actionMenuController)\n if actionMenuController.action is None:\n return\n else:\n pokemonActions[pokemon] = actionMenuController.action\n self.battle.performRound(pokemonActions)\n<|end_body_3|>\n\n<|body_start_4|>\n pokemonReplacements = {}\n if self.battle.playerSide.hasMorePokemon():\n for pokemon in self.battle.playerSide.pkmnInPlay:\n if pokemon.fainted():\n switchMenuController = SwitchMenuController(pokemon, cancellable=False)\n self.runController(switchMenuController)\n pokemonReplacements[pokemon] = switchMenuController.action.pkmnToSwitchTo\n self.battle.refillSides(pokemonReplacements)\n<|end_body_4|>\n", "revision_id": "3931eee5fd04e18bb1738a0b27a4c6979dc4db01", "skeleton": "<|skeleton|>\nclass BattleRoundController:\n \"\"\"Controller for Battle Rounds\"\"\"\n\n def __init__(self, battle, screen):\n \"\"\"Initialize the Battle Round Controller\"\"\"\n <|body_0|>\n\n def performGameCycle(self):\n \"\"\"Tells the battle object what to perform\"\"\"\n <|body_1|>\n\n def performEntireRound(self):\n \"\"\"Perform an Entire Round\"\"\"\n <|body_2|>\n\n def performRound(self):\n \"\"\"Perform a Single Round\"\"\"\n <|body_3|>\n\n def refillSides(self):\n \"\"\"Refill the Battle Sides\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BattleRoundController:\n \"\"\"Controller for Battle Rounds\"\"\"\n\n def __init__(self, battle, screen):\n \"\"\"Initialize the Battle Round Controller\"\"\"\n self.battle = battle\n PygameController.__init__(self, screen)\n self.coroutine = self.performEntireRound()\n\n def performGameCycle(self):\n \"\"\"Tells the battle object what to perform\"\"\"\n self.screen.setBottomView(None)\n PerformEvents(self.battle.eventQueue, self)\n self.coroutine.send(None)\n if self.battle.over:\n self.stopRunning()\n\n def performEntireRound(self):\n \"\"\"Perform an Entire Round\"\"\"\n while not self.battle.over:\n self.performRound()\n yield\n self.refillSides()\n yield\n\n def performRound(self):\n \"\"\"Perform a Single Round\"\"\"\n pokemonActions = {}\n for pokemon in self.battle.playerSide.pkmnInPlay:\n if not pokemon.actionLock:\n actionMenuController = ActionMenuController(pokemon, self.battle, self.screen)\n self.runController(actionMenuController)\n if actionMenuController.action is None:\n 
return\n else:\n pokemonActions[pokemon] = actionMenuController.action\n self.battle.performRound(pokemonActions)\n\n def refillSides(self):\n \"\"\"Refill the Battle Sides\"\"\"\n pokemonReplacements = {}\n if self.battle.playerSide.hasMorePokemon():\n for pokemon in self.battle.playerSide.pkmnInPlay:\n if pokemon.fainted():\n switchMenuController = SwitchMenuController(pokemon, cancellable=False)\n self.runController(switchMenuController)\n pokemonReplacements[pokemon] = switchMenuController.action.pkmnToSwitchTo\n self.battle.refillSides(pokemonReplacements)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/Screen/Pygame/Battle/battle_round_controller.py", "source_repo": "sgtnourry/Pokemon-Project", "split": "val", "star_events_count": 0} {"blob_id": "b8b9cbdd0d22beb943304524fd5bce94486a25d7", "bodies": ["self.capacity = capacity\nself.l = DoublyLL()\nself.d = {}", "try:\n n = self.d[key]\nexcept KeyError:\n return -1\nself.l.Remove(n)\nself.l.InsertFirst(n)\nreturn n.value", "if key in self.d:\n node = self.d[key]\n self.l.Remove(node)\n self.l.InsertFirst(node)\n node.value = value\nelse:\n if self.l.count >= self.capacity:\n node = self.l.RemoveTail()\n del self.d[node.key]\n node = Node(key, value)\n self.l.InsertFirst(node)\n self.d[key] = node"], "bodies_text": "<|body_start_0|>\n self.capacity = capacity\n self.l = DoublyLL()\n self.d = {}\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n n = self.d[key]\n except KeyError:\n return -1\n self.l.Remove(n)\n self.l.InsertFirst(n)\n return n.value\n<|end_body_1|>\n\n<|body_start_2|>\n if key in self.d:\n node = self.d[key]\n self.l.Remove(node)\n self.l.InsertFirst(node)\n node.value = value\n else:\n if self.l.count >= self.capacity:\n node = self.l.RemoveTail()\n del self.d[node.key]\n node = Node(key, value)\n self.l.InsertFirst(node)\n self.d[key] = node\n<|end_body_2|>\n", "class_docstring": "", "class_name": "LRUCache", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LRUCache:\n\n def __init__(self, capacity):\n \"\"\":type capacity: int\"\"\"\n <|body_0|>\n\n def get(self, key):\n \"\"\":type key: int :rtype: int\"\"\"\n <|body_1|>\n\n def put(self, key, value):\n \"\"\":type key: int :type value: int :rtype: void\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.capacity = capacity\n self.l = DoublyLL()\n self.d = {}\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n n = self.d[key]\n except KeyError:\n return -1\n self.l.Remove(n)\n self.l.InsertFirst(n)\n return n.value\n<|end_body_1|>\n\n<|body_start_2|>\n if key in self.d:\n node = self.d[key]\n self.l.Remove(node)\n self.l.InsertFirst(node)\n node.value = value\n else:\n if self.l.count >= self.capacity:\n node = self.l.RemoveTail()\n del self.d[node.key]\n node = Node(key, value)\n self.l.InsertFirst(node)\n self.d[key] = node\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000494", "length_bytes": 2641, "license_type": "no_license", "methods": [{"docstring": ":type capacity: int", "name": "__init__", "signature": "def __init__(self, capacity)"}, {"docstring": ":type key: int :rtype: int", "name": "get", "signature": "def get(self, key)"}, {"docstring": ":type key: int :type value: int :rtype: void", "name": "put", "signature": "def put(self, key, value)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003342", "prompt": "Implement the Python class `LRUCache` described below.\n\nClass description:\nImplement the LRUCache class.\n\nMethod signatures and 
docstrings:\n- def __init__(self, capacity): :type capacity: int\n- def get(self, key): :type key: int :rtype: int\n- def put(self, key, value): :type key: int :type value: int :rtype: void", "prompted_full_text": "Implement the Python class `LRUCache` described below.\n\nClass description:\nImplement the LRUCache class.\n\nMethod signatures and docstrings:\n- def __init__(self, capacity): :type capacity: int\n- def get(self, key): :type key: int :rtype: int\n- def put(self, key, value): :type key: int :type value: int :rtype: void\n\n<|skeleton|>\nclass LRUCache:\n\n def __init__(self, capacity):\n \"\"\":type capacity: int\"\"\"\n <|body_0|>\n\n def get(self, key):\n \"\"\":type key: int :rtype: int\"\"\"\n <|body_1|>\n\n def put(self, key, value):\n \"\"\":type key: int :type value: int :rtype: void\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.capacity = capacity\n self.l = DoublyLL()\n self.d = {}\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n n = self.d[key]\n except KeyError:\n return -1\n self.l.Remove(n)\n self.l.InsertFirst(n)\n return n.value\n<|end_body_1|>\n\n<|body_start_2|>\n if key in self.d:\n node = self.d[key]\n self.l.Remove(node)\n self.l.InsertFirst(node)\n node.value = value\n else:\n if self.l.count >= self.capacity:\n node = self.l.RemoveTail()\n del self.d[node.key]\n node = Node(key, value)\n self.l.InsertFirst(node)\n self.d[key] = node\n<|end_body_2|>\n", "revision_id": "786e1597b18cf5f16df0a3d7dfa0b80c1435de4d", "skeleton": "<|skeleton|>\nclass LRUCache:\n\n def __init__(self, capacity):\n \"\"\":type capacity: int\"\"\"\n <|body_0|>\n\n def get(self, key):\n \"\"\":type key: int :rtype: int\"\"\"\n <|body_1|>\n\n def put(self, key, value):\n \"\"\":type key: int :type value: int :rtype: void\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LRUCache:\n def __init__(self, capacity):\n \"\"\":type capacity: int\"\"\"\n self.capacity = capacity\n self.l = DoublyLL()\n self.d = {}\n\n def get(self, key):\n \"\"\":type key: int :rtype: int\"\"\"\n try:\n n = self.d[key]\n except KeyError:\n return -1\n self.l.Remove(n)\n self.l.InsertFirst(n)\n return n.value\n\n def put(self, key, value):\n \"\"\":type key: int :type value: int :rtype: void\"\"\"\n if key in self.d:\n node = self.d[key]\n self.l.Remove(node)\n self.l.InsertFirst(node)\n node.value = value\n else:\n if self.l.count >= self.capacity:\n node = self.l.RemoveTail()\n del self.d[node.key]\n node = Node(key, value)\n self.l.InsertFirst(node)\n self.d[key] = node\n", "source": "the_stack_v2_python_sparse", "source_path": "No_146_LRU_Cache.py", "source_repo": "georgewashingturd/leetcode", "split": "val", "star_events_count": 0} {"blob_id": "dff14366abd8fb6fa0ae5f1baa8d0095699fb548", "bodies": ["value_json = {'type': 'struct', 'ctype': str(self.value.type), 'address': self.address, 'fields': []}\nfor field in self.value.type.fields():\n if not field.is_base_class:\n field_json = {'field': field.name, 'value': None}\n field_printer = general_lookup_function(self.value[field.name], self.visited_addresses_and_types)\n try:\n field_json['value'] = field_printer.to_json()\n except:\n field_json['value'] = 'extract failed'\n value_json['fields'].append(field_json)\nreturn value_json", "if cast_to_dynamic_type and self.value.type.strip_typedefs() != self.value.dynamic_type.strip_typedefs():\n self.value = self.value.cast(self.value.dynamic_type)\nvalue_json = 
self.to_json_without_expanding_base_class()\nif value_json['type'] != 'visited':\n base_classes_list = []\n for field in self.value.type.fields():\n if field.is_base_class:\n field_json = {'base_class': field.name, 'value': None}\n base_class_printer = ObjectPrinter(self.value.cast(field.type), self.visited_addresses_and_types)\n field_json['value'] = base_class_printer.to_json(cast_to_dynamic_type=False)\n base_classes_list.append(field_json)\n value_json['base_classes'] = base_classes_list\nreturn value_json"], "bodies_text": "<|body_start_0|>\n value_json = {'type': 'struct', 'ctype': str(self.value.type), 'address': self.address, 'fields': []}\n for field in self.value.type.fields():\n if not field.is_base_class:\n field_json = {'field': field.name, 'value': None}\n field_printer = general_lookup_function(self.value[field.name], self.visited_addresses_and_types)\n try:\n field_json['value'] = field_printer.to_json()\n except:\n field_json['value'] = 'extract failed'\n value_json['fields'].append(field_json)\n return value_json\n<|end_body_0|>\n\n<|body_start_1|>\n if cast_to_dynamic_type and self.value.type.strip_typedefs() != self.value.dynamic_type.strip_typedefs():\n self.value = self.value.cast(self.value.dynamic_type)\n value_json = self.to_json_without_expanding_base_class()\n if value_json['type'] != 'visited':\n base_classes_list = []\n for field in self.value.type.fields():\n if field.is_base_class:\n field_json = {'base_class': field.name, 'value': None}\n base_class_printer = ObjectPrinter(self.value.cast(field.type), self.visited_addresses_and_types)\n field_json['value'] = base_class_printer.to_json(cast_to_dynamic_type=False)\n base_classes_list.append(field_json)\n value_json['base_classes'] = base_classes_list\n return value_json\n<|end_body_1|>\n", "class_docstring": "A Printer for objects in C++. The current version won't extract the dynamic/actual type of objects, and the member variable of a parent/child class won't be printed. We expect to support this function later.", "class_name": "ObjectPrinter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ObjectPrinter:\n \"\"\"A Printer for objects in C++. The current version won't extract the dynamic/actual type of objects, and the member variable of a parent/child class won't be printed. We expect to support this function later.\"\"\"\n\n def to_json_without_expanding_base_class(self):\n \"\"\"A helper function for the to_json method. It tries to extract all member variables of an object without casting it into base classes.\"\"\"\n <|body_0|>\n\n def to_json(self, cast_to_dynamic_type=True):\n \"\"\"Output format: { 'type': 'struct', 'ctype': string format type in C++, 'address': string format address, 'base_classes': [] # a list of value casted into each base class 'fields': [] # a list of struct fields }. For each field in fields, its format is: { 'field': string format of field name, 'field_type': 'base_class'/'member', if it is a base class. the value will be the object which is casted into that base_class. Otherwise, the value is the json of the member variable. 
'value': json of the field }.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n value_json = {'type': 'struct', 'ctype': str(self.value.type), 'address': self.address, 'fields': []}\n for field in self.value.type.fields():\n if not field.is_base_class:\n field_json = {'field': field.name, 'value': None}\n field_printer = general_lookup_function(self.value[field.name], self.visited_addresses_and_types)\n try:\n field_json['value'] = field_printer.to_json()\n except:\n field_json['value'] = 'extract failed'\n value_json['fields'].append(field_json)\n return value_json\n<|end_body_0|>\n\n<|body_start_1|>\n if cast_to_dynamic_type and self.value.type.strip_typedefs() != self.value.dynamic_type.strip_typedefs():\n self.value = self.value.cast(self.value.dynamic_type)\n value_json = self.to_json_without_expanding_base_class()\n if value_json['type'] != 'visited':\n base_classes_list = []\n for field in self.value.type.fields():\n if field.is_base_class:\n field_json = {'base_class': field.name, 'value': None}\n base_class_printer = ObjectPrinter(self.value.cast(field.type), self.visited_addresses_and_types)\n field_json['value'] = base_class_printer.to_json(cast_to_dynamic_type=False)\n base_classes_list.append(field_json)\n value_json['base_classes'] = base_classes_list\n return value_json\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000495", "length_bytes": 14814, "license_type": "no_license", "methods": [{"docstring": "A helper function for the to_json method. It tries to extract all member variables of an object without casting it into base classes.", "name": "to_json_without_expanding_base_class", "signature": "def to_json_without_expanding_base_class(self)"}, {"docstring": "Output format: { 'type': 'struct', 'ctype': string format type in C++, 'address': string format address, 'base_classes': [] # a list of value casted into each base class 'fields': [] # a list of struct fields }. For each field in fields, its format is: { 'field': string format of field name, 'field_type': 'base_class'/'member', if it is a base class. the value will be the object which is casted into that base_class. Otherwise, the value is the json of the member variable. 'value': json of the field }.", "name": "to_json", "signature": "def to_json(self, cast_to_dynamic_type=True)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000235", "prompt": "Implement the Python class `ObjectPrinter` described below.\n\nClass description:\nA Printer for objects in C++. The current version won't extract the dynamic/actual type of objects, and the member variable of a parent/child class won't be printed. We expect to support this function later.\n\nMethod signatures and docstrings:\n- def to_json_without_expanding_base_class(self): A helper function for the to_json method. It tries to extract all member variables of an object without casting it into base classes.\n- def to_json(self, cast_to_dynamic_type=True): Output format: { 'type': 'struct', 'ctype': string format type in C++, 'address': string format address, 'base_classes': [] # a list of value casted into each base class 'fields': [] # a list of struct fields }. For each field in fields, its format is: { 'field': string format of field name, 'field_type': 'base_class'/'member', if it is a base class. the value will be the object which is casted into that base_class. Otherwise, the value is the json of the member variable. 
'value': json of the field }.", "prompted_full_text": "Implement the Python class `ObjectPrinter` described below.\n\nClass description:\nA Printer for objects in C++. The current version won't extract the dynamic/actual type of objects, and the member variable of a parent/child class won't be printed. We expect to support this function later.\n\nMethod signatures and docstrings:\n- def to_json_without_expanding_base_class(self): A helper function for the to_json method. It tries to extract all member variables of an object without casting it into base classes.\n- def to_json(self, cast_to_dynamic_type=True): Output format: { 'type': 'struct', 'ctype': string format type in C++, 'address': string format address, 'base_classes': [] # a list of value casted into each base class 'fields': [] # a list of struct fields }. For each field in fields, its format is: { 'field': string format of field name, 'field_type': 'base_class'/'member', if it is a base class. the value will be the object which is casted into that base_class. Otherwise, the value is the json of the member variable. 'value': json of the field }.\n\n<|skeleton|>\nclass ObjectPrinter:\n \"\"\"A Printer for objects in C++. The current version won't extract the dynamic/actual type of objects, and the member variable of a parent/child class won't be printed. We expect to support this function later.\"\"\"\n\n def to_json_without_expanding_base_class(self):\n \"\"\"A helper function for the to_json method. It tries to extract all member variables of an object without casting it into base classes.\"\"\"\n <|body_0|>\n\n def to_json(self, cast_to_dynamic_type=True):\n \"\"\"Output format: { 'type': 'struct', 'ctype': string format type in C++, 'address': string format address, 'base_classes': [] # a list of value casted into each base class 'fields': [] # a list of struct fields }. For each field in fields, its format is: { 'field': string format of field name, 'field_type': 'base_class'/'member', if it is a base class. the value will be the object which is casted into that base_class. Otherwise, the value is the json of the member variable. 
'value': json of the field }.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n value_json = {'type': 'struct', 'ctype': str(self.value.type), 'address': self.address, 'fields': []}\n for field in self.value.type.fields():\n if not field.is_base_class:\n field_json = {'field': field.name, 'value': None}\n field_printer = general_lookup_function(self.value[field.name], self.visited_addresses_and_types)\n try:\n field_json['value'] = field_printer.to_json()\n except:\n field_json['value'] = 'extract failed'\n value_json['fields'].append(field_json)\n return value_json\n<|end_body_0|>\n\n<|body_start_1|>\n if cast_to_dynamic_type and self.value.type.strip_typedefs() != self.value.dynamic_type.strip_typedefs():\n self.value = self.value.cast(self.value.dynamic_type)\n value_json = self.to_json_without_expanding_base_class()\n if value_json['type'] != 'visited':\n base_classes_list = []\n for field in self.value.type.fields():\n if field.is_base_class:\n field_json = {'base_class': field.name, 'value': None}\n base_class_printer = ObjectPrinter(self.value.cast(field.type), self.visited_addresses_and_types)\n field_json['value'] = base_class_printer.to_json(cast_to_dynamic_type=False)\n base_classes_list.append(field_json)\n value_json['base_classes'] = base_classes_list\n return value_json\n<|end_body_1|>\n", "revision_id": "78a61ca023cbf1a0cecfef8b97df2b274ac3a988", "skeleton": "<|skeleton|>\nclass ObjectPrinter:\n \"\"\"A Printer for objects in C++. The current version won't extract the dynamic/actual type of objects, and the member variable of a parent/child class won't be printed. We expect to support this function later.\"\"\"\n\n def to_json_without_expanding_base_class(self):\n \"\"\"A helper function for the to_json method. It tries to extract all member variables of an object without casting it into base classes.\"\"\"\n <|body_0|>\n\n def to_json(self, cast_to_dynamic_type=True):\n \"\"\"Output format: { 'type': 'struct', 'ctype': string format type in C++, 'address': string format address, 'base_classes': [] # a list of value casted into each base class 'fields': [] # a list of struct fields }. For each field in fields, its format is: { 'field': string format of field name, 'field_type': 'base_class'/'member', if it is a base class. the value will be the object which is casted into that base_class. Otherwise, the value is the json of the member variable. 'value': json of the field }.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ObjectPrinter:\n \"\"\"A Printer for objects in C++. The current version won't extract the dynamic/actual type of objects, and the member variable of a parent/child class won't be printed. We expect to support this function later.\"\"\"\n\n def to_json_without_expanding_base_class(self):\n \"\"\"A helper function for the to_json method. 
It tries to extract all member variables of an object without casting it into base classes.\"\"\"\n value_json = {'type': 'struct', 'ctype': str(self.value.type), 'address': self.address, 'fields': []}\n for field in self.value.type.fields():\n if not field.is_base_class:\n field_json = {'field': field.name, 'value': None}\n field_printer = general_lookup_function(self.value[field.name], self.visited_addresses_and_types)\n try:\n field_json['value'] = field_printer.to_json()\n except:\n field_json['value'] = 'extract failed'\n value_json['fields'].append(field_json)\n return value_json\n\n def to_json(self, cast_to_dynamic_type=True):\n \"\"\"Output format: { 'type': 'struct', 'ctype': string format type in C++, 'address': string format address, 'base_classes': [] # a list of value casted into each base class 'fields': [] # a list of struct fields }. For each field in fields, its format is: { 'field': string format of field name, 'field_type': 'base_class'/'member', if it is a base class. the value will be the object which is casted into that base_class. Otherwise, the value is the json of the member variable. 'value': json of the field }.\"\"\"\n if cast_to_dynamic_type and self.value.type.strip_typedefs() != self.value.dynamic_type.strip_typedefs():\n self.value = self.value.cast(self.value.dynamic_type)\n value_json = self.to_json_without_expanding_base_class()\n if value_json['type'] != 'visited':\n base_classes_list = []\n for field in self.value.type.fields():\n if field.is_base_class:\n field_json = {'base_class': field.name, 'value': None}\n base_class_printer = ObjectPrinter(self.value.cast(field.type), self.visited_addresses_and_types)\n field_json['value'] = base_class_printer.to_json(cast_to_dynamic_type=False)\n base_classes_list.append(field_json)\n value_json['base_classes'] = base_classes_list\n return value_json\n", "source": "the_stack_v2_python_sparse", "source_path": "tools/security/gdb/gdb_json_printer/gdb_json_printer/printers.py", "source_repo": "ZYHGOD-1/Aosp11", "split": "val", "star_events_count": 0} {"blob_id": "7d06735b8c2590eaadf87b85ff9e4b6db86fb396", "bodies": ["commander_parser = argparse.ArgumentParser(description=cls.HELP, add_help=False)\ncommander_options = commander_parser.add_argument_group('commander options')\ncommander_options.add_argument('-H', '--halt', action='store_true', default=None, help='Halt core upon connect. (Deprecated, see --connect.)')\ncommander_options.add_argument('-N', '--no-init', action='store_true', help='Do not init debug system.')\ncommander_options.add_argument('--elf', metavar='PATH', help='Optionally specify ELF file being debugged.')\ncommander_options.add_argument('-c', '--command', dest='commands', metavar='CMD', action='append', nargs='+', help='Run commands.')\ncommander_options.add_argument('-x', '--execute', dest='commands', metavar='FILE', action='append', type=argparse.FileType('r'), help='Execute commands from file. 
Pass - for stdin.')\ncommander_options.add_argument('-i', '--interactive', action='store_true', help='Stay in interactive mode after running commands specified from command line or file.')\nreturn [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, commander_parser]", "if self._args.commands is not None:\n cmds = []\n for cmd in self._args.commands:\n if isinstance(cmd, io.IOBase):\n cmds.append(cmd)\n else:\n cmds.append(' '.join(cmd))\nelse:\n cmds = None\nPyOCDCommander(self._args, cmds).run()\nreturn 0"], "bodies_text": "<|body_start_0|>\n commander_parser = argparse.ArgumentParser(description=cls.HELP, add_help=False)\n commander_options = commander_parser.add_argument_group('commander options')\n commander_options.add_argument('-H', '--halt', action='store_true', default=None, help='Halt core upon connect. (Deprecated, see --connect.)')\n commander_options.add_argument('-N', '--no-init', action='store_true', help='Do not init debug system.')\n commander_options.add_argument('--elf', metavar='PATH', help='Optionally specify ELF file being debugged.')\n commander_options.add_argument('-c', '--command', dest='commands', metavar='CMD', action='append', nargs='+', help='Run commands.')\n commander_options.add_argument('-x', '--execute', dest='commands', metavar='FILE', action='append', type=argparse.FileType('r'), help='Execute commands from file. Pass - for stdin.')\n commander_options.add_argument('-i', '--interactive', action='store_true', help='Stay in interactive mode after running commands specified from command line or file.')\n return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, commander_parser]\n<|end_body_0|>\n\n<|body_start_1|>\n if self._args.commands is not None:\n cmds = []\n for cmd in self._args.commands:\n if isinstance(cmd, io.IOBase):\n cmds.append(cmd)\n else:\n cmds.append(' '.join(cmd))\n else:\n cmds = None\n PyOCDCommander(self._args, cmds).run()\n return 0\n<|end_body_1|>\n", "class_docstring": "@brief `pyocd commander` subcommand.", "class_name": "CommanderSubcommand", "detected_licenses": ["CC-BY-4.0", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CommanderSubcommand:\n \"\"\"@brief `pyocd commander` subcommand.\"\"\"\n\n def get_args(cls) -> List[argparse.ArgumentParser]:\n \"\"\"@brief Add this subcommand to the subparsers object.\"\"\"\n <|body_0|>\n\n def invoke(self) -> int:\n \"\"\"@brief Handle 'commander' subcommand.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n commander_parser = argparse.ArgumentParser(description=cls.HELP, add_help=False)\n commander_options = commander_parser.add_argument_group('commander options')\n commander_options.add_argument('-H', '--halt', action='store_true', default=None, help='Halt core upon connect. (Deprecated, see --connect.)')\n commander_options.add_argument('-N', '--no-init', action='store_true', help='Do not init debug system.')\n commander_options.add_argument('--elf', metavar='PATH', help='Optionally specify ELF file being debugged.')\n commander_options.add_argument('-c', '--command', dest='commands', metavar='CMD', action='append', nargs='+', help='Run commands.')\n commander_options.add_argument('-x', '--execute', dest='commands', metavar='FILE', action='append', type=argparse.FileType('r'), help='Execute commands from file. 
Pass - for stdin.')\n commander_options.add_argument('-i', '--interactive', action='store_true', help='Stay in interactive mode after running commands specified from command line or file.')\n return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, commander_parser]\n<|end_body_0|>\n\n<|body_start_1|>\n if self._args.commands is not None:\n cmds = []\n for cmd in self._args.commands:\n if isinstance(cmd, io.IOBase):\n cmds.append(cmd)\n else:\n cmds.append(' '.join(cmd))\n else:\n cmds = None\n PyOCDCommander(self._args, cmds).run()\n return 0\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_val_000496", "length_bytes": 3430, "license_type": "permissive", "methods": [{"docstring": "@brief Add this subcommand to the subparsers object.", "name": "get_args", "signature": "def get_args(cls) -> List[argparse.ArgumentParser]"}, {"docstring": "@brief Handle 'commander' subcommand.", "name": "invoke", "signature": "def invoke(self) -> int"}], "n_methods": 2, "prompt": "Implement the Python class `CommanderSubcommand` described below.\n\nClass description:\n@brief `pyocd commander` subcommand.\n\nMethod signatures and docstrings:\n- def get_args(cls) -> List[argparse.ArgumentParser]: @brief Add this subcommand to the subparsers object.\n- def invoke(self) -> int: @brief Handle 'commander' subcommand.", "prompted_full_text": "Implement the Python class `CommanderSubcommand` described below.\n\nClass description:\n@brief `pyocd commander` subcommand.\n\nMethod signatures and docstrings:\n- def get_args(cls) -> List[argparse.ArgumentParser]: @brief Add this subcommand to the subparsers object.\n- def invoke(self) -> int: @brief Handle 'commander' subcommand.\n\n<|skeleton|>\nclass CommanderSubcommand:\n \"\"\"@brief `pyocd commander` subcommand.\"\"\"\n\n def get_args(cls) -> List[argparse.ArgumentParser]:\n \"\"\"@brief Add this subcommand to the subparsers object.\"\"\"\n <|body_0|>\n\n def invoke(self) -> int:\n \"\"\"@brief Handle 'commander' subcommand.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n commander_parser = argparse.ArgumentParser(description=cls.HELP, add_help=False)\n commander_options = commander_parser.add_argument_group('commander options')\n commander_options.add_argument('-H', '--halt', action='store_true', default=None, help='Halt core upon connect. (Deprecated, see --connect.)')\n commander_options.add_argument('-N', '--no-init', action='store_true', help='Do not init debug system.')\n commander_options.add_argument('--elf', metavar='PATH', help='Optionally specify ELF file being debugged.')\n commander_options.add_argument('-c', '--command', dest='commands', metavar='CMD', action='append', nargs='+', help='Run commands.')\n commander_options.add_argument('-x', '--execute', dest='commands', metavar='FILE', action='append', type=argparse.FileType('r'), help='Execute commands from file. 
Pass - for stdin.')\n commander_options.add_argument('-i', '--interactive', action='store_true', help='Stay in interactive mode after running commands specified from command line or file.')\n return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, commander_parser]\n<|end_body_0|>\n\n<|body_start_1|>\n if self._args.commands is not None:\n cmds = []\n for cmd in self._args.commands:\n if isinstance(cmd, io.IOBase):\n cmds.append(cmd)\n else:\n cmds.append(' '.join(cmd))\n else:\n cmds = None\n PyOCDCommander(self._args, cmds).run()\n return 0\n<|end_body_1|>\n", "revision_id": "9253740baf46ebf4eacbce6bf3369150c5fb8ee0", "skeleton": "<|skeleton|>\nclass CommanderSubcommand:\n \"\"\"@brief `pyocd commander` subcommand.\"\"\"\n\n def get_args(cls) -> List[argparse.ArgumentParser]:\n \"\"\"@brief Add this subcommand to the subparsers object.\"\"\"\n <|body_0|>\n\n def invoke(self) -> int:\n \"\"\"@brief Handle 'commander' subcommand.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CommanderSubcommand:\n \"\"\"@brief `pyocd commander` subcommand.\"\"\"\n\n def get_args(cls) -> List[argparse.ArgumentParser]:\n \"\"\"@brief Add this subcommand to the subparsers object.\"\"\"\n commander_parser = argparse.ArgumentParser(description=cls.HELP, add_help=False)\n commander_options = commander_parser.add_argument_group('commander options')\n commander_options.add_argument('-H', '--halt', action='store_true', default=None, help='Halt core upon connect. (Deprecated, see --connect.)')\n commander_options.add_argument('-N', '--no-init', action='store_true', help='Do not init debug system.')\n commander_options.add_argument('--elf', metavar='PATH', help='Optionally specify ELF file being debugged.')\n commander_options.add_argument('-c', '--command', dest='commands', metavar='CMD', action='append', nargs='+', help='Run commands.')\n commander_options.add_argument('-x', '--execute', dest='commands', metavar='FILE', action='append', type=argparse.FileType('r'), help='Execute commands from file. 
Pass - for stdin.')\n commander_options.add_argument('-i', '--interactive', action='store_true', help='Stay in interactive mode after running commands specified from command line or file.')\n return [cls.CommonOptions.COMMON, cls.CommonOptions.CONNECT, commander_parser]\n\n def invoke(self) -> int:\n \"\"\"@brief Handle 'commander' subcommand.\"\"\"\n if self._args.commands is not None:\n cmds = []\n for cmd in self._args.commands:\n if isinstance(cmd, io.IOBase):\n cmds.append(cmd)\n else:\n cmds.append(' '.join(cmd))\n else:\n cmds = None\n PyOCDCommander(self._args, cmds).run()\n return 0\n", "source": "the_stack_v2_python_sparse", "source_path": "pyocd/subcommands/commander_cmd.py", "source_repo": "pyocd/pyOCD", "split": "val", "star_events_count": 507} {"blob_id": "42530ea1614978bebd3d0074f79ca1f0e6d21a31", "bodies": ["dp = [''] * n\ndp[0] = '1'\nfor i in range(1, n):\n queue = list(dp[i - 1])\n tmp = ''\n c = 1\n while queue:\n cur = queue.pop(0)\n if queue and queue[0] == cur:\n c += 1\n else:\n tmp += str(c) + cur\n c = 1\n dp[i] = tmp\nreturn dp[n - 1]", "import collections\nres = '1'\nfor i in range(1, n):\n queue = collections.deque(res)\n res = ''\n c = 1\n while queue:\n cur = queue.popleft()\n if queue and queue[0] == cur:\n c += 1\n else:\n res += str(c) + cur\n c = 1\nreturn res", "s = '1'\nfor _ in range(nums - 1):\n s = ''.join((str(len(p[0])) + p[1] for p in re.findall('((.)\\\\2*)', s)))\nreturn s"], "bodies_text": "<|body_start_0|>\n dp = [''] * n\n dp[0] = '1'\n for i in range(1, n):\n queue = list(dp[i - 1])\n tmp = ''\n c = 1\n while queue:\n cur = queue.pop(0)\n if queue and queue[0] == cur:\n c += 1\n else:\n tmp += str(c) + cur\n c = 1\n dp[i] = tmp\n return dp[n - 1]\n<|end_body_0|>\n\n<|body_start_1|>\n import collections\n res = '1'\n for i in range(1, n):\n queue = collections.deque(res)\n res = ''\n c = 1\n while queue:\n cur = queue.popleft()\n if queue and queue[0] == cur:\n c += 1\n else:\n res += str(c) + cur\n c = 1\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n s = '1'\n for _ in range(nums - 1):\n s = ''.join((str(len(p[0])) + p[1] for p in re.findall('((.)\\\\2*)', s)))\n return s\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def countAndSay(self, n):\n \"\"\"从左往右推 使用队列存储上一个数,然后不断删掉第一个数,直到队列删光了,使用c计数 :type n: int :rtype: str\"\"\"\n <|body_0|>\n\n def countAndSay3(self, n):\n \"\"\"优化空间 :type n: int :rtype: str\"\"\"\n <|body_1|>\n\n def countAndSay2(self, nums):\n \"\"\"骚方法:使用re 暂时还学不来 :param nums: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dp = [''] * n\n dp[0] = '1'\n for i in range(1, n):\n queue = list(dp[i - 1])\n tmp = ''\n c = 1\n while queue:\n cur = queue.pop(0)\n if queue and queue[0] == cur:\n c += 1\n else:\n tmp += str(c) + cur\n c = 1\n dp[i] = tmp\n return dp[n - 1]\n<|end_body_0|>\n\n<|body_start_1|>\n import collections\n res = '1'\n for i in range(1, n):\n queue = collections.deque(res)\n res = ''\n c = 1\n while queue:\n cur = queue.popleft()\n if queue and queue[0] == cur:\n c += 1\n else:\n res += str(c) + cur\n c = 1\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n s = '1'\n for _ in range(nums - 1):\n s = ''.join((str(len(p[0])) + p[1] for p in re.findall('((.)\\\\2*)', s)))\n return s\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000497", "length_bytes": 2207, "license_type": "no_license", "methods": [{"docstring": "从左往右推 
使用队列存储上一个数,然后不断删掉第一个数,直到队列删光了,使用c计数 :type n: int :rtype: str", "name": "countAndSay", "signature": "def countAndSay(self, n)"}, {"docstring": "优化空间 :type n: int :rtype: str", "name": "countAndSay3", "signature": "def countAndSay3(self, n)"}, {"docstring": "骚方法:使用re 暂时还学不来 :param nums: :return:", "name": "countAndSay2", "signature": "def countAndSay2(self, nums)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006809", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def countAndSay(self, n): 从左往右推 使用队列存储上一个数,然后不断删掉第一个数,直到队列删光了,使用c计数 :type n: int :rtype: str\n- def countAndSay3(self, n): 优化空间 :type n: int :rtype: str\n- def countAndSay2(self, nums): 骚方法:使用re 暂时还学不来 :param nums: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def countAndSay(self, n): 从左往右推 使用队列存储上一个数,然后不断删掉第一个数,直到队列删光了,使用c计数 :type n: int :rtype: str\n- def countAndSay3(self, n): 优化空间 :type n: int :rtype: str\n- def countAndSay2(self, nums): 骚方法:使用re 暂时还学不来 :param nums: :return:\n\n<|skeleton|>\nclass Solution:\n\n def countAndSay(self, n):\n \"\"\"从左往右推 使用队列存储上一个数,然后不断删掉第一个数,直到队列删光了,使用c计数 :type n: int :rtype: str\"\"\"\n <|body_0|>\n\n def countAndSay3(self, n):\n \"\"\"优化空间 :type n: int :rtype: str\"\"\"\n <|body_1|>\n\n def countAndSay2(self, nums):\n \"\"\"骚方法:使用re 暂时还学不来 :param nums: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dp = [''] * n\n dp[0] = '1'\n for i in range(1, n):\n queue = list(dp[i - 1])\n tmp = ''\n c = 1\n while queue:\n cur = queue.pop(0)\n if queue and queue[0] == cur:\n c += 1\n else:\n tmp += str(c) + cur\n c = 1\n dp[i] = tmp\n return dp[n - 1]\n<|end_body_0|>\n\n<|body_start_1|>\n import collections\n res = '1'\n for i in range(1, n):\n queue = collections.deque(res)\n res = ''\n c = 1\n while queue:\n cur = queue.popleft()\n if queue and queue[0] == cur:\n c += 1\n else:\n res += str(c) + cur\n c = 1\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n s = '1'\n for _ in range(nums - 1):\n s = ''.join((str(len(p[0])) + p[1] for p in re.findall('((.)\\\\2*)', s)))\n return s\n<|end_body_2|>\n", "revision_id": "5d3574ccd282d0146c83c286ae28d8baaabd4910", "skeleton": "<|skeleton|>\nclass Solution:\n\n def countAndSay(self, n):\n \"\"\"从左往右推 使用队列存储上一个数,然后不断删掉第一个数,直到队列删光了,使用c计数 :type n: int :rtype: str\"\"\"\n <|body_0|>\n\n def countAndSay3(self, n):\n \"\"\"优化空间 :type n: int :rtype: str\"\"\"\n <|body_1|>\n\n def countAndSay2(self, nums):\n \"\"\"骚方法:使用re 暂时还学不来 :param nums: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def countAndSay(self, n):\n \"\"\"从左往右推 使用队列存储上一个数,然后不断删掉第一个数,直到队列删光了,使用c计数 :type n: int :rtype: str\"\"\"\n dp = [''] * n\n dp[0] = '1'\n for i in range(1, n):\n queue = list(dp[i - 1])\n tmp = ''\n c = 1\n while queue:\n cur = queue.pop(0)\n if queue and queue[0] == cur:\n c += 1\n else:\n tmp += str(c) + cur\n c = 1\n dp[i] = tmp\n return dp[n - 1]\n\n def countAndSay3(self, n):\n \"\"\"优化空间 :type n: int :rtype: str\"\"\"\n import collections\n res = '1'\n for i in range(1, n):\n queue = collections.deque(res)\n res = ''\n c = 1\n while queue:\n cur = queue.popleft()\n if queue and queue[0] == cur:\n c += 1\n else:\n res += str(c) + cur\n c = 1\n return 
res\n\n def countAndSay2(self, nums):\n \"\"\"骚方法:使用re 暂时还学不来 :param nums: :return:\"\"\"\n s = '1'\n for _ in range(nums - 1):\n s = ''.join((str(len(p[0])) + p[1] for p in re.findall('((.)\\\\2*)', s)))\n return s\n", "source": "the_stack_v2_python_sparse", "source_path": "38_报数.py", "source_repo": "lovehhf/LeetCode", "split": "val", "star_events_count": 0} {"blob_id": "2e75f3f70ab13799d3b163d4f2873035a0de5839", "bodies": ["Container.__init__(self, 'get_string_dialog', padding=5)\nself.callback = callback\nself.sub(Label('prompt', prompt, pygame.Rect((0, 0), (200, 30))))\ntextbox = TextBox('textbox', pygame.Rect((0, 0), (200, 30)), return_callback=self.return_key)\nself.sub(textbox)\ndisplay.key_sensitive(textbox)\nself.sub(Button('OK', pygame.Rect((0, 0), (90, 30)), self.ok))\nreturn", "self.textbox.deactivate()\ncallback = self.callback\nstring = self.textbox.text\nself.destroy()\ncallback(string)\nreturn", "callback = self.callback\nself.destroy()\ncallback(text)\nreturn"], "bodies_text": "<|body_start_0|>\n Container.__init__(self, 'get_string_dialog', padding=5)\n self.callback = callback\n self.sub(Label('prompt', prompt, pygame.Rect((0, 0), (200, 30))))\n textbox = TextBox('textbox', pygame.Rect((0, 0), (200, 30)), return_callback=self.return_key)\n self.sub(textbox)\n display.key_sensitive(textbox)\n self.sub(Button('OK', pygame.Rect((0, 0), (90, 30)), self.ok))\n return\n<|end_body_0|>\n\n<|body_start_1|>\n self.textbox.deactivate()\n callback = self.callback\n string = self.textbox.text\n self.destroy()\n callback(string)\n return\n<|end_body_1|>\n\n<|body_start_2|>\n callback = self.callback\n self.destroy()\n callback(text)\n return\n<|end_body_2|>\n", "class_docstring": "A combination of Container, Label, TextBox and Button that asks the user for a string. Additional attributes: GetStringDialog.callback The callback to be called callback(string) when the input is confirmed.", "class_name": "GetStringDialog", "detected_licenses": ["Unlicense"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GetStringDialog:\n \"\"\"A combination of Container, Label, TextBox and Button that asks the user for a string. Additional attributes: GetStringDialog.callback The callback to be called callback(string) when the input is confirmed.\"\"\"\n\n def __init__(self, prompt, callback, display):\n \"\"\"Initialise. callback will be called callback(GetStringDialog.textbox.text) after the GetStringDialog is destroyed. It should call render() and flip the display to remove the GetStringDialog from the screen. 
display.key_sensitive() will be used to register the TextBox of this dialog.\"\"\"\n <|body_0|>\n\n def ok(self, plane):\n \"\"\"Button callback to destroy the GetStringDialog and call GetStringDialog.callback(string).\"\"\"\n <|body_1|>\n\n def return_key(self, text):\n \"\"\"Return key callback to destroy the GetStringDialog and call GetStringDialog.callback(string).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Container.__init__(self, 'get_string_dialog', padding=5)\n self.callback = callback\n self.sub(Label('prompt', prompt, pygame.Rect((0, 0), (200, 30))))\n textbox = TextBox('textbox', pygame.Rect((0, 0), (200, 30)), return_callback=self.return_key)\n self.sub(textbox)\n display.key_sensitive(textbox)\n self.sub(Button('OK', pygame.Rect((0, 0), (90, 30)), self.ok))\n return\n<|end_body_0|>\n\n<|body_start_1|>\n self.textbox.deactivate()\n callback = self.callback\n string = self.textbox.text\n self.destroy()\n callback(string)\n return\n<|end_body_1|>\n\n<|body_start_2|>\n callback = self.callback\n self.destroy()\n callback(text)\n return\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_val_000498", "length_bytes": 27668, "license_type": "permissive", "methods": [{"docstring": "Initialise. callback will be called callback(GetStringDialog.textbox.text) after the GetStringDialog is destroyed. It should call render() and flip the display to remove the GetStringDialog from the screen. display.key_sensitive() will be used to register the TextBox of this dialog.", "name": "__init__", "signature": "def __init__(self, prompt, callback, display)"}, {"docstring": "Button callback to destroy the GetStringDialog and call GetStringDialog.callback(string).", "name": "ok", "signature": "def ok(self, plane)"}, {"docstring": "Return key callback to destroy the GetStringDialog and call GetStringDialog.callback(string).", "name": "return_key", "signature": "def return_key(self, text)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_007202", "prompt": "Implement the Python class `GetStringDialog` described below.\n\nClass description:\nA combination of Container, Label, TextBox and Button that asks the user for a string. Additional attributes: GetStringDialog.callback The callback to be called callback(string) when the input is confirmed.\n\nMethod signatures and docstrings:\n- def __init__(self, prompt, callback, display): Initialise. callback will be called callback(GetStringDialog.textbox.text) after the GetStringDialog is destroyed. It should call render() and flip the display to remove the GetStringDialog from the screen. display.key_sensitive() will be used to register the TextBox of this dialog.\n- def ok(self, plane): Button callback to destroy the GetStringDialog and call GetStringDialog.callback(string).\n- def return_key(self, text): Return key callback to destroy the GetStringDialog and call GetStringDialog.callback(string).", "prompted_full_text": "Implement the Python class `GetStringDialog` described below.\n\nClass description:\nA combination of Container, Label, TextBox and Button that asks the user for a string. Additional attributes: GetStringDialog.callback The callback to be called callback(string) when the input is confirmed.\n\nMethod signatures and docstrings:\n- def __init__(self, prompt, callback, display): Initialise. callback will be called callback(GetStringDialog.textbox.text) after the GetStringDialog is destroyed. It should call render() and flip the display to remove the GetStringDialog from the screen. 
display.key_sensitive() will be used to register the TextBox of this dialog.\n- def ok(self, plane): Button callback to destroy the GetStringDialog and call GetStringDialog.callback(string).\n- def return_key(self, text): Return key callback to destroy the GetStringDialog and call GetStringDialog.callback(string).\n\n<|skeleton|>\nclass GetStringDialog:\n \"\"\"A combination of Container, Label, TextBox and Button that asks the user for a string. Additional attributes: GetStringDialog.callback The callback to be called callback(string) when the input is confirmed.\"\"\"\n\n def __init__(self, prompt, callback, display):\n \"\"\"Initialise. callback will be called callback(GetStringDialog.textbox.text) after the GetStringDialog is destroyed. It should call render() and flip the display to remove the GetStringDialog from the screen. display.key_sensitive() will be used to register the TextBox of this dialog.\"\"\"\n <|body_0|>\n\n def ok(self, plane):\n \"\"\"Button callback to destroy the GetStringDialog and call GetStringDialog.callback(string).\"\"\"\n <|body_1|>\n\n def return_key(self, text):\n \"\"\"Return key callback to destroy the GetStringDialog and call GetStringDialog.callback(string).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Container.__init__(self, 'get_string_dialog', padding=5)\n self.callback = callback\n self.sub(Label('prompt', prompt, pygame.Rect((0, 0), (200, 30))))\n textbox = TextBox('textbox', pygame.Rect((0, 0), (200, 30)), return_callback=self.return_key)\n self.sub(textbox)\n display.key_sensitive(textbox)\n self.sub(Button('OK', pygame.Rect((0, 0), (90, 30)), self.ok))\n return\n<|end_body_0|>\n\n<|body_start_1|>\n self.textbox.deactivate()\n callback = self.callback\n string = self.textbox.text\n self.destroy()\n callback(string)\n return\n<|end_body_1|>\n\n<|body_start_2|>\n callback = self.callback\n self.destroy()\n callback(text)\n return\n<|end_body_2|>\n", "revision_id": "c2fc3d4e9beedb8487cfa4bfa13bdf55ec36af97", "skeleton": "<|skeleton|>\nclass GetStringDialog:\n \"\"\"A combination of Container, Label, TextBox and Button that asks the user for a string. Additional attributes: GetStringDialog.callback The callback to be called callback(string) when the input is confirmed.\"\"\"\n\n def __init__(self, prompt, callback, display):\n \"\"\"Initialise. callback will be called callback(GetStringDialog.textbox.text) after the GetStringDialog is destroyed. It should call render() and flip the display to remove the GetStringDialog from the screen. display.key_sensitive() will be used to register the TextBox of this dialog.\"\"\"\n <|body_0|>\n\n def ok(self, plane):\n \"\"\"Button callback to destroy the GetStringDialog and call GetStringDialog.callback(string).\"\"\"\n <|body_1|>\n\n def return_key(self, text):\n \"\"\"Return key callback to destroy the GetStringDialog and call GetStringDialog.callback(string).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GetStringDialog:\n \"\"\"A combination of Container, Label, TextBox and Button that asks the user for a string. Additional attributes: GetStringDialog.callback The callback to be called callback(string) when the input is confirmed.\"\"\"\n\n def __init__(self, prompt, callback, display):\n \"\"\"Initialise. callback will be called callback(GetStringDialog.textbox.text) after the GetStringDialog is destroyed. 
It should call render() and flip the display to remove the GetStringDialog from the screen. display.key_sensitive() will be used to register the TextBox of this dialog.\"\"\"\n Container.__init__(self, 'get_string_dialog', padding=5)\n self.callback = callback\n self.sub(Label('prompt', prompt, pygame.Rect((0, 0), (200, 30))))\n textbox = TextBox('textbox', pygame.Rect((0, 0), (200, 30)), return_callback=self.return_key)\n self.sub(textbox)\n display.key_sensitive(textbox)\n self.sub(Button('OK', pygame.Rect((0, 0), (90, 30)), self.ok))\n return\n\n def ok(self, plane):\n \"\"\"Button callback to destroy the GetStringDialog and call GetStringDialog.callback(string).\"\"\"\n self.textbox.deactivate()\n callback = self.callback\n string = self.textbox.text\n self.destroy()\n callback(string)\n return\n\n def return_key(self, text):\n \"\"\"Return key callback to destroy the GetStringDialog and call GetStringDialog.callback(string).\"\"\"\n callback = self.callback\n self.destroy()\n callback(text)\n return\n", "source": "the_stack_v2_python_sparse", "source_path": "reference_scripts/clickndrag-0.4.1/clickndrag/gui.py", "source_repo": "stivosaurus/rpi-snippets", "split": "val", "star_events_count": 1} {"blob_id": "4053fdb2d58aa73ed020015a2ce7d02cb4dd9a7f", "bodies": ["self.head = head\nself.current = head\nself.length = 1", "iter = self.head\noutput = []\nin_loop = False\nwhile iter is not None:\n if in_loop and iter == self.head:\n break\n if iter == self.current:\n output.append('(%s)' % iter.data)\n else:\n output.append(str(iter.data))\n iter = iter.next\n if not in_loop:\n in_loop = True\nprint(' '.join(output))", "if self.length == 1:\n node = DoubleLinkedListNode(data, self.head, self.head)\n self.head.next = node\n self.head.prev = node\nelse:\n prev = self.head.prev\n node = DoubleLinkedListNode(data, prev, self.head)\n if prev is not None:\n prev.next = node\n self.head.prev = node\nself.length += 1\nif set_ptr:\n self.current = node", "iter = self.current\npace = 0\nif offset > 0:\n while iter.next is not None and pace < offset - 1:\n iter = iter.next\n pace += 1\n node = DoubleLinkedListNode(data, iter, iter.next)\n iter.next.prev = node\n iter.next = node\n self.length += 1\nelif offset < 0:\n while iter.prev is not None and pace < -offset - 1:\n iter = iter.prev\n pace += 1\n node = DoubleLinkedListNode(data, iter.prev, iter)\n iter.prev.next = node\n iter.prev = node\n self.length += 1\nelse:\n self.current.data = data\nif set_ptr:\n self.current = node", "iter = self.current\npace = 0\nif offset > 0:\n while iter.next is not None and pace < offset:\n iter = iter.next\n pace += 1\nelif offset < 0:\n while iter.prev is not None and pace < -offset:\n iter = iter.prev\n pace += 1\nd = iter.data\np, n = (iter.prev, iter.next)\niter.prev.next = n\niter.next.prev = p\nself.length -= 1\nif iter == self.head:\n self.head = iter.next\nif set_ptr:\n self.current = iter.next\nreturn d"], "bodies_text": "<|body_start_0|>\n self.head = head\n self.current = head\n self.length = 1\n<|end_body_0|>\n\n<|body_start_1|>\n iter = self.head\n output = []\n in_loop = False\n while iter is not None:\n if in_loop and iter == self.head:\n break\n if iter == self.current:\n output.append('(%s)' % iter.data)\n else:\n output.append(str(iter.data))\n iter = iter.next\n if not in_loop:\n in_loop = True\n print(' '.join(output))\n<|end_body_1|>\n\n<|body_start_2|>\n if self.length == 1:\n node = DoubleLinkedListNode(data, self.head, self.head)\n self.head.next = node\n self.head.prev = node\n else:\n prev 
= self.head.prev\n node = DoubleLinkedListNode(data, prev, self.head)\n if prev is not None:\n prev.next = node\n self.head.prev = node\n self.length += 1\n if set_ptr:\n self.current = node\n<|end_body_2|>\n\n<|body_start_3|>\n iter = self.current\n pace = 0\n if offset > 0:\n while iter.next is not None and pace < offset - 1:\n iter = iter.next\n pace += 1\n node = DoubleLinkedListNode(data, iter, iter.next)\n iter.next.prev = node\n iter.next = node\n self.length += 1\n elif offset < 0:\n while iter.prev is not None and pace < -offset - 1:\n iter = iter.prev\n pace += 1\n node = DoubleLinkedListNode(data, iter.prev, iter)\n iter.prev.next = node\n iter.prev = node\n self.length += 1\n else:\n self.current.data = data\n if set_ptr:\n self.current = node\n<|end_body_3|>\n\n<|body_start_4|>\n iter = self.current\n pace = 0\n if offset > 0:\n while iter.next is not None and pace < offset:\n iter = iter.next\n pace += 1\n elif offset < 0:\n while iter.prev is not None and pace < -offset:\n iter = iter.prev\n pace += 1\n d = iter.data\n p, n = (iter.prev, iter.next)\n iter.prev.next = n\n iter.next.prev = p\n self.length -= 1\n if iter == self.head:\n self.head = iter.next\n if set_ptr:\n self.current = iter.next\n return d\n<|end_body_4|>\n", "class_docstring": "Util class to implement a double linked list, i.e. a linked list where items both have a connection to the previous and the next node. The list is circular, meaning that the successor of the last node is the first node in the list. The list has a \"head\" pointer to the first item in the list (arbitrarily chosen in the cycle as the first one added) and a \"current\" pointer to the currently selected node.", "class_name": "DoubleLinkedList", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DoubleLinkedList:\n \"\"\"Util class to implement a double linked list, i.e. a linked list where items both have a connection to the previous and the next node. The list is circular, meaning that the successor of the last node is the first node in the list. The list has a \"head\" pointer to the first item in the list (arbitrarily chosen in the cycle as the first one added) and a \"current\" pointer to the currently selected node.\"\"\"\n\n def __init__(self, head):\n \"\"\"Initialization function for a double linked list. :param head: Reference to the first node in the list. :type head: DoubleLinkedListNode\"\"\"\n <|body_0|>\n\n def print(self):\n \"\"\"Util function to display the double linked list, starting from the head (and stopping at the end of the first cycle).\"\"\"\n <|body_1|>\n\n def add(self, data, set_ptr=False):\n \"\"\"Adds a node at the end of the double linked list. :param data: Value of the node to add. :type data: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the newly created node. :type set_ptr: bool\"\"\"\n <|body_2|>\n\n def insert(self, offset, data, set_ptr=False):\n \"\"\"Inserts a node in the double linked list with a given offset from the currently selected node. :param offset: Offset from the currently selected node: if positive, the node will be inserted in the successors, else it will be inserted in the predecessors. :type offset: int :param data: Value of the node to insert. :type data: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the newly created node. 
:type set_ptr: bool\"\"\"\n <|body_3|>\n\n def remove(self, offset, set_ptr=False):\n \"\"\"Removes a node in the double linked list with a given offset from the currently selected node. :param offset: Offset from the currently selected node: if positive, the node will be removed from the successors, else it will be removed from the predecessors. :type offset: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the successor of the removed node. :type set_ptr: bool\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.head = head\n self.current = head\n self.length = 1\n<|end_body_0|>\n\n<|body_start_1|>\n iter = self.head\n output = []\n in_loop = False\n while iter is not None:\n if in_loop and iter == self.head:\n break\n if iter == self.current:\n output.append('(%s)' % iter.data)\n else:\n output.append(str(iter.data))\n iter = iter.next\n if not in_loop:\n in_loop = True\n print(' '.join(output))\n<|end_body_1|>\n\n<|body_start_2|>\n if self.length == 1:\n node = DoubleLinkedListNode(data, self.head, self.head)\n self.head.next = node\n self.head.prev = node\n else:\n prev = self.head.prev\n node = DoubleLinkedListNode(data, prev, self.head)\n if prev is not None:\n prev.next = node\n self.head.prev = node\n self.length += 1\n if set_ptr:\n self.current = node\n<|end_body_2|>\n\n<|body_start_3|>\n iter = self.current\n pace = 0\n if offset > 0:\n while iter.next is not None and pace < offset - 1:\n iter = iter.next\n pace += 1\n node = DoubleLinkedListNode(data, iter, iter.next)\n iter.next.prev = node\n iter.next = node\n self.length += 1\n elif offset < 0:\n while iter.prev is not None and pace < -offset - 1:\n iter = iter.prev\n pace += 1\n node = DoubleLinkedListNode(data, iter.prev, iter)\n iter.prev.next = node\n iter.prev = node\n self.length += 1\n else:\n self.current.data = data\n if set_ptr:\n self.current = node\n<|end_body_3|>\n\n<|body_start_4|>\n iter = self.current\n pace = 0\n if offset > 0:\n while iter.next is not None and pace < offset:\n iter = iter.next\n pace += 1\n elif offset < 0:\n while iter.prev is not None and pace < -offset:\n iter = iter.prev\n pace += 1\n d = iter.data\n p, n = (iter.prev, iter.next)\n iter.prev.next = n\n iter.next.prev = p\n self.length -= 1\n if iter == self.head:\n self.head = iter.next\n if set_ptr:\n self.current = iter.next\n return d\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_val_000499", "length_bytes": 8037, "license_type": "no_license", "methods": [{"docstring": "Initialization function for a double linked list. :param head: Reference to the first node in the list. :type head: DoubleLinkedListNode", "name": "__init__", "signature": "def __init__(self, head)"}, {"docstring": "Util function to display the double linked list, starting from the head (and stopping at the end of the first cycle).", "name": "print", "signature": "def print(self)"}, {"docstring": "Adds a node at the end of the double linked list. :param data: Value of the node to add. :type data: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the newly created node. :type set_ptr: bool", "name": "add", "signature": "def add(self, data, set_ptr=False)"}, {"docstring": "Inserts a node in the double linked list with a given offset from the currently selected node. :param offset: Offset from the currently selected node: if positive, the node will be inserted in the successors, else it will be inserted in the predecessors. 
:type offset: int :param data: Value of the node to insert. :type data: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the newly created node. :type set_ptr: bool", "name": "insert", "signature": "def insert(self, offset, data, set_ptr=False)"}, {"docstring": "Removes a node in the double linked list with a given offset from the currently selected node. :param offset: Offset from the currently selected node: if positive, the node will be removed from the successors, else it will be removed from the predecessors. :type offset: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the successor of the removed node. :type set_ptr: bool", "name": "remove", "signature": "def remove(self, offset, set_ptr=False)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_002589", "prompt": "Implement the Python class `DoubleLinkedList` described below.\n\nClass description:\nUtil class to implement a double linked list, i.e. a linked list where items both have a connection to the previous and the next node. The list is circular, meaning that the successor of the last node is the first node in the list. The list has a \"head\" pointer to the first item in the list (arbitrarily chosen in the cycle as the first one added) and a \"current\" pointer to the currently selected node.\n\nMethod signatures and docstrings:\n- def __init__(self, head): Initialization function for a double linked list. :param head: Reference to the first node in the list. :type head: DoubleLinkedListNode\n- def print(self): Util function to display the double linked list, starting from the head (and stopping at the end of the first cycle).\n- def add(self, data, set_ptr=False): Adds a node at the end of the double linked list. :param data: Value of the node to add. :type data: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the newly created node. :type set_ptr: bool\n- def insert(self, offset, data, set_ptr=False): Inserts a node in the double linked list with a given offset from the currently selected node. :param offset: Offset from the currently selected node: if positive, the node will be inserted in the successors, else it will be inserted in the predecessors. :type offset: int :param data: Value of the node to insert. :type data: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the newly created node. :type set_ptr: bool\n- def remove(self, offset, set_ptr=False): Removes a node in the double linked list with a given offset from the currently selected node. :param offset: Offset from the currently selected node: if positive, the node will be removed from the successors, else it will be removed from the predecessors. :type offset: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the successor of the removed node. :type set_ptr: bool", "prompted_full_text": "Implement the Python class `DoubleLinkedList` described below.\n\nClass description:\nUtil class to implement a double linked list, i.e. a linked list where items both have a connection to the previous and the next node. The list is circular, meaning that the successor of the last node is the first node in the list. 
The list has a \"head\" pointer to the first item in the list (arbitrarily chosen in the cycle as the first one added) and a \"current\" pointer to the currently selected node.\n\nMethod signatures and docstrings:\n- def __init__(self, head): Initialization function for a double linked list. :param head: Reference to the first node in the list. :type head: DoubleLinkedListNode\n- def print(self): Util function to display the double linked list, starting from the head (and stopping at the end of the first cycle).\n- def add(self, data, set_ptr=False): Adds a node at the end of the double linked list. :param data: Value of the node to add. :type data: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the newly created node. :type set_ptr: bool\n- def insert(self, offset, data, set_ptr=False): Inserts a node in the double linked list with a given offset from the currently selected node. :param offset: Offset from the currently selected node: if positive, the node will be inserted in the successors, else it will be inserted in the predecessors. :type offset: int :param data: Value of the node to insert. :type data: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the newly created node. :type set_ptr: bool\n- def remove(self, offset, set_ptr=False): Removes a node in the double linked list with a given offset from the currently selected node. :param offset: Offset from the currently selected node: if positive, the node will be removed from the successors, else it will be removed from the predecessors. :type offset: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the successor of the removed node. :type set_ptr: bool\n\n<|skeleton|>\nclass DoubleLinkedList:\n \"\"\"Util class to implement a double linked list, i.e. a linked list where items both have a connection to the previous and the next node. The list is circular, meaning that the successor of the last node is the first node in the list. The list has a \"head\" pointer to the first item in the list (arbitrarily chosen in the cycle as the first one added) and a \"current\" pointer to the currently selected node.\"\"\"\n\n def __init__(self, head):\n \"\"\"Initialization function for a double linked list. :param head: Reference to the first node in the list. :type head: DoubleLinkedListNode\"\"\"\n <|body_0|>\n\n def print(self):\n \"\"\"Util function to display the double linked list, starting from the head (and stopping at the end of the first cycle).\"\"\"\n <|body_1|>\n\n def add(self, data, set_ptr=False):\n \"\"\"Adds a node at the end of the double linked list. :param data: Value of the node to add. :type data: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the newly created node. :type set_ptr: bool\"\"\"\n <|body_2|>\n\n def insert(self, offset, data, set_ptr=False):\n \"\"\"Inserts a node in the double linked list with a given offset from the currently selected node. :param offset: Offset from the currently selected node: if positive, the node will be inserted in the successors, else it will be inserted in the predecessors. :type offset: int :param data: Value of the node to insert. :type data: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the newly created node. 
:type set_ptr: bool\"\"\"\n <|body_3|>\n\n def remove(self, offset, set_ptr=False):\n \"\"\"Removes a node in the double linked list with a given offset from the currently selected node. :param offset: Offset from the currently selected node: if positive, the node will be removed from the successors, else it will be removed from the predecessors. :type offset: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the successor of the removed node. :type set_ptr: bool\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.head = head\n self.current = head\n self.length = 1\n<|end_body_0|>\n\n<|body_start_1|>\n iter = self.head\n output = []\n in_loop = False\n while iter is not None:\n if in_loop and iter == self.head:\n break\n if iter == self.current:\n output.append('(%s)' % iter.data)\n else:\n output.append(str(iter.data))\n iter = iter.next\n if not in_loop:\n in_loop = True\n print(' '.join(output))\n<|end_body_1|>\n\n<|body_start_2|>\n if self.length == 1:\n node = DoubleLinkedListNode(data, self.head, self.head)\n self.head.next = node\n self.head.prev = node\n else:\n prev = self.head.prev\n node = DoubleLinkedListNode(data, prev, self.head)\n if prev is not None:\n prev.next = node\n self.head.prev = node\n self.length += 1\n if set_ptr:\n self.current = node\n<|end_body_2|>\n\n<|body_start_3|>\n iter = self.current\n pace = 0\n if offset > 0:\n while iter.next is not None and pace < offset - 1:\n iter = iter.next\n pace += 1\n node = DoubleLinkedListNode(data, iter, iter.next)\n iter.next.prev = node\n iter.next = node\n self.length += 1\n elif offset < 0:\n while iter.prev is not None and pace < -offset - 1:\n iter = iter.prev\n pace += 1\n node = DoubleLinkedListNode(data, iter.prev, iter)\n iter.prev.next = node\n iter.prev = node\n self.length += 1\n else:\n self.current.data = data\n if set_ptr:\n self.current = node\n<|end_body_3|>\n\n<|body_start_4|>\n iter = self.current\n pace = 0\n if offset > 0:\n while iter.next is not None and pace < offset:\n iter = iter.next\n pace += 1\n elif offset < 0:\n while iter.prev is not None and pace < -offset:\n iter = iter.prev\n pace += 1\n d = iter.data\n p, n = (iter.prev, iter.next)\n iter.prev.next = n\n iter.next.prev = p\n self.length -= 1\n if iter == self.head:\n self.head = iter.next\n if set_ptr:\n self.current = iter.next\n return d\n<|end_body_4|>\n", "revision_id": "87011a22d5f1ebd1e7064c043678c3cdb02414e8", "skeleton": "<|skeleton|>\nclass DoubleLinkedList:\n \"\"\"Util class to implement a double linked list, i.e. a linked list where items both have a connection to the previous and the next node. The list is circular, meaning that the successor of the last node is the first node in the list. The list has a \"head\" pointer to the first item in the list (arbitrarily chosen in the cycle as the first one added) and a \"current\" pointer to the currently selected node.\"\"\"\n\n def __init__(self, head):\n \"\"\"Initialization function for a double linked list. :param head: Reference to the first node in the list. :type head: DoubleLinkedListNode\"\"\"\n <|body_0|>\n\n def print(self):\n \"\"\"Util function to display the double linked list, starting from the head (and stopping at the end of the first cycle).\"\"\"\n <|body_1|>\n\n def add(self, data, set_ptr=False):\n \"\"\"Adds a node at the end of the double linked list. :param data: Value of the node to add. 
:type data: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the newly created node. :type set_ptr: bool\"\"\"\n <|body_2|>\n\n def insert(self, offset, data, set_ptr=False):\n \"\"\"Inserts a node in the double linked list with a given offset from the currently selected node. :param offset: Offset from the currently selected node: if positive, the node will be inserted in the successors, else it will be inserted in the predecessors. :type offset: int :param data: Value of the node to insert. :type data: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the newly created node. :type set_ptr: bool\"\"\"\n <|body_3|>\n\n def remove(self, offset, set_ptr=False):\n \"\"\"Removes a node in the double linked list with a given offset from the currently selected node. :param offset: Offset from the currently selected node: if positive, the node will be removed from the successors, else it will be removed from the predecessors. :type offset: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the successor of the removed node. :type set_ptr: bool\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DoubleLinkedList:\n \"\"\"Util class to implement a double linked list, i.e. a linked list where items both have a connection to the previous and the next node. The list is circular, meaning that the successor of the last node is the first node in the list. The list has a \"head\" pointer to the first item in the list (arbitrarily chosen in the cycle as the first one added) and a \"current\" pointer to the currently selected node.\"\"\"\n\n def __init__(self, head):\n \"\"\"Initialization function for a double linked list. :param head: Reference to the first node in the list. :type head: DoubleLinkedListNode\"\"\"\n self.head = head\n self.current = head\n self.length = 1\n\n def print(self):\n \"\"\"Util function to display the double linked list, starting from the head (and stopping at the end of the first cycle).\"\"\"\n iter = self.head\n output = []\n in_loop = False\n while iter is not None:\n if in_loop and iter == self.head:\n break\n if iter == self.current:\n output.append('(%s)' % iter.data)\n else:\n output.append(str(iter.data))\n iter = iter.next\n if not in_loop:\n in_loop = True\n print(' '.join(output))\n\n def add(self, data, set_ptr=False):\n \"\"\"Adds a node at the end of the double linked list. :param data: Value of the node to add. :type data: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the newly created node. :type set_ptr: bool\"\"\"\n if self.length == 1:\n node = DoubleLinkedListNode(data, self.head, self.head)\n self.head.next = node\n self.head.prev = node\n else:\n prev = self.head.prev\n node = DoubleLinkedListNode(data, prev, self.head)\n if prev is not None:\n prev.next = node\n self.head.prev = node\n self.length += 1\n if set_ptr:\n self.current = node\n\n def insert(self, offset, data, set_ptr=False):\n \"\"\"Inserts a node in the double linked list with a given offset from the currently selected node. :param offset: Offset from the currently selected node: if positive, the node will be inserted in the successors, else it will be inserted in the predecessors. :type offset: int :param data: Value of the node to insert. 
:type data: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the newly created node. :type set_ptr: bool\"\"\"\n iter = self.current\n pace = 0\n if offset > 0:\n while iter.next is not None and pace < offset - 1:\n iter = iter.next\n pace += 1\n node = DoubleLinkedListNode(data, iter, iter.next)\n iter.next.prev = node\n iter.next = node\n self.length += 1\n elif offset < 0:\n while iter.prev is not None and pace < -offset - 1:\n iter = iter.prev\n pace += 1\n node = DoubleLinkedListNode(data, iter.prev, iter)\n iter.prev.next = node\n iter.prev = node\n self.length += 1\n else:\n self.current.data = data\n if set_ptr:\n self.current = node\n\n def remove(self, offset, set_ptr=False):\n \"\"\"Removes a node in the double linked list with a given offset from the currently selected node. :param offset: Offset from the currently selected node: if positive, the node will be removed from the successors, else it will be removed from the predecessors. :type offset: int :param set_ptr: If true, then the \"current\" pointer of the list will be assigned to the successor of the removed node. :type set_ptr: bool\"\"\"\n iter = self.current\n pace = 0\n if offset > 0:\n while iter.next is not None and pace < offset:\n iter = iter.next\n pace += 1\n elif offset < 0:\n while iter.prev is not None and pace < -offset:\n iter = iter.prev\n pace += 1\n d = iter.data\n p, n = (iter.prev, iter.next)\n iter.prev.next = n\n iter.next.prev = p\n self.length -= 1\n if iter == self.head:\n self.head = iter.next\n if set_ptr:\n self.current = iter.next\n return d\n", "source": "the_stack_v2_python_sparse", "source_path": "2018/Python/day9.py", "source_repo": "MinaPecheux/Advent-Of-Code", "split": "val", "star_events_count": 0}
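A minimal usage sketch for the DoubleLinkedList record above (its solution field mirrors the circular marble-ring pattern from the record's source_path, 2018/Python/day9.py). The DoubleLinkedListNode helper is not stored in the record, so the three-attribute version here is an assumption inferred from the list code's use of .data, .prev, and .next; the DoubleLinkedList class itself is taken as defined in the record's solution field.

# Assumed helper: the archived solution references DoubleLinkedListNode but
# never defines it; this shape is inferred from its .data/.prev/.next usage.
class DoubleLinkedListNode:
    def __init__(self, data, prev=None, next=None):
        self.data = data
        self.prev = prev
        self.next = next

# Exercise the DoubleLinkedList class from the record's solution field
# (assumed to be in scope alongside the node helper above).
dll = DoubleLinkedList(DoubleLinkedListNode(0))
dll.add(1)                           # circular list 0 <-> 1; current stays at head 0
dll.insert(1, 2, set_ptr=True)       # splice 2 one step after current: 0 2 1
dll.print()                          # prints "0 (2) 1" -- parentheses mark current
print(dll.remove(-1, set_ptr=True))  # removes the node one step back (head 0); prints 0
dll.print()                          # prints "(2) 1"; head has moved to 2

The set_ptr flag is what makes the structure convenient for the marble game: each insert or remove can atomically reposition the current cursor, so the caller never has to walk the ring a second time.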